├── .coveragerc ├── .flake8 ├── .git-blame-ignore-revs ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── questions-about-the-challenge.md └── workflows │ ├── ORDA.yaml │ ├── pypi.yaml │ ├── run_tests.yml │ └── sphinx_docs_to_gh_pages.yaml ├── .gitignore ├── .markdownlint-cli2.yaml ├── .pre-commit-config.yaml ├── .pylintrc ├── .pylintrc.precommit.ini ├── .python-version ├── .vscode └── settings.json ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── clarity ├── __init__.py ├── data │ ├── HOA_tools_cec2.py │ ├── __init__.py │ ├── demo_data.py │ ├── params │ │ └── speech_weight.mat │ ├── scene_builder_cec2.py │ ├── scene_renderer_cec1.py │ ├── scene_renderer_cec2.py │ └── utils.py ├── dataset │ └── cec1_dataset.py ├── engine │ ├── __init__.py │ ├── losses.py │ └── system.py ├── enhancer │ ├── __init__.py │ ├── compressor.py │ ├── dnn │ │ ├── __init__.py │ │ └── mc_conv_tasnet.py │ ├── dsp │ │ ├── __init__.py │ │ └── filter.py │ ├── gha │ │ ├── __init__.py │ │ ├── cfg_files │ │ │ ├── prerelease_combination3_smooth_template.cfg │ │ │ └── prerelease_combination4_smooth_template.cfg │ │ ├── gainrule_camfit.py │ │ ├── gha_interface.py │ │ └── gha_utils.py │ ├── multiband_compressor │ │ ├── __init__.py │ │ ├── compressor_qmul.py │ │ ├── crossover.py │ │ └── multiband_compressor.py │ └── nalr.py ├── evaluator │ ├── __init__.py │ ├── haaqi │ │ ├── __init__.py │ │ └── haaqi.py │ ├── haspi │ │ ├── __init__.py │ │ ├── eb.py │ │ ├── ebm.py │ │ ├── haspi.py │ │ └── ip.py │ ├── hasqi │ │ ├── __init__.py │ │ └── hasqi.py │ ├── mbstoi │ │ ├── __init__.py │ │ ├── mbstoi.py │ │ ├── mbstoi_utils.py │ │ └── parameters.yaml │ └── msbg │ │ ├── __init__.py │ │ ├── cochlea.py │ │ ├── msbg.py │ │ ├── msbg_hparams │ │ ├── GT4FBank_Brd1.5E_Spaced1.1E_44100Fs.json │ │ ├── GT4FBank_Brd2.0E_Spaced1.5E_44100Fs.json │ │ ├── GT4FBank_Brd3.0E_Spaced2.3E_44100Fs.json │ │ └── __init__.py │ │ ├── msbg_utils.py │ │ └── smearing.py ├── predictor │ ├── 
__init__.py │ ├── torch_msbg.py │ └── torch_stoi.py └── utils │ ├── __init__.py │ ├── audiogram.py │ ├── car_noise_simulator │ ├── __init__.py │ ├── carnoise_parameters_generator.py │ └── carnoise_signal_generator.py │ ├── file_io.py │ ├── flac_encoder.py │ ├── results_support.py │ ├── signal_processing.py │ └── source_separation_support.py ├── docs ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Makefile ├── conf.py ├── images │ ├── cadenza_logo.png │ ├── cropped-cadenza_logo_square-1.png │ └── earfinal_clarity_customColour.png ├── index.md ├── installation.md ├── introduction.md ├── recipe_rsync_exclude.txt ├── recipes_doc.md └── usage.md ├── notebooks ├── 01_Installing_clarity_tools_and_using_metadata.ipynb ├── 02_Running_the_CEC2_baseline_from_commandline.ipynb ├── 03_Running_the_CEC2_baseline_from_python.ipynb └── README.md ├── pylint_audit.md ├── pyproject.toml ├── recipes ├── __init__.py ├── cad1 │ ├── README.md │ ├── __init__.py │ ├── task1 │ │ ├── __init__.py │ │ └── baseline │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── config.yaml │ │ │ ├── enhance.py │ │ │ ├── evaluate.py │ │ │ ├── merge_batches_results.py │ │ │ └── test.py │ └── task2 │ │ ├── __init__.py │ │ ├── baseline │ │ ├── README.md │ │ ├── __init__.py │ │ ├── audio_manager.py │ │ ├── baseline_utils.py │ │ ├── car_scene_acoustics.py │ │ ├── config.yaml │ │ ├── enhance.py │ │ ├── evaluate.py │ │ ├── merge_batches_results.py │ │ └── test.py │ │ └── data_preparation │ │ ├── __init__.py │ │ ├── build_scene_metadata.py │ │ └── config.yaml ├── cad2 │ ├── README.md │ ├── __init__.py │ ├── task1 │ │ ├── ConvTasNet │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── eval.py │ │ │ ├── local │ │ │ │ ├── __init__.py │ │ │ │ ├── conf.yml │ │ │ │ ├── musdb18_dataset.py │ │ │ │ ├── prepare_data.sh │ │ │ │ ├── system.py │ │ │ │ └── tasnet.py │ │ │ ├── requirements.txt │ │ │ ├── train.py │ │ │ └── utils │ │ │ │ ├── parse_options.sh │ │ │ │ └── prepare_python_env.sh │ │ ├── README.md │ │ ├── __init__.py │ │ 
├── baseline │ │ │ ├── __init__.py │ │ │ ├── config.yaml │ │ │ ├── enhance.py │ │ │ └── evaluate.py │ │ └── requirements.txt │ └── task2 │ │ ├── ConvTasNet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── eval.py │ │ ├── local │ │ │ ├── __init__.py │ │ │ ├── cad2task2_dataloader.py │ │ │ ├── conf.yml │ │ │ └── tasnet.py │ │ ├── run.sh │ │ ├── train.py │ │ └── utils │ │ │ ├── parse_options.sh │ │ │ └── prepare_python_env.sh │ │ ├── README.md │ │ ├── __init__.py │ │ ├── baseline │ │ ├── __init__.py │ │ ├── config.yaml │ │ ├── enhance.py │ │ ├── evaluate.py │ │ └── merge_batches_results.py │ │ └── process_dataset │ │ ├── __init__.py │ │ ├── config.yaml │ │ ├── generate_train_scenes.py │ │ └── process_zenodo_download.py ├── cad_icassp_2024 │ ├── __init__.py │ ├── baseline │ │ ├── README.md │ │ ├── config.yaml │ │ ├── enhance.py │ │ ├── evaluate.py │ │ └── merge_batches_results.py │ └── generate_dataset │ │ ├── README.md │ │ ├── config.yaml │ │ ├── generate_at_mic_musdb18.py │ │ └── generate_train_scenes.py ├── cec1 │ ├── README.md │ ├── __init__.py │ ├── baseline │ │ ├── README.md │ │ ├── __init__.py │ │ ├── config.yaml │ │ ├── enhance.py │ │ ├── evaluate.py │ │ └── results │ │ │ ├── __init__.py │ │ │ └── sii.csv │ ├── data_preparation │ │ ├── README.md │ │ ├── __init__.py │ │ ├── data_config.yaml │ │ └── prepare_cec1_data.py │ └── e009_sheffield │ │ ├── README.md │ │ ├── __init__.py │ │ ├── config.yaml │ │ ├── test.py │ │ └── train.py ├── cec2 │ ├── README.md │ ├── __init__.py │ ├── baseline │ │ ├── __init__.py │ │ ├── config.yaml │ │ ├── data_generation │ │ │ ├── additional_data_config.yaml │ │ │ ├── build_additional_scenes.py │ │ │ ├── hydra │ │ │ │ └── launcher │ │ │ │ │ ├── cec2_submitit_local.yaml │ │ │ │ │ └── cec2_submitit_slurm.yaml │ │ │ ├── render_additional_scenes.py │ │ │ └── render_additional_scenes.sh │ │ ├── enhance.py │ │ ├── evaluate.py │ │ └── exp │ │ │ ├── __init__.py │ │ │ ├── si.csv │ │ │ └── si_unproc.csv │ └── data_preparation │ │ ├── __init__.py │ 
│ ├── build_scenes.py │ │ ├── config.yaml │ │ ├── hydra │ │ └── launcher │ │ │ ├── cec2_submitit_local.yaml │ │ │ └── cec2_submitit_slurm.yaml │ │ ├── render_scenes.py │ │ └── render_scenes.sh ├── cec3 │ ├── README.md │ ├── __init__.py │ └── baseline │ │ ├── __init__.py │ │ ├── config.yaml │ │ ├── enhance.py │ │ ├── evaluate.py │ │ ├── hydra │ │ └── launcher │ │ │ ├── cec3_submitit_local.yaml │ │ │ └── cec3_submitit_slurm.yaml │ │ └── report_score.py ├── cpc1 │ ├── README.md │ ├── __init__.py │ ├── baseline │ │ ├── __init__.py │ │ ├── compute_scores.py │ │ ├── config.yaml │ │ ├── results.json │ │ └── run.py │ ├── e029_sheffield │ │ ├── README.md │ │ ├── __init__.py │ │ ├── config.yaml │ │ ├── evaluate.py │ │ ├── infer.py │ │ ├── prepare_data.py │ │ ├── train_asr.py │ │ ├── transformer_cpc1.yaml │ │ └── transformer_cpc1_ensemble_decoder.py │ ├── e032_sheffield │ │ ├── README.md │ │ ├── __init__.py │ │ ├── config.yaml │ │ ├── evaluate.py │ │ ├── infer.py │ │ ├── prepare_data.py │ │ ├── train_asr.py │ │ ├── transformer_cpc1.yaml │ │ └── transformer_cpc1_decoder.py │ └── test_listener_responses │ │ ├── CPC1.test.json │ │ ├── CPC1.test_indep.json │ │ └── __init__.py ├── cpc2 │ ├── README.md │ ├── __init__.py │ └── baseline │ │ ├── .gitignore │ │ ├── README.md │ │ ├── __init__.py │ │ ├── compute_haspi.py │ │ ├── config.yaml │ │ ├── evaluate.py │ │ └── predict.py ├── cpc3 │ ├── README.md │ ├── __init__.py │ └── baseline │ │ ├── README.md │ │ ├── __init__.py │ │ ├── compute_haspi.py │ │ ├── config.yaml │ │ ├── evaluate.py │ │ ├── precomputed_haspi │ │ ├── clarity_data.dev.haspi.jsonl │ │ └── clarity_data.train.haspi.jsonl │ │ ├── predict_dev.py │ │ ├── predict_train.py │ │ └── shared_predict_utils.py └── icassp_2023 │ ├── README.md │ ├── __init__.py │ └── baseline │ ├── __init__.py │ ├── config.yaml │ ├── enhance.py │ ├── evaluate.py │ └── report_score.py └── tests ├── __init__.py ├── conftest.py ├── data ├── test_HOA_tools_cec2.py └── test_utils.py ├── enhancer ├── dnn │ 
├── __init__.py │ └── test_mc_conv_tasnet.py ├── dsp │ └── test_filter.py ├── gha │ ├── __init__.py │ ├── test_gainrule_camfit.py │ ├── test_gha_interface.py │ └── test_gha_utils.py ├── multiband_compressor │ ├── test_compressor_qmul.py │ ├── test_crossover.py │ └── test_multiband_compresor.py ├── test_compressor.py └── test_nalr.py ├── evaluator ├── haaqi │ └── test_haaqi.py ├── haspi │ ├── test_eb.py │ ├── test_ebm.py │ ├── test_haspi.py │ └── test_ip.py ├── hasqi │ └── test_hasqi.py ├── mbstoi │ ├── test_mbstoi.py │ └── test_mbstoi_utils.py └── msbg │ ├── test_cochlea.py │ ├── test_msbg.py │ ├── test_msbg_utils.py │ └── test_smearing.py ├── predictor ├── test_torch_msbg.py └── test_torch_stoi.py ├── recipes ├── __init__.py ├── cad1 │ ├── __init__.py │ ├── task1 │ │ ├── __init__.py │ │ └── baseline │ │ │ ├── __init__.py │ │ │ ├── test_enhance_task1.py │ │ │ ├── test_evaluate.py │ │ │ └── test_merge_batches.py │ └── task2 │ │ ├── __init__.py │ │ ├── baseline │ │ ├── __init__.py │ │ ├── test_audio_manager.py │ │ ├── test_baseline_utils.py │ │ ├── test_car_scene_acoustics.py │ │ ├── test_enhance_task2.py │ │ ├── test_evaluate.py │ │ └── test_merge_batches.py │ │ └── data_preparation │ │ ├── __init__.py │ │ └── test_build_scene_metadata.py ├── cad_icassp_2024 │ ├── __init__.py │ ├── baseline │ │ ├── __init__.py │ │ ├── test_enhance.py │ │ └── test_evaluate.py │ └── generate_dataset │ │ ├── test_generate_at_mic_musdb18.py │ │ └── test_generate_train_scenes.py ├── cec1 │ ├── __init__.py │ ├── baseline │ │ ├── __init__.py │ │ ├── test_enhance.py │ │ └── test_evaluate.py │ ├── data_preparation │ │ ├── __init__.py │ │ └── test_prepare_cec1_data.py │ └── e009_sheffield │ │ ├── __init__.py │ │ ├── test_test.py │ │ └── test_train.py ├── cec2 │ ├── __init__.py │ ├── baseline │ │ ├── __init__.py │ │ ├── data_generation │ │ │ ├── __init__.py │ │ │ ├── test_build_additional_scenes.py │ │ │ └── test_render_additional_scenes.py │ │ ├── test_enhance.py │ │ └── test_evaluate.py │ 
└── data_preparation │ │ ├── __init__.py │ │ ├── test_build_scenes.py │ │ └── test_render_scenes.py ├── cpc1 │ ├── __init__.py │ ├── baseline │ │ ├── __init__.py │ │ ├── test_compute_scores.py │ │ └── test_run.py │ ├── e029_sheffield │ │ ├── __init__.py │ │ ├── test_evaluate.py │ │ ├── test_infer.py │ │ ├── test_prepare_data.py │ │ ├── test_train_asr.py │ │ └── test_transformer_cpc1_ensemble_decoder.py │ └── e032_sheffield │ │ ├── __init__.py │ │ ├── test_evaluate.py │ │ ├── test_infer.py │ │ ├── test_prepare_data.py │ │ ├── test_train_asr.py │ │ └── test_transformer_cpc1_decoder.py ├── cpc2 │ ├── __init__.py │ └── baseline │ │ ├── __init__.py │ │ ├── test_compute_haspi.py │ │ ├── test_evaluate_cpc2.py │ │ └── test_predict.py └── icassp_2023 │ ├── __init__.py │ └── baseline │ ├── __init__.py │ ├── test_enhance.py │ ├── test_evaluate.py │ └── test_report_score.py ├── regression ├── _regtest_outputs │ ├── test_CEC2_scene_builder.test_CEC2_room_builder.out │ ├── test_CEC2_scene_builder.test_CEC2_scene_builder.out │ ├── test_data_HOA_tools_cec2.test_P.out │ ├── test_data_HOA_tools_cec2.test_U.out │ ├── test_data_HOA_tools_cec2.test_V.out │ ├── test_data_HOA_tools_cec2.test_W.out │ ├── test_data_HOA_tools_cec2.test_centred_element.out │ ├── test_data_HOA_tools_cec2.test_compute_UVW_coefficients.out │ ├── test_data_HOA_tools_cec2.test_compute_band_rotation.out │ ├── test_data_HOA_tools_cec2.test_compute_rotation_matrix.out │ ├── test_engine_losses.test_sisnr_loss.out │ ├── test_engine_losses.test_snr_loss.out │ ├── test_engine_losses.test_stoi_level_loss.out │ ├── test_engine_losses.test_stoi_loss.out │ ├── test_enhancers.test_GHA.out │ ├── test_enhancers.test_GHA_config.out │ ├── test_enhancers.test_GHA_inputs.out │ ├── test_enhancers.test_dsp_filter.out │ ├── test_enhancers.test_gha_audiogram.out │ ├── test_evaluators_msbg.test_firwin2.out │ ├── test_evaluators_msbg.test_gen_eh2008_speech_noise.out │ ├── test_evaluators_msbg.test_gen_tone.out │ ├── 
test_evaluators_msbg.test_measure_rms.out │ ├── test_evaluators_msbg.test_pad.out │ ├── test_full_CEC1_pipeline.test_full_cec1_pipeline.out │ ├── test_full_CEC2_pipeline.test_full_cec2_pipeline.out │ ├── test_mc_conv_tasnet.test_convtasnet.out │ ├── test_mc_conv_tasnet.test_overlap_add.out │ ├── test_predictors.test_torch_msbg_stoi_non_xeon_e5_2673_cpu.out │ ├── test_predictors.test_torch_msbg_stoi_xeon_e5_2673_cpu.out │ └── test_predictors.test_torchloudnorm.out ├── test_CEC2_scene_builder.py ├── test_data_HOA_tools_cec2.py ├── test_engine_losses.py ├── test_enhancers.py ├── test_evaluators_msbg.py ├── test_full_CEC1_pipeline.py ├── test_full_CEC2_pipeline.py ├── test_mc_conv_tasnet.py └── test_predictors.py ├── resources ├── recipes │ ├── cad1 │ │ ├── task1 │ │ │ ├── test_enhance.test_apply_baseline_ha.npy │ │ │ ├── test_enhance.test_decompose_signal_demucs.npy │ │ │ ├── test_enhance.test_decompose_signal_openunmix.npy │ │ │ ├── test_enhance.test_process_stems_for_listener.npy │ │ │ └── test_enhance.test_separate_sources.npy │ │ └── task2 │ │ │ ├── listeners.json │ │ │ ├── scenes.json │ │ │ ├── scenes_listeners.json │ │ │ ├── test_build_scene_metadata.json_sample.json │ │ │ ├── test_enhance.enhance_song_left.npy │ │ │ └── test_enhance.enhance_song_right.npy │ └── cad_icassp_2024 │ │ ├── test_enhance.test_decompose_signal_demucs.npy │ │ ├── test_enhance.test_decompose_signal_openunmix.npy │ │ ├── test_enhance.test_process_remix_for_listener_w_compressor.npy │ │ └── test_enhance.test_process_remix_for_listener_wo_compressor.npy └── utils │ ├── test_carnoise.signal_generator.npy │ ├── test_source_separation_support.test_separate_sources.npy │ └── test_source_separation_support.test_separate_sources_stereo.npy ├── test_data ├── clarity_data │ ├── hrir │ ├── metadata │ └── train │ │ ├── interferers │ │ ├── rooms │ │ └── targets ├── configs │ └── test_CEC2_scene_builder.yaml ├── filetypes │ └── valid.jsonl ├── hrir │ └── HRIRs_MAT │ │ ├── VP_N6-BTE_fr.mat │ │ ├── 
VP_N6-BTE_mid.mat │ │ ├── VP_N6-BTE_rear.mat │ │ └── VP_N6-ED.mat ├── interferers │ ├── music │ │ └── 1111967.low.mp3 │ ├── noise │ │ └── CIN_fan_014.wav │ └── speech │ │ └── som_04766_05.wav ├── metadata │ ├── hrir_data.json │ ├── listeners.json │ ├── masker_music_list.json │ ├── masker_nonspeech_list.json │ ├── masker_speech_list.json │ ├── rooms.train.json │ ├── scenes.cec1.test.json │ ├── scenes.test.json │ ├── scenes_listeners.1.json │ ├── scenes_listeners.json │ └── target_speech_list.json ├── openMHA │ ├── prerelease_combination3_smooth_template.cfg │ └── prerelease_combination4_smooth_template.cfg ├── recipes │ ├── cec1 │ │ ├── baseline │ │ │ └── eval_signals │ │ │ │ ├── S06001_L0064_HL-mixoutput.wav │ │ │ │ ├── S06001_L0064_HL-output.wav │ │ │ │ ├── S06001_L0064_HLddf-output.wav │ │ │ │ └── S06001_flat0dB_HL-output.wav │ │ └── e009_sheffield │ │ │ ├── clarity_CEC1_data │ │ │ └── clarity_data │ │ │ │ ├── dev │ │ │ │ └── scenes │ │ │ │ ├── metadata │ │ │ │ ├── listeners.json │ │ │ │ ├── scenes.dev.json │ │ │ │ └── scenes.train.json │ │ │ │ └── train │ │ │ │ └── scenes │ │ │ └── clarity_CEC1_data_eval │ │ │ └── clarity_data │ │ │ ├── eval │ │ │ └── scenes │ │ │ └── metadata │ │ │ └── scenes_listeners.eval.json │ ├── cec2 │ │ └── baseline │ │ │ ├── cec2_si.csv │ │ │ └── eval_signals │ │ │ └── S06001_L0064_HA-output.wav │ ├── cpc1 │ │ ├── clarity_CPC1_data │ │ │ ├── clarity_data │ │ │ │ ├── HA_outputs │ │ │ │ │ └── train │ │ │ │ │ │ └── S08510_L0239_E001.wav │ │ │ │ └── scenes │ │ │ │ │ ├── S08510_mixed_CH0.wav │ │ │ │ │ └── S08510_target_anechoic.wav │ │ │ └── metadata │ │ │ │ ├── CPC1.train.4.json │ │ │ │ ├── CPC1.train.json │ │ │ │ ├── CPC1.train_indep.4.json │ │ │ │ └── listeners.CPC1_train.json │ │ ├── clarity_CPC1_data_test │ │ │ └── metadata │ │ │ │ ├── CPC1.test.json │ │ │ │ └── CPC1.test_indep.json │ │ ├── e029_sheffield │ │ │ ├── clarity_CPC1_data_test │ │ │ │ ├── clarity_data │ │ │ │ │ ├── HA_outputs │ │ │ │ │ │ └── test │ │ │ │ │ │ │ └── 
S08520_L0216_E001.wav │ │ │ │ │ └── scenes │ │ │ │ │ │ └── S08520_target_anechoic.wav │ │ │ │ └── metadata │ │ │ │ │ ├── CPC1.test.json │ │ │ │ │ └── listeners.CPC1_all.json │ │ │ └── clarity_CPC1_data_train │ │ │ │ ├── clarity_data │ │ │ │ ├── HA_outputs │ │ │ │ │ └── train │ │ │ │ │ │ └── S08510_L0239_E001.wav │ │ │ │ └── scenes │ │ │ │ │ └── S08510_target_anechoic.wav │ │ │ │ └── metadata │ │ │ │ ├── CPC1.train.json │ │ │ │ └── listeners.CPC1_train.json │ │ ├── e032_sheffield │ │ └── exps │ │ │ ├── test │ │ │ └── sii.csv │ │ │ ├── test_indep │ │ │ └── sii.csv │ │ │ ├── train │ │ │ ├── eval_signals │ │ │ │ ├── S08510_L0239_E001_HL-mixoutput.wav │ │ │ │ ├── S08510_L0239_E001_HL-output.wav │ │ │ │ ├── S08510_L0239_E001_HLddf-output.wav │ │ │ │ └── S08510_flat0dB_HL-output.wav │ │ │ └── sii.csv │ │ │ └── train_indep │ │ │ └── sii.csv │ └── cpc2 │ │ └── clarity_data │ │ ├── HA_outputs │ │ └── signals │ │ │ └── CEC2 │ │ │ ├── S08547_L0001_E001.wav │ │ │ ├── S08564_L0001_E001.wav │ │ │ ├── S08564_L0002_E002.wav │ │ │ └── S08564_L0003_E003.wav │ │ ├── metadata │ │ ├── CEC1.train.sample.json │ │ └── listeners.json │ │ └── scenes │ │ └── CEC2 │ │ ├── S08547_target_ref.wav │ │ └── S08564_target_ref.wav ├── rooms │ ├── HOA_IRs │ │ ├── HOA_R06001_i1.wav │ │ ├── HOA_R06001_i2.wav │ │ ├── HOA_R06001_i3.wav │ │ └── HOA_R06001_t.wav │ ├── ac │ │ └── R06001.ac │ ├── brir │ │ ├── anech_brir_R00001_t_CH1.wav │ │ ├── brir_R00001_i1_CH0.wav │ │ ├── brir_R00001_i1_CH1.wav │ │ ├── brir_R00001_i1_CH2.wav │ │ ├── brir_R00001_i1_CH3.wav │ │ ├── brir_R00001_t_CH0.wav │ │ ├── brir_R00001_t_CH1.wav │ │ ├── brir_R00001_t_CH2.wav │ │ ├── brir_R00001_t_CH3.wav │ │ └── brir_R06001_t_CH1.wav │ └── rpf │ │ ├── R00001_i1.rpf │ │ ├── R00001_i2.rpf │ │ ├── R00001_i3.rpf │ │ ├── R00001_t.rpf │ │ ├── R06001_i1.rpf │ │ ├── R06001_i2.rpf │ │ ├── R06001_i3.rpf │ │ └── R06001_t.rpf ├── scenes │ ├── S06001_mix_CH0.wav │ ├── S06001_mix_CH1.wav │ ├── S06001_mix_CH2.wav │ ├── S06001_mix_CH3.wav │ ├── 
S06001_mixed_CH0.wav │ ├── S06001_mixed_CH1.wav │ ├── S06001_mixed_CH2.wav │ ├── S06001_mixed_CH3.wav │ ├── S06001_target_CH0.wav │ ├── S06001_target_CH1.wav │ ├── S06001_target_CH2.wav │ ├── S06001_target_CH3.wav │ ├── S06001_target_anechoic.wav │ └── S06001_target_anechoic_CH1.wav ├── targets │ └── T010_G0N_02468.wav └── test │ ├── interferers │ ├── rooms │ └── targets ├── test_import.py ├── test_scene_renderer_cec2.py └── utils ├── car_noise_simulator ├── test_carnoise_parameter_generator.py └── test_carnoise_signal_generator.py ├── test_audiogram.py ├── test_file_io.py ├── test_flac_encoder.py ├── test_results_support.py ├── test_signal_processing.py └── test_source_separation_support.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | # Exclude versioneer 4 | clarity/_version.py 5 | [report] 6 | exclude_lines = 7 | pragma: no cover 8 | if TYPE_CHECKING: 9 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E203, E501, W503 3 | max-line-length = 88 4 | max-complexity = 18 5 | select = B,C,E,F,W,T4,B9 6 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | clarity/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: bug, question 6 | assignees: '' 7 | --- 8 | **Describe the bug** 9 | A clear and concise description of what the bug is. 10 | 11 | **To Reproduce** 12 | Please describe the steps to reproduce the behavior: 13 | 14 | 1. 
`import 15 | 2. Click on '....' 16 | 3. Scroll down to '....' 17 | 4. See error 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | **Error Messages** 23 | If applicable, please **copy and paste** the command that failed and the full traceback output that occurs. 24 | 25 | **Environment** 26 | Please include the following... 27 | 28 | [ ] OS: [e.g. iOS] 29 | [ ] Python version (`python --version`) 30 | [ ] clarity version (`pip show clarity`) 31 | [ ] Installed package versions (`pip freeze`) 32 | 33 | **Additional context** 34 | Add any other context about the problem here such as the data that is being used. 35 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[FEATURE]" 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | **Is your feature request related to a problem? Please describe.** 10 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 11 | 12 | **Describe the solution you'd like** 13 | A clear and concise description of what you want to happen. 14 | 15 | **Describe alternatives you've considered** 16 | A clear and concise description of any alternative solutions or features you've considered. 17 | 18 | **Additional context** 19 | Add any other context or screenshots about the feature request here. 
20 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/questions-about-the-challenge.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Questions about the Challenge 3 | about: Ask a question about using clarity for the Challenge 4 | title: '' 5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | If something isn't working as expected or you are unsure how to achieve something feel free to ask questions about how to use clarity for the Clarity Challenge. You can do so here but please also consider the [Clarity Challenge Google Group](https://groups.google.com/g/clarity-challenge). 10 | 11 | It would be useful to include the following information where applicable. 12 | 13 | [ ] Operating system 14 | [ ] Python version (`python --version`) 15 | [ ] clarity version (`pip show clarity`) 16 | [ ] Package versions (`pip freeze`) 17 | -------------------------------------------------------------------------------- /.github/workflows/ORDA.yaml: -------------------------------------------------------------------------------- 1 | name: Release to ORDA 2 | on: 3 | workflow_dispatch: 4 | release: 5 | types: [published] 6 | jobs: 7 | upload: 8 | runs-on: ubuntu-latest 9 | env: 10 | ARCHIVE_NAME: ${{ github.event.repository.name }}-${{ github.event.release.tag_name }} 11 | steps: 12 | - name: prepare-data-folder 13 | run : mkdir 'data' 14 | - name: download-archive 15 | run: | 16 | curl -sL "${{ github.event.release.zipball_url }}" > "$ARCHIVE_NAME".zip 17 | curl -sL "${{ github.event.release.tarball_url }}" > "$ARCHIVE_NAME".tar.gz 18 | - name: move-archive 19 | run: | 20 | mv "$ARCHIVE_NAME".zip data/ 21 | mv "$ARCHIVE_NAME".tar.gz data/ 22 | - name: upload-to-figshare 23 | uses: figshare/github-upload-action@v1.1 24 | with: 25 | FIGSHARE_TOKEN: ${{ secrets.FIGSHARE_TOKEN }} 26 | FIGSHARE_ENDPOINT: 'https://api.figshare.com/v2' 27 | FIGSHARE_ARTICLE_ID: 23230694 28 | 
DATA_DIR: 'data' 29 | -------------------------------------------------------------------------------- /.github/workflows/pypi.yaml: -------------------------------------------------------------------------------- 1 | name: Publish package to PyPi 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | workflow_dispatch: 8 | 9 | jobs: 10 | build-release: 11 | runs-on: ubuntu-latest 12 | name: Publish package to PyPi 13 | steps: 14 | - uses: actions/checkout@v4 15 | with: 16 | fetch-depth: 0 17 | - name: Setup Python 18 | uses: actions/setup-python@v5.5.0 19 | with: 20 | python-version: 3.9 21 | - name: Installing the package 22 | run: | 23 | pip3 install . 24 | pip3 install .[pypi] 25 | - name: Check Git Access 26 | run: | 27 | git status 28 | git describe --tags 29 | - name: Build package 30 | run: | 31 | pip3 install --upgrade setuptools 32 | export DEB_PYTHON_INSTALL_LAYOUT=deb_system 33 | python -m build --no-isolation 34 | - name: Publish package to PyPI 35 | uses: pypa/gh-action-pypi-publish@v1.12.4 36 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | - name: Upload release artifacts to Release Notes 41 | uses: Roang-zero1/github-upload-release-artifacts-action@v2 42 | with: 43 | args: "dist/" 44 | env: 45 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 46 | -------------------------------------------------------------------------------- /.github/workflows/run_tests.yml: -------------------------------------------------------------------------------- 1 | name: Clarity Tests 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | merge_group: 8 | workflow_dispatch: 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | build: 15 | name: Testing ~ (${{ matrix.python-version }}, ${{ matrix.os }}) 16 | runs-on: ${{ matrix.os }} 17 | strategy: 18 | matrix: 19 | os: ["ubuntu-latest"] 20 | python-version: ["3.9", "3.10", "3.11", "3.12"] 21 | steps: 22 | - uses: 
actions/checkout@v3 23 | - name: Set up Python 24 | uses: actions/setup-python@v4.3.0 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | # cache: 'pip' 28 | # cache-dependency-path: setup.cfg 29 | - name: Install dependencies 30 | run: | 31 | sudo apt-get update -yq 32 | sudo apt-get install -yq libsndfile1-dev ffmpeg 33 | - name: Install package, including extras 34 | run: | 35 | pip install -e .[tests,docs,dev] 36 | - name: CPU information 37 | run: | 38 | python -m cpuinfo --json 39 | - name: Run pytest 40 | run: | 41 | pytest --cov=clarity --durations 10 . 42 | - name: Determine coverage 43 | run: | 44 | coverage xml 45 | - name: Upload coverage to Codecov 46 | uses: codecov/codecov-action@v3 47 | # - name: pylint 48 | # run: | 49 | # pylint --rcfile .pylintrc 50 | -------------------------------------------------------------------------------- /.github/workflows/sphinx_docs_to_gh_pages.yaml: -------------------------------------------------------------------------------- 1 | # Source : https://github.com/marketplace/actions/sphinx-docs-to-github-pages 2 | name: Sphinx docs to gh-pages 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | sphinx_docs_to_gh-pages: 11 | runs-on: ubuntu-latest 12 | name: Sphinx docs to gh-pages 13 | steps: 14 | - uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | - name: Setup Python 18 | uses: actions/setup-python@v4.3.0 19 | with: 20 | python-version: 3.9 21 | - name: Installing the Documentation requirements 22 | run: | 23 | pip3 install .[docs] 24 | - name: Copying recipe README.md 25 | run: | 26 | rsync -av --exclude-from="docs/recipe_rsync_exclude.txt" recipes docs/. 27 | - name: Running Sphinx to gh-pages Action 28 | uses: ns-rse/action-sphinx-docs-to-gh-pages@main 29 | with: 30 | branch: main # Test on branch, switch to main when working 31 | dir_docs: docs 32 | sphinxapiexclude: '../*setup* ../*tests* ../*.ipynb' 33 | sphinxapiopts: '--separate -o . 
../' 34 | sphinxopts: '' 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Distribution / packaging 2 | .Python 3 | build/ 4 | develop-eggs/ 5 | dist/ 6 | downloads/ 7 | eggs/ 8 | .eggs/ 9 | lib/ 10 | lib64/ 11 | parts/ 12 | sdist/ 13 | var/ 14 | wheels/ 15 | pip-wheel-metadata/ 16 | share/python-wheels/ 17 | *.egg-info/ 18 | .installed.cfg 19 | *.egg 20 | MANIFEST 21 | 22 | # Python 23 | __pycache__ 24 | *.pyc 25 | 26 | # Unit test / coverage reports 27 | htmlcov/ 28 | .tox/ 29 | .nox/ 30 | .coverage 31 | .coverage.* 32 | .cache 33 | nosetests.xml 34 | coverage.xml 35 | *.cover 36 | *.py,cover 37 | .hypothesis/ 38 | .pytest_cache/ 39 | cover/ 40 | 41 | # Jupyter Notebook 42 | .ipynb_checkpoints 43 | 44 | # IPython 45 | profile_default/ 46 | ipython_config.py 47 | 48 | # Environments 49 | .env 50 | .venv 51 | env/ 52 | venv/ 53 | ENV/ 54 | env.bak/ 55 | venv.bak/ 56 | 57 | # Mac OS 58 | .DS_Store 59 | 60 | # Generated docs 61 | docs/clarity.*.rst 62 | docs/recipes.*.rst 63 | 64 | # setuptools_scm version 65 | clarity/_version.py 66 | 67 | # Emacs 68 | *~ 69 | 70 | # Sound files 71 | *.wav -------------------------------------------------------------------------------- /.markdownlint-cli2.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # An example .markdownlint-cli2.yaml file 3 | # 4 | 5 | # Disable some built-in rules 6 | config: 7 | line-length: false # Disable line length rule 8 | no-inline-html: false # Allow HTML in Markdown 9 | 10 | # # Include a custom rule package 11 | # customRules: 12 | # - markdownlint-rule-titlecase 13 | 14 | # Fix any fixable errors 15 | fix: false 16 | 17 | # Define a custom front matter pattern 18 | frontMatter: "---[^]*---" 19 | 20 | # # Define glob expressions to use (only valid at root) 21 | # globs: 22 | # - "!*bout.md" 23 | 24 | # # Define glob 
expressions to ignore 25 | # ignores: 26 | # - ".github/ISSUE_TEMPLATE/*.md" 27 | 28 | # # Use a plugin to recognize math 29 | # markdownItPlugins: 30 | # - 31 | # - "@iktakahiro/markdown-it-katex" 32 | 33 | # # Disable inline config comments 34 | # noInlineConfig: true 35 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.12.6 2 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.formatting.provider": "black", 3 | "cSpell.words": [ 4 | "haspi", 5 | "hasqi" 6 | ], 7 | "python.testing.pytestArgs": [ 8 | "tests/", 9 | "--cov", 10 | "--cov-branch", 11 | "--cov-report", 12 | "html", 13 | ], 14 | "python.testing.unittestEnabled": false, 15 | "python.testing.pytestEnabled": true, 16 | "githubPullRequests.ignoredPullRequestBranches": [ 17 | "main" 18 | ], 19 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 The PyClarity Team 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /clarity/__init__.py: -------------------------------------------------------------------------------- 1 | """pyClarity""" 2 | 3 | from importlib.metadata import PackageNotFoundError, version 4 | 5 | try: 6 | __version__ = version("pyclarity") 7 | except PackageNotFoundError: 8 | # package is not installed 9 | pass 10 | -------------------------------------------------------------------------------- /clarity/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/data/__init__.py -------------------------------------------------------------------------------- /clarity/data/params/speech_weight.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/data/params/speech_weight.mat -------------------------------------------------------------------------------- /clarity/engine/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/engine/__init__.py -------------------------------------------------------------------------------- /clarity/enhancer/__init__.py:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/enhancer/__init__.py -------------------------------------------------------------------------------- /clarity/enhancer/dnn/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/enhancer/dnn/__init__.py -------------------------------------------------------------------------------- /clarity/enhancer/dsp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/enhancer/dsp/__init__.py -------------------------------------------------------------------------------- /clarity/enhancer/gha/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/enhancer/gha/__init__.py -------------------------------------------------------------------------------- /clarity/enhancer/multiband_compressor/__init__.py: -------------------------------------------------------------------------------- 1 | from clarity.enhancer.multiband_compressor.compressor_qmul import Compressor 2 | from clarity.enhancer.multiband_compressor.crossover import Crossover 3 | from clarity.enhancer.multiband_compressor.multiband_compressor import ( 4 | MultibandCompressor, 5 | ) 6 | 7 | __all__ = ["MultibandCompressor", "Compressor", "Crossover"] 8 | -------------------------------------------------------------------------------- /clarity/evaluator/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/evaluator/__init__.py -------------------------------------------------------------------------------- /clarity/evaluator/haaqi/__init__.py: -------------------------------------------------------------------------------- 1 | from clarity.evaluator.haaqi.haaqi import compute_haaqi, haaqi_v1 2 | 3 | __all__ = ["haaqi_v1", "compute_haaqi"] 4 | -------------------------------------------------------------------------------- /clarity/evaluator/haspi/__init__.py: -------------------------------------------------------------------------------- 1 | """HASPI intelligibility index.""" 2 | 3 | from clarity.evaluator.haspi.haspi import haspi_v2, haspi_v2_be 4 | 5 | __all__ = ["haspi_v2", "haspi_v2_be"] 6 | -------------------------------------------------------------------------------- /clarity/evaluator/hasqi/__init__.py: -------------------------------------------------------------------------------- 1 | from clarity.evaluator.hasqi.hasqi import hasqi_v2, hasqi_v2_better_ear # noqa F401 2 | 3 | __all__ = ["hasqi_v2", "hasqi_v2_better_ear"] 4 | -------------------------------------------------------------------------------- /clarity/evaluator/mbstoi/__init__.py: -------------------------------------------------------------------------------- 1 | """Modified Binaural Short-Time Objective Intelligibility Evaluator""" 2 | 3 | from clarity.evaluator.mbstoi.mbstoi import mbstoi 4 | 5 | __all__ = ["mbstoi"] 6 | -------------------------------------------------------------------------------- /clarity/evaluator/mbstoi/parameters.yaml: -------------------------------------------------------------------------------- 1 | sample_rate: 10000 # sample rate of proposed intelligibility measure in hz 2 | n_frame: 256 # window support in samples 3 | fft_size_in_samples: 512 # fft size in samples 4 | n_third_octave_bands: 15 # number of one-third octave bands 5 | 
centre_freq_first_third_octave_hz: 150 # centre frequency of first 1/3 octave band in hz 6 | n_frames: 30 # number of frames for intermediate intelligibility measure (length analysis window) 7 | dyn_range: 40 # speech dynamic range in db 8 | # values to define ec grid 9 | tau_min: -0.001 # minimum interaural delay compensation in seconds. b: -0.01. 10 | tau_max: 0.001 # maximum interaural delay compensation in seconds. b: 0.01. 11 | gamma_min: -20 # minimum interaural level compensation in db 12 | gamma_max: 20 # maximum interaural level compensation in db 13 | # constants for jitter 14 | # itd compensation standard deviation in seconds. equation 6 andersen et al. 2018 refinement 15 | sigma_delta_0: 0.000065 16 | # ild compensation standard deviation. equation 5 andersen et al. 2018 17 | sigma_epsilon_0: 1.5 18 | # constant for level shift deviation in db. equation 5 andersen et al. 2018 19 | alpha_0_db: 13 20 | # constant for time shift deviation in seconds. equation 6 andersen et al. 2018 21 | tau_0: 0.0016 22 | # constant for level shift deviation. power for calculation of sigma delta gamma 23 | # in equation 5 andersen et al. 2018. 
24 | level_shift_deviation: 1.6 25 | -------------------------------------------------------------------------------- /clarity/evaluator/msbg/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/evaluator/msbg/__init__.py -------------------------------------------------------------------------------- /clarity/evaluator/msbg/msbg_hparams/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/evaluator/msbg/msbg_hparams/__init__.py -------------------------------------------------------------------------------- /clarity/predictor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/predictor/__init__.py -------------------------------------------------------------------------------- /clarity/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/utils/__init__.py -------------------------------------------------------------------------------- /clarity/utils/car_noise_simulator/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/clarity/utils/car_noise_simulator/__init__.py -------------------------------------------------------------------------------- /clarity/utils/results_support.py: -------------------------------------------------------------------------------- 1 | """Dataclass to save challenges results to a CSV file.""" 2 | 3 | from 
__future__ import annotations 4 | 5 | # pylint: disable=import-error 6 | import csv 7 | from dataclasses import dataclass 8 | from pathlib import Path 9 | 10 | 11 | @dataclass 12 | class ResultsFile: 13 | """A utility class for writing results to a CSV file. 14 | 15 | Attributes: 16 | file_name (str | Path): The name of the file to write results to. 17 | header_columns (list[str]): The columns to write to the CSV file. 18 | append_results (bool): Whether to append results to an existing file. 19 | If False, a new file will be created and the header row will be written. 20 | Defaults to False. 21 | """ 22 | 23 | file_name: str | Path 24 | header_columns: list[str] 25 | append_results: bool = False 26 | 27 | def __post_init__(self): 28 | """Write the header row to the CSV file.""" 29 | if isinstance(self.file_name, str): 30 | self.file_name = Path(self.file_name) 31 | 32 | if self.append_results: 33 | if not Path(self.file_name).exists(): 34 | raise FileNotFoundError( 35 | "Cannot append results to non-existent file " 36 | f"{self.file_name.as_posix()}" 37 | " - please set append_results=False" 38 | ) 39 | else: 40 | with open(self.file_name, "w", encoding="utf-8", newline="") as csv_file: 41 | csv_writer = csv.writer( 42 | csv_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL 43 | ) 44 | csv_writer.writerow(self.header_columns) 45 | 46 | def add_result( 47 | self, 48 | row_values: dict[str, str | float], 49 | ): 50 | """Add a result to the CSV file. 51 | 52 | Args: 53 | row_values (dict[str, str | float]): The values to write to the CSV file. 
54 | """ 55 | 56 | with open(self.file_name, "a", encoding="utf-8", newline="") as csv_file: 57 | csv_writer = csv.writer( 58 | csv_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL 59 | ) 60 | row = [] 61 | for column in self.header_columns: 62 | row.append(row_values[column]) 63 | 64 | csv_writer.writerow(row) 65 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | SPHINXAPIBUILD = sphinx-apidoc 10 | RECIPEDIR = "$(SOURCEDIR)/../recipes" 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | echo "Copying recipe README.md files for inclusion." 21 | rsync -av --exclude-from="recipe_rsync_exclude.txt" "$(RECIPEDIR)" "." 
22 | @$(SPHINXAPIBUILD) --separate -o "$(SOURCEDIR)" "../" "../*setup*" "../*tests*" "../*.ipynb" 23 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 24 | -------------------------------------------------------------------------------- /docs/images/cadenza_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/docs/images/cadenza_logo.png -------------------------------------------------------------------------------- /docs/images/cropped-cadenza_logo_square-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/docs/images/cropped-cadenza_logo_square-1.png -------------------------------------------------------------------------------- /docs/images/earfinal_clarity_customColour.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/docs/images/earfinal_clarity_customColour.png -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Welcome to pyClarity's documentation 2 | 3 | ```{toctree} 4 | :caption: Getting Started 5 | :maxdepth: 1 6 | 7 | introduction 8 | installation 9 | usage 10 | recipes_doc 11 | CONTRIBUTING 12 | CODE_OF_CONDUCT 13 | ``` 14 | 15 | ```{toctree} 16 | :caption: Recipes 17 | :maxdepth: 1 18 | ``` 19 | 20 | ```{toctree} 21 | :caption: API 22 | :maxdepth: 1 23 | 24 | clarity 25 | recipes 26 | ``` 27 | 28 | ## Indices and tables 29 | 30 | - {ref}`genindex` 31 | - {ref}`modindex` 32 | - {ref}`search` 33 | -------------------------------------------------------------------------------- /docs/installation.md: 
-------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | It is recommended that you install and use Clarity under a [Virtual 4 | Environment](https://realpython.com/python-virtual-environments-a-primer/). If you opt to use Conda it is recommended 5 | that you use the minimal [Miniconda](https://docs.conda.io/en/latest/miniconda.html) version which avoids installing 6 | tools you won't use. Once you have installed Conda you can create and activate a virtual environment by... 7 | 8 | ``` bash 9 | conda create --name clarity python=3.8 10 | conda activate clarity 11 | ``` 12 | 13 | The following steps assume that you have activated the `clarity` virtual environment. 14 | 15 | ## PyPi 16 | 17 | The latest stable release of Clarity is available on [PyPI](https://pypi.org/project/pyclarity/) and can be installed 18 | using `pip`. To install simply... 19 | 20 | ``` bash 21 | pip install pyclarity 22 | ``` 23 | 24 | ## GitHub 25 | 26 | You can alternatively install the latest development version from GitHub. There are two methods for doing so. 27 | 28 | ### Pip GitHub install 29 | 30 | To install the `main` branch directly from GitHub using `pip`... 31 | 32 | ``` bash 33 | pip install -e git+https://github.com/claritychallenge/clarity.git@main 34 | ``` 35 | 36 | ### Manual Cloning and Installation 37 | 38 | You will have to have [git](https://git-scm.com) installed on your system to install in this manner. 39 | 40 | ``` bash 41 | git clone git@github.com:claritychallenge/clarity.git 42 | cd clarity 43 | pip install -e .
44 | ``` 45 | -------------------------------------------------------------------------------- /docs/recipe_rsync_exclude.txt: -------------------------------------------------------------------------------- 1 | *~ 2 | *.csv 3 | *.json 4 | *.log 5 | *.py 6 | *.pyc 7 | *.sh 8 | *.tgz* 9 | *.wav 10 | *.yaml 11 | -------------------------------------------------------------------------------- /docs/recipes_doc.md: -------------------------------------------------------------------------------- 1 | # Recipes 2 | 3 | The documents linked below detail the challenges to date and some of the entries. 4 | 5 | ```{toctree} 6 | :caption: CEC1 7 | :maxdepth: 1 8 | recipes/cec1/README 9 | recipes/cec1/baseline/README 10 | recipes/cec1/data_preparation/README 11 | recipes/cec1/e009_sheffield/README 12 | ``` 13 | 14 | ```{toctree} 15 | :caption: CEC2 16 | :maxdepth: 1 17 | recipes/cec2/README 18 | ``` 19 | 20 | ```{toctree} 21 | :caption: CPC1 22 | :maxdepth: 1 23 | recipes/cpc1/README 24 | recipes/cpc1/e029_sheffield/README 25 | recipes/cpc1/e032_sheffield/README 26 | ``` 27 | -------------------------------------------------------------------------------- /notebooks/README.md: -------------------------------------------------------------------------------- 1 | # Clarity Notebooks 2 | 3 | We have a growing number of Clarity Notebooks that demonstrate how to use our tools. You can use the links below to open and run the notebooks in Google's Colab environment. 
4 | 5 | | Notebook | Description | | 6 | |:----------|:-------------|------:| 7 | | [01 Installing Clarity Tools](https://github.com/claritychallenge/clarity/blob/master/notebooks/01_Installing_clarity_tools_and_using_metadata.ipynb) | Intro tutorial showing how to install tools and use them to examine CEC2 metadata |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](http://colab.research.google.com/github/claritychallenge/clarity/blob/master/notebooks/01_Installing_clarity_tools_and_using_metadata.ipynb) | 8 | | [02 Running the CEC2 baseline (commandline)](https://github.com/claritychallenge/clarity/blob/master/notebooks/02_Running_the_CEC2_baseline_from_commandline.ipynb) | Run the CEC2 baseline enhancement system from the commandline |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](http://colab.research.google.com/github/claritychallenge/clarity/blob/master/notebooks/02_Running_the_CEC2_baseline_from_commandline.ipynb) | 9 | | [03 Running the CEC2 baseline (python)](https://github.com/claritychallenge/clarity/blob/master/notebooks/03_Running_the_CEC2_baseline_from_python.ipynb) | Use Clarity library to reproduce the CEC2 baseline in Python code |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](http://colab.research.google.com/github/claritychallenge/clarity/blob/master/notebooks/03_Running_the_CEC2_baseline_from_python.ipynb) | 10 | 11 | We are happy to receive notebook contributions. 
If you have something you want to share please submit a pull request 😄 12 | -------------------------------------------------------------------------------- /recipes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/__init__.py -------------------------------------------------------------------------------- /recipes/cad1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad1/__init__.py -------------------------------------------------------------------------------- /recipes/cad1/task1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad1/task1/__init__.py -------------------------------------------------------------------------------- /recipes/cad1/task1/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad1/task1/baseline/__init__.py -------------------------------------------------------------------------------- /recipes/cad1/task1/baseline/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ../../cadenza_data_demo/cad1/task1 3 | metadata_dir: ${path.root}/metadata 4 | music_dir: ${path.root}/audio/musdb18hq 5 | music_file: ${path.metadata_dir}/musdb18.valid.json 6 | listeners_file: ${path.metadata_dir}/listeners.valid.json 7 | music_segments_test_file: ${path.metadata_dir}/musdb18.segments.test.json 8 | exp_folder: ./exp_${separator.model} # folder to store enhanced signals and final results 9 
| 10 | team_id: T001 11 | 12 | sample_rate: 44100 # sample rate of the input mixture 13 | stem_sample_rate: 24000 # sample rate output stems 14 | remix_sample_rate: 32000 # sample rate for output remixed signal 15 | 16 | nalr: 17 | nfir: 220 18 | sample_rate: ${sample_rate} 19 | 20 | apply_compressor: False 21 | compressor: 22 | threshold: 0.35 23 | attenuation: 0.1 24 | attack: 50 25 | release: 1000 26 | rms_buffer_size: 0.064 27 | 28 | soft_clip: True 29 | 30 | separator: 31 | model: demucs # demucs or openunmix 32 | device: ~ 33 | 34 | evaluate: 35 | set_random_seed: True 36 | small_test: False 37 | batch_size: 1 # Number of batches 38 | batch: 0 # Batch number to evaluate 39 | 40 | # hydra config 41 | hydra: 42 | run: 43 | dir: ${path.exp_folder} 44 | job: 45 | chdir: True -------------------------------------------------------------------------------- /recipes/cad1/task1/baseline/merge_batches_results.py: -------------------------------------------------------------------------------- 1 | """Join batches scores into a single file.""" 2 | 3 | import hydra 4 | import pandas as pd 5 | from omegaconf import DictConfig 6 | 7 | 8 | @hydra.main(config_path="", config_name="config", version_base=None) 9 | def join_batches(config: DictConfig) -> None: 10 | """ 11 | Join batches scores into a single file. 12 | 13 | Args: 14 | config (DictConfig): Dictionary of configuration options. 15 | The `.evaluate.batch_size` is extracted to determine how many 16 | batches there are to combine. 
17 | 18 | """ 19 | batches_results = [] 20 | for batch in range(config.evaluate.batch_size): 21 | batches_results.append( 22 | pd.read_csv(f"scores_{batch + 1}-{config.evaluate.batch_size}.csv") 23 | ) 24 | df_res = pd.concat(batches_results, ignore_index=True) 25 | df_res.to_csv("scores.csv", index=False) 26 | 27 | 28 | # pylint: disable=no-value-for-parameter 29 | if __name__ == "__main__": 30 | join_batches() 31 | -------------------------------------------------------------------------------- /recipes/cad1/task2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad1/task2/__init__.py -------------------------------------------------------------------------------- /recipes/cad1/task2/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad1/task2/baseline/__init__.py -------------------------------------------------------------------------------- /recipes/cad1/task2/baseline/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ../../cadenza_task2_data_demo/cad1/task2 3 | audio_dir: ${path.root}/audio 4 | metadata_dir: ${path.root}/metadata 5 | music_dir: ${path.audio_dir}/music 6 | hrtf_dir: ${path.audio_dir}/eBrird 7 | listeners_file: ${path.metadata_dir}/listeners.valid.json 8 | scenes_file: ${path.metadata_dir}/scenes.json 9 | scenes_listeners_file: ${path.metadata_dir}/scenes_listeners.json 10 | hrtf_file: ${path.metadata_dir}/eBrird_BRIR.json 11 | exp_folder: ./exp # folder to store enhanced signals and final results 12 | 13 | team_id: T001 14 | 15 | sample_rate: 44100 # sample rate of the input signal 16 | enhanced_sample_rate: 32000 # sample rate for the enhanced output signal 17 | 18 | 
nalr: 19 | nfir: 220 20 | sample_rate: ${sample_rate} 21 | 22 | compressor: 23 | threshold: 0.7 24 | attenuation: 0.1 25 | attack: 5 26 | release: 20 27 | rms_buffer_size: 0.064 28 | 29 | soft_clip: False 30 | 31 | enhance: 32 | average_level: -14 # Average level according Spotify's levels 33 | min_level: -19 34 | 35 | evaluate: 36 | set_random_seed: True 37 | small_test: False 38 | save_intermediate_wavs: False 39 | split: valid # train, valid 40 | batch_size: 1 # Number of batches 41 | batch: 0 # Batch number to evaluate 42 | 43 | # hydra config 44 | hydra: 45 | run: 46 | dir: ${path.exp_folder} 47 | job: 48 | chdir: True -------------------------------------------------------------------------------- /recipes/cad1/task2/baseline/merge_batches_results.py: -------------------------------------------------------------------------------- 1 | """Join batches scores into a single file.""" 2 | 3 | import hydra 4 | import pandas as pd 5 | from omegaconf import DictConfig 6 | 7 | 8 | @hydra.main(config_path="", config_name="config", version_base=None) 9 | def join_batches(config: DictConfig) -> None: 10 | """ 11 | Join batches scores into a single file. 
12 | 13 | """ 14 | batches_results = [] 15 | for batch in range(config.evaluate.batch_size): 16 | batches_results.append( 17 | pd.read_csv( 18 | f"scores_{batch}-{config.evaluate.batch_size}.csv", index_col=False 19 | ) 20 | ) 21 | df_res = pd.concat(batches_results, ignore_index=True) 22 | df_res.to_csv("scores.csv", index=False) 23 | 24 | 25 | # pylint: disable=no-value-for-parameter 26 | if __name__ == "__main__": 27 | join_batches() 28 | -------------------------------------------------------------------------------- /recipes/cad1/task2/baseline/test.py: -------------------------------------------------------------------------------- 1 | """Run the dummy enhancement.""" 2 | 3 | # pylint: disable=too-many-locals 4 | # pylint: disable=import-error 5 | from __future__ import annotations 6 | 7 | import logging 8 | import shutil 9 | from pathlib import Path 10 | 11 | import hydra 12 | from omegaconf import DictConfig 13 | 14 | from recipes.cad1.task2.baseline.enhance import enhance as enhance_set 15 | 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | def pack_submission( 20 | team_id: str, 21 | root_dir: str | Path, 22 | base_dir: str | Path = ".", 23 | ) -> None: 24 | """ 25 | Pack the submission files into an archive file. 26 | 27 | Args: 28 | team_id (str): Team ID. 29 | root_dir (str | Path): Root directory of the archived file. 30 | base_dir (str | Path): Base directory to archive. Defaults to ".". 31 | """ 32 | # Pack the submission files 33 | logger.info(f"Packing submission files for team {team_id}...") 34 | shutil.make_archive( 35 | f"submission_{team_id}", 36 | "zip", 37 | root_dir=root_dir, 38 | base_dir=base_dir, 39 | ) 40 | 41 | 42 | @hydra.main(config_path="", config_name="config", version_base=None) 43 | def enhance(config: DictConfig) -> None: 44 | """ 45 | Run the music enhancement. 46 | The baseline system is a dummy processor that returns the input signal. 
47 | 48 | Args: 49 | config (dict): Dictionary of configuration options for enhancing music. 50 | """ 51 | enhance_set(config) 52 | 53 | pack_submission( 54 | team_id=config.team_id, 55 | root_dir=Path("enhanced_signals"), 56 | base_dir=config.evaluate.split, 57 | ) 58 | 59 | logger.info("Evaluation complete.!!") 60 | logger.info( 61 | f"Please, submit the file submission_{config.team_id}.zip to the challenge " 62 | "using the link provided. Thank you.!!" 63 | ) 64 | 65 | 66 | # pylint: disable = no-value-for-parameter 67 | if __name__ == "__main__": 68 | enhance() 69 | -------------------------------------------------------------------------------- /recipes/cad1/task2/data_preparation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad1/task2/data_preparation/__init__.py -------------------------------------------------------------------------------- /recipes/cad1/task2/data_preparation/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ./ 3 | metadata_dir: ${path.root}/metadata 4 | train_music_file: ${path.metadata_dir}/music.train.json 5 | valid_music_file: ${path.metadata_dir}/music.valid.json 6 | listeners_train_file: ${path.metadata_dir}/listeners.train.json 7 | listeners_valid_file: ${path.metadata_dir}/listeners.valid.json 8 | brir_file: ${path.metadata_dir}/eBrird_BRIR.json 9 | scenes_file: ${path.metadata_dir}/scenes.json 10 | scenes_listeners_file: ${path.metadata_dir}/scenes_listeners.json 11 | 12 | # Every seed value generates 8000 new samples. 13 | # Seed 2023 must never be removed. 14 | # If you want to add more seeds, add them to the list. 15 | # eg., seed: [2023, 2024, 2025] 16 | seed: [2023] 17 | valid_seed: 2023 18 | 19 | hydra: 20 | run: 21 | dir: . 
22 | job: 23 | chdir: True -------------------------------------------------------------------------------- /recipes/cad2/README.md: -------------------------------------------------------------------------------- 1 | # The Second Cadenza Challenge 2 | 3 | Cadenza challenge code for the Second Cadenza Challenge (CAD2). 4 | For more information please visit the [challenge website](https://cadenzachallenge.org/docs/cadenza2/intro). 5 | 6 | In the directories `task 1` and `task 2`, you will find the code for the baseline 7 | for each system and the instruction on how to obtain the data. 8 | -------------------------------------------------------------------------------- /recipes/cad2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad2/__init__.py -------------------------------------------------------------------------------- /recipes/cad2/task1/ConvTasNet/README.md: -------------------------------------------------------------------------------- 1 | # CAD2-TASK1 singing/accompaniment separation model 2 | 3 | This recipe contains the necessary content to replicate the separation models used in CAD2-Task1. 4 | 5 | - The system is based on Asteroid Source Separation system. 
6 | - ConvTasNet implementation is based on stereo adaptation by Alexandre Defossez 7 | - Evaluation logic is based on 8 | - Dataloader is based on 9 | 10 | You can replicate the Causal and Non-Causal model by running: 11 | 12 | - **To replicate the Non-Causal model** 13 | 14 | ```bash 15 | python train.py \ 16 | --exp_dir /path/to/save/exps \ 17 | --batch_size 4 \ 18 | --aggregate 2 \ 19 | --lr 0.0005 \ 20 | --root /path/to/MUSDB18 \ 21 | --sample_rate 44100 \ 22 | --segment 5.0 \ 23 | --samples_per_track 64 24 | ``` 25 | 26 | - **To replicate the Causal model** 27 | 28 | ```bash 29 | python train.py \ 30 | --exp_dir /path/to/save/exps \ 31 | --batch_size 4 \ 32 | --aggregate 1 \ 33 | --lr 0.0005 \ 34 | --root /path/to/MUSDB18 \ 35 | --sample_rate 44100 \ 36 | --segment 4.0 \ 37 | --samples_per_track 64 \ 38 | --causal True \ 39 | --n_src 2 \ 40 | --norm_type cLN 41 | ``` 42 | -------------------------------------------------------------------------------- /recipes/cad2/task1/ConvTasNet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad2/task1/ConvTasNet/__init__.py -------------------------------------------------------------------------------- /recipes/cad2/task1/ConvTasNet/local/__init__.py: -------------------------------------------------------------------------------- 1 | from .musdb18_dataset import Compose, MUSDB18Dataset, augment_channelswap, augment_gain 2 | from .tasnet import ConvTasNetStereo, overlap_and_add 3 | 4 | __all__ = [ 5 | "MUSDB18Dataset", 6 | "Compose", 7 | "augment_gain", 8 | "augment_channelswap", 9 | "ConvTasNetStereo", 10 | "overlap_and_add", 11 | ] 12 | -------------------------------------------------------------------------------- /recipes/cad2/task1/ConvTasNet/local/conf.yml: -------------------------------------------------------------------------------- 1 | # ConvTasNet config 2 
| convtasnet: 3 | N: 256 4 | L: 20 5 | B: 256 6 | H: 512 7 | P: 3 8 | X: 10 9 | R: 4 10 | C: 2 11 | audio_channels: 2 # stereo 12 | norm_type: gLN 13 | causal: False 14 | mask_nonlinear: 'relu' 15 | 16 | # Training config 17 | training: 18 | epochs: 200 19 | batch_size: 4 # Aggregate 2 iterations, effective batch size = 8 20 | num_workers: 4 21 | half_lr: yes 22 | early_stop: yes 23 | aggregate: 2 # aggregate gradient every 2 iterations 24 | # Optim config 25 | optim: 26 | lr: 0.0005 27 | # Data config 28 | data: 29 | root: /path/to/MUSDB18 30 | mix_background: true 31 | segment: 5.0 # Seconds 32 | samples_per_track: 64 33 | sample_rate: 44100 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /recipes/cad2/task1/ConvTasNet/local/prepare_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | dampvsep_root= 3 | python_path=python 4 | 5 | . ./utils/parse_options.sh 6 | if [ ! -d DAMP-VSEP-Singles ]; then 7 | # Clone preprocessed DAMP-VSEP-Singles repo 8 | git clone https://github.com/groadabike/DAMP-VSEP-Singles.git 9 | fi 10 | 11 | if [ ! -d metadata ]; then 12 | # Generate the splits 13 | .
DAMP-VSEP-Singles/generate_dampvsep_singles.sh $dampvsep_root ../metadata $python_path 14 | fi 15 | -------------------------------------------------------------------------------- /recipes/cad2/task1/ConvTasNet/requirements.txt: -------------------------------------------------------------------------------- 1 | asteroid 2 | musdb 3 | museval 4 | -------------------------------------------------------------------------------- /recipes/cad2/task1/ConvTasNet/utils/prepare_python_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage ./utils/install_env.sh --install_dir A --asteroid_root B --pip_requires C 3 | install_dir=~ 4 | asteroid_root=../../../../ 5 | pip_requires=../../../requirements.txt # Expects a requirement.txt 6 | 7 | . utils/parse_options.sh || exit 1 8 | 9 | mkdir -p $install_dir 10 | cd $install_dir 11 | echo "Download and install latest version of miniconda3 into ${install_dir}" 12 | wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh 13 | 14 | bash Miniconda3-latest-Linux-x86_64.sh -b -p miniconda3 15 | pip_path=$PWD/miniconda3/bin/pip 16 | 17 | rm Miniconda3-latest-Linux-x86_64.sh 18 | cd - 19 | 20 | if [[ ! -z ${pip_requires} ]]; then 21 | $pip_path install -r $pip_requires 22 | fi 23 | $pip_path install soundfile 24 | $pip_path install -e $asteroid_root 25 | #$pip_path install ${asteroid_root}/\[""evaluate""\] 26 | echo -e "\nAsteroid has been installed in editable mode. Feel free to apply your changes !" 
-------------------------------------------------------------------------------- /recipes/cad2/task1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad2/task1/__init__.py -------------------------------------------------------------------------------- /recipes/cad2/task1/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad2/task1/baseline/__init__.py -------------------------------------------------------------------------------- /recipes/cad2/task1/baseline/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ??? # Set to the root of the dataset 3 | metadata_dir: ${path.root}/metadata 4 | music_dir: ${path.root}/audio 5 | musics_file: ${path.metadata_dir}/music.valid.json 6 | alphas_file: ${path.metadata_dir}/alpha.json 7 | listeners_file: ${path.metadata_dir}/listeners.valid.json 8 | enhancer_params_file: ${path.metadata_dir}/compressor_params.valid.json 9 | scenes_file: ${path.metadata_dir}/scene.valid.json 10 | scene_listeners_file: ${path.metadata_dir}/scene_listeners.valid.json 11 | exp_folder: ./exp_${separator.causality} # folder to store enhanced signals and final results 12 | 13 | input_sample_rate: 44100 # sample rate of the input mixture 14 | remix_sample_rate: 44100 # sample rate for the output remixed signal 15 | HAAQI_sample_rate: 24000 # sample rate for computing HAAQI score 16 | 17 | separator: 18 | causality: causal 19 | device: ~ 20 | separation: 21 | number_sources: 2 22 | segment: 6.0 23 | overlap: 0.1 24 | sample_rate: ${input_sample_rate} 25 | 26 | enhancer: 27 | crossover_frequencies: [ 353.55, 707.11, 1414.21, 2828.43, 5656.85 ] # [250, 500, 1000, 2000, 4000] * sqrt(2) 
28 | attack: [ 11, 11, 14, 13, 11, 11 ] 29 | release: [ 80, 80, 80, 80, 100, 100 ] 30 | threshold: [ -30, -30, -30, -30, -30, -30 ] 31 | 32 | soft_clip: False 33 | 34 | evaluate: 35 | whisper_version: base.en 36 | set_random_seed: True 37 | small_test: False 38 | save_intermediate: False 39 | equiv_0db_spl: 100 40 | batch_size: 1 # Number of batches 41 | batch: 0 # Batch number to evaluate 42 | 43 | # hydra config 44 | hydra: 45 | run: 46 | dir: ${path.exp_folder} 47 | job: 48 | chdir: True -------------------------------------------------------------------------------- /recipes/cad2/task1/requirements.txt: -------------------------------------------------------------------------------- 1 | huggingface-hub 2 | jiwer 3 | openai-whisper 4 | safetensors 5 | -------------------------------------------------------------------------------- /recipes/cad2/task2/ConvTasNet/README.md: -------------------------------------------------------------------------------- 1 | # CAD2-TASK2 target instrument/accompaniment separation model 2 | 3 | This recipe contains the necessary content to replicate the separation models used in CAD2-Task1. 4 | 5 | - The system is based on Asteroid Source Separation system. 
6 | - ConvTasNet implementation is based on stereo adaptation by Alexandre Defossez 7 | 8 | You can replicate the Causal and Non-Causal model by running: 9 | 10 | - **To replicate the Non-Causal model** 11 | 12 | ```bash 13 | python train.py \ 14 | --exp_dir /path/to/save/exps \ 15 | --batch_size 4 \ 16 | --aggregate 2 \ 17 | --lr 0.0005 \ 18 | --root /path/to/MUSDB18 \ 19 | --sample_rate 44100 \ 20 | --segment 5.0 \ 21 | --samples_per_track 64 22 | ``` 23 | 24 | - **To replicate the Causal model** 25 | 26 | ```bash 27 | python train.py \ 28 | --exp_dir /path/to/save/exps \ 29 | --batch_size 4 \ 30 | --aggregate 1 \ 31 | --lr 0.0005 \ 32 | --root /path/to/MUSDB18 \ 33 | --sample_rate 44100 \ 34 | --segment 4.0 \ 35 | --samples_per_track 64 \ 36 | --causal True \ 37 | --n_src 2 \ 38 | --norm_type cLN 39 | ``` 40 | -------------------------------------------------------------------------------- /recipes/cad2/task2/ConvTasNet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad2/task2/ConvTasNet/__init__.py -------------------------------------------------------------------------------- /recipes/cad2/task2/ConvTasNet/local/__init__.py: -------------------------------------------------------------------------------- 1 | from .cad2task2_dataloader import ( 2 | Compose, 3 | RebalanceMusicDataset, 4 | augment_channelswap, 5 | augment_gain, 6 | ) 7 | from .tasnet import ConvTasNetStereo, overlap_and_add 8 | 9 | __all__ = [ 10 | "ConvTasNetStereo", 11 | "overlap_and_add", 12 | "RebalanceMusicDataset", 13 | "Compose", 14 | "augment_gain", 15 | "augment_channelswap", 16 | ] 17 | -------------------------------------------------------------------------------- /recipes/cad2/task2/ConvTasNet/local/conf.yml: -------------------------------------------------------------------------------- 1 | # ConvTasNet config 2 | convtasnet: 3 
| N: 256 4 | L: 20 5 | B: 256 6 | H: 512 7 | P: 3 8 | X: 10 9 | R: 4 10 | C: 2 11 | audio_channels: 2 # stereo 12 | norm_type: gLN 13 | causal: False 14 | mask_nonlinear: 'relu' 15 | 16 | # Training config 17 | training: 18 | epochs: 200 19 | batch_size: 4 20 | num_workers: 4 21 | half_lr: yes 22 | early_stop: yes 23 | aggregate: 1 24 | # Optim config 25 | optim: 26 | lr: 0.0001 27 | # Data config 28 | data: 29 | root_path: path/to/cad2/task2/audio/data # path to Cadenza dataset, audio folder 30 | music_tracks_file: path/to/cad2/task2/metadata/data # path to metadata file in cadenza format 31 | sample_rate: 44100 32 | target: 'Bassoon' 33 | samples_per_track: 64 34 | segment_length: 3.0 35 | 36 | 37 | -------------------------------------------------------------------------------- /recipes/cad2/task2/ConvTasNet/utils/prepare_python_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage ./utils/install_env.sh --install_dir A --asteroid_root B --pip_requires C 3 | install_dir=~ 4 | asteroid_root=../../../../ 5 | pip_requires=../../../requirements.txt # Expects a requirement.txt 6 | 7 | . utils/parse_options.sh || exit 1 8 | 9 | mkdir -p $install_dir 10 | cd $install_dir 11 | echo "Download and install latest version of miniconda3 into ${install_dir}" 12 | wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh 13 | 14 | bash Miniconda3-latest-Linux-x86_64.sh -b -p miniconda3 15 | pip_path=$PWD/miniconda3/bin/pip 16 | 17 | rm Miniconda3-latest-Linux-x86_64.sh 18 | cd - 19 | 20 | if [[ ! -z ${pip_requires} ]]; then 21 | $pip_path install -r $pip_requires 22 | fi 23 | $pip_path install soundfile 24 | $pip_path install -e $asteroid_root 25 | #$pip_path install ${asteroid_root}/\[""evaluate""\] 26 | echo -e "\nAsteroid has been installed in editable mode. Feel free to apply your changes !" 
-------------------------------------------------------------------------------- /recipes/cad2/task2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad2/task2/__init__.py -------------------------------------------------------------------------------- /recipes/cad2/task2/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad2/task2/baseline/__init__.py -------------------------------------------------------------------------------- /recipes/cad2/task2/baseline/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ??? # Set to the root of the dataset 3 | metadata_dir: ${path.root}/metadata 4 | music_dir: ${path.root}/audio 5 | gains_file: ${path.metadata_dir}/gains.json 6 | listeners_file: ${path.metadata_dir}/listeners.valid.json 7 | enhancer_params_file: ${path.metadata_dir}/compressor_params.valid.json 8 | music_file: ${path.metadata_dir}/music.valid.json 9 | scenes_file: ${path.metadata_dir}/scenes.valid.json 10 | scene_listeners_file: ${path.metadata_dir}/scene_listeners.valid.json 11 | exp_folder: ./exp # folder to store enhanced signals and final results 12 | 13 | input_sample_rate: 44100 14 | remix_sample_rate: 32000 15 | HAAQI_sample_rate: 24000 16 | 17 | separator: 18 | force_redownload: True 19 | add_residual: 0.1 20 | causality: noncausal 21 | device: ~ 22 | separation: 23 | number_sources: 2 24 | segment: 6.0 25 | overlap: 0.1 26 | sample_rate: ${input_sample_rate} 27 | 28 | enhancer: 29 | crossover_frequencies: [353.55, 707.11, 1414.21, 2828.43, 5656.85] # [250, 500, 1000, 2000, 4000] * sqrt(2) 30 | attack: [11, 11, 14, 13, 11, 11] 31 | release: [80, 80, 80, 80, 100, 100] 32 | 
"""Join batches scores into a single file."""

# pylint: disable=import-error
import hydra
import pandas as pd
from omegaconf import DictConfig


@hydra.main(config_path="", config_name="config", version_base=None)
def join_batches(config: DictConfig) -> None:
    """
    Join batches scores into a single file.

    Reads the per-batch CSV files `scores_<batch>-<total>.csv` (batches are
    1-indexed) from the current working directory and concatenates them into
    a single `scores.csv`.

    Args:
        config (DictConfig): Dictionary of configuration options.
            The `.evaluate.batch_size` is extracted to determine how many
            batches there are to combine.

    Raises:
        FileNotFoundError: if any expected per-batch score file is missing.
    """
    # FIX: pass version_base=None to hydra.main, matching the otherwise
    # identical script in recipes/cad_icassp_2024 — without it, Hydra >= 1.2
    # emits a deprecation warning and compatibility behaviour may change.
    batches_results = [
        pd.read_csv(f"scores_{batch + 1}-{config.evaluate.batch_size}.csv")
        for batch in range(config.evaluate.batch_size)
    ]
    df_res = pd.concat(batches_results, ignore_index=True)
    df_res.to_csv("scores.csv", index=False)


# pylint: disable=no-value-for-parameter
if __name__ == "__main__":
    join_batches()
6 | metadata_dir: ${path.root}/metadata 7 | scene_metadata_file: ${path.metadata_dir}/scenes.train2.json 8 | gain_metadata: ${path.metadata_dir}/gains_meta.json 9 | music_dir: ${path.root}/audio 10 | music_metadata: ${path.metadata_dir}/music.train.json 11 | 12 | seed_scene_generation: Cad2Task2_2024 13 | gains_per_track: 4 14 | listener_per_scene: 2 15 | 16 | # hydra config 17 | hydra: 18 | job: 19 | chdir: True -------------------------------------------------------------------------------- /recipes/cad_icassp_2024/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cad_icassp_2024/__init__.py -------------------------------------------------------------------------------- /recipes/cad_icassp_2024/baseline/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ??? 3 | metadata_dir: ${path.root}/metadata 4 | music_dir: ${path.root}/audio/at_mic_music 5 | gains_file: ${path.metadata_dir}/gains.json 6 | head_positions_file: ${path.metadata_dir}/head_positions.json 7 | listeners_file: ${path.metadata_dir}/listeners.train.json 8 | music_file: ${path.metadata_dir}/at_mic_music.train.json 9 | scenes_file: ${path.metadata_dir}/scenes.train.json 10 | scene_listeners_file: ${path.metadata_dir}/scene_listeners.train.json 11 | exp_folder: ./exp # folder to store enhanced signals and final results 12 | 13 | sample_rate: 44100 14 | remix_sample_rate: 32000 15 | HAAQI_sample_rate: 24000 16 | 17 | nalr: 18 | nfir: 220 19 | sample_rate: ${sample_rate} 20 | 21 | apply_compressor: False 22 | compressor: 23 | threshold: 0.35 24 | attenuation: 0.1 25 | attack: 50 26 | release: 1000 27 | rms_buffer_size: 0.064 28 | 29 | soft_clip: False 30 | 31 | separator: 32 | model: demucs # demucs or openunmix 33 | device: ~ 34 | 35 | evaluate: 36 | set_random_seed: True 37 | small_test: 
False 38 | batch_size: 1 # Number of batches 39 | batch: 0 # Batch number to evaluate 40 | 41 | # hydra config 42 | hydra: 43 | run: 44 | dir: ${path.exp_folder} 45 | job: 46 | chdir: True -------------------------------------------------------------------------------- /recipes/cad_icassp_2024/baseline/merge_batches_results.py: -------------------------------------------------------------------------------- 1 | """Join batches scores into a single file.""" 2 | 3 | # pylint: disable=import-error 4 | import hydra 5 | import pandas as pd 6 | from omegaconf import DictConfig 7 | 8 | 9 | @hydra.main(config_path="", config_name="config", version_base=None) 10 | def join_batches(config: DictConfig) -> None: 11 | """ 12 | Join batches scores into a single file. 13 | 14 | Args: 15 | config (DictConfig): Dictionary of configuration options. 16 | The `.evaluate.batch_size` is extracted to determine how many 17 | batches there are to combine. 18 | 19 | """ 20 | batches_results = [] 21 | for batch in range(config.evaluate.batch_size): 22 | batches_results.append( 23 | pd.read_csv(f"scores_{batch + 1}-{config.evaluate.batch_size}.csv") 24 | ) 25 | df_res = pd.concat(batches_results, ignore_index=True) 26 | df_res.to_csv("scores.csv", index=False) 27 | 28 | 29 | # pylint: disable=no-value-for-parameter 30 | if __name__ == "__main__": 31 | join_batches() 32 | -------------------------------------------------------------------------------- /recipes/cad_icassp_2024/generate_dataset/README.md: -------------------------------------------------------------------------------- 1 | # Generate music dataset for the ICASSP 2024 Cadenza Challenge 2 | 3 | The ICASSP 2024 Cadenza Challenge music dataset is based on the MUSDB18-HQ dataset. 4 | 5 | Steps: 6 | 7 | 1. Download `cadenza_cad1_task1_core_musdb18hq.tar.gz` and `cadenza_cad1_task1_core_metadata.tar.gz` 8 | packages from the [Cadenza Challenge website](https://cadenza-challenge.github.io/). 9 | 2. 
Unpack packages under the same root directory. 10 | 3. Run the script 11 | 12 | ## Unpack the data 13 | 14 | To unpack the data run: 15 | 16 | ```bash 17 | tar -xvzf 18 | ``` 19 | 20 | ## Generate the dataset 21 | 22 | To generate the dataset, set the `path.root` parameter in the `generate_dataset/config.yaml` 23 | to where you unpacked the data. Then run: 24 | 25 | ```bash 26 | python generates_at_mic_musdb18.py 27 | ``` 28 | 29 | or, run the script with the `path.root` parameter: 30 | 31 | ```bash 32 | python generates_at_mic_musdb18.py path.root 33 | ``` 34 | 35 | The script will generate the dataset in the `path.root` directory. 36 | 37 | The script should create the `at_mic_microphone` where all music samples 38 | picked up by the microphones (at the mic) are saved. 39 | 40 | In the next example, `A Classic Education - NightOwl-hp_0103` corresponds to the 41 | song `A Classic Education - NightOwl` with the `hp_0103` head position. 42 | 43 | ```text 44 | cadenza_data 45 | ├───audio 46 | | ├───at_mic_music 47 | | | └───train (80.8 GB) 48 | | | ├───A Classic Education - NightOwl-hp_0103 49 | | | | | bass.wav 50 | | | | | drums.wav 51 | | | | | other.wav 52 | | | | | vocals.wav 53 | | | | | mixture.wav 54 | | | | 55 | | | ├───A Classic Education - NightOwl-hp_0138 56 | | | | .... 57 | | | 58 | | ├───hrtf (336 kB) 59 | | | 60 | | └───music 61 | | └───train (20.2 GB) 62 | | 63 | └───metadata (328 kB) 64 | | gains.json 65 | | at_mic_music.train.json 66 | | ... 67 | ``` 68 | -------------------------------------------------------------------------------- /recipes/cad_icassp_2024/generate_dataset/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ??? 
3 | metadata_dir: ${path.root}/metadata 4 | music_dir: ${path.root}/audio/music # musdb18 dataset 5 | hrtf_dir: ${path.root}/audio/hrtf 6 | scene_file: ${path.metadata_dir}/scenes.train.json 7 | music_file: ${path.metadata_dir}/musdb18.train.json 8 | head_loudspeaker_positions_file: ${path.metadata_dir}/head_loudspeaker_positions.json 9 | tracklist_file: ./musdb18_tracklist.csv 10 | gains_file: ${path.metadata_dir}/gains.json 11 | output_music_dir: ${path.root}/audio/at_mic_music # at microphone musdb18 dataset 12 | output_music_file: ${path.metadata_dir}/at_mic_music.train.json 13 | 14 | sample_rate: 44100 15 | 16 | scene: 17 | number_scenes_per_song: 4 18 | 19 | scene_listener: 20 | number_listeners_per_scene: 2 21 | 22 | hydra: 23 | run: 24 | dir: . 25 | job: 26 | chdir: True 27 | -------------------------------------------------------------------------------- /recipes/cec1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cec1/__init__.py -------------------------------------------------------------------------------- /recipes/cec1/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cec1/baseline/__init__.py -------------------------------------------------------------------------------- /recipes/cec1/baseline/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ??? 
3 | exp_folder: ./results/ # folder to store intermediate signals and final results 4 | scenes_listeners_file: ${path.root}/clarity_CEC1_data/clarity_data/metadata/scenes_listeners.dev.json 5 | listeners_file: ${path.root}/clarity_CEC1_data/clarity_data/metadata/listeners.json 6 | scenes_folder: ${path.root}/clarity_CEC1_data/clarity_data/dev/scenes 7 | enhanced_signals: ${path.exp_folder}/enhanced_signals 8 | 9 | # experimental parameters 10 | num_channels: 3 11 | sample_rate: 44100 12 | ahr: 20 13 | equiv_0db_spl: 100 14 | 15 | GHAHearingAid: # hyperparameters for GHA Hearing Aid, BE CAREFUL if making changes 16 | sample_rate: ${sample_rate} 17 | ahr: ${ahr} 18 | audf: null 19 | cfg_file: prerelease_combination4_smooth 20 | noise_gate_levels: null 21 | noise_gate_slope: 0 22 | cr_level: 0 23 | max_output_level: 100 24 | equiv_0db_spl: ${equiv_0db_spl} 25 | test_nbits: 16 26 | 27 | MSBGEar: # hyperparameters for MSBG ear 28 | src_pos: ff 29 | sample_rate: ${sample_rate} 30 | equiv_0db_spl: ${equiv_0db_spl} 31 | ahr: ${ahr} 32 | 33 | mbstoi: 34 | sample_rate: ${sample_rate} 35 | grid_coarseness: 1 36 | 37 | # disable hydra loggings 38 | defaults: 39 | - override hydra/job_logging: disabled 40 | 41 | hydra: 42 | output_subdir: Null 43 | run: 44 | dir: . 
45 | job: 46 | chdir: True 47 | -------------------------------------------------------------------------------- /recipes/cec1/baseline/enhance.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | 4 | import hydra 5 | from omegaconf import DictConfig 6 | from tqdm import tqdm 7 | 8 | from clarity.enhancer.gha.gha_interface import GHAHearingAid 9 | from clarity.utils.audiogram import Listener 10 | 11 | 12 | @hydra.main(config_path=".", config_name="config", version_base=None) 13 | def enhance(cfg: DictConfig) -> None: 14 | enhanced_folder = Path(cfg.path.exp_folder) / "enhanced_signals" 15 | enhanced_folder.mkdir(parents=True, exist_ok=True) 16 | with open(cfg.path.scenes_listeners_file, encoding="utf-8") as fp: 17 | scenes_listeners = json.load(fp) 18 | listener_dict = Listener.load_listener_dict(cfg.path.listeners_file) 19 | enhancer = GHAHearingAid(**cfg["GHAHearingAid"]) 20 | 21 | for scene in tqdm(scenes_listeners): 22 | for listener_id in scenes_listeners[scene]: 23 | listener = listener_dict[listener_id] 24 | 25 | infile_names = [ 26 | f"{cfg.path.scenes_folder}/{scene}_mixed_CH{ch}.wav" 27 | for ch in range(1, cfg["num_channels"] + 1) 28 | ] 29 | 30 | enhancer.process_files( 31 | infile_names=infile_names, 32 | outfile_name=f"{enhanced_folder}/{scene}_{listener_id}_HA-output.wav", 33 | listener=listener, 34 | ) 35 | 36 | 37 | # pylint: disable=no-value-for-parameter 38 | if __name__ == "__main__": 39 | enhance() 40 | -------------------------------------------------------------------------------- /recipes/cec1/baseline/results/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cec1/baseline/results/__init__.py -------------------------------------------------------------------------------- /recipes/cec1/data_preparation/README.md: 
-------------------------------------------------------------------------------- 1 | # CLARITY CEC1 Data Preparation 2 | 3 | The code here explains how we generated the CEC1 scenes. Please note that 6000 training scenes and 2500 development scenes are included in the data you downloaded. If you are intereted in how we generated the scenes, you could run the scripts to get the idea. 4 | 5 | In `data_config.yaml`, specify `root` folder in which you unpack the data. Run `prepare_cec1_data.py` to generate the train & dev scenes. 6 | -------------------------------------------------------------------------------- /recipes/cec1/data_preparation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cec1/data_preparation/__init__.py -------------------------------------------------------------------------------- /recipes/cec1/data_preparation/data_config.yaml: -------------------------------------------------------------------------------- 1 | root: ??? 2 | input_path: ${root}/clarity_CEC1_data/clarity_data/ 3 | 4 | datasets: 5 | train: 6 | metafile_path: ${root}/clarity_CEC1_data/clarity_data/metadata/scenes.train.json 7 | scene_folder: ${root}/clarity_CEC1_data/clarity_data/train/scenes/ 8 | dev: 9 | metafile_path: ${root}/clarity_CEC1_data/clarity_data/metadata/scenes.dev.json 10 | scene_folder: ${root}/clarity_CEC1_data/clarity_data/dev/scenes/ 11 | 12 | num_channels: 3 13 | 14 | 15 | # disable hydra loggings 16 | defaults: 17 | - override hydra/job_logging: disabled 18 | 19 | hydra: 20 | output_subdir: Null 21 | run: 22 | dir: . 
import json
import logging
from pathlib import Path

import hydra
from omegaconf import DictConfig
from tqdm import tqdm

from clarity.data.scene_renderer_cec1 import Renderer, check_scene_exists


def prepare_data(
    root_path: str, metafile_path: str, scene_folder: str, num_channels: int
):
    """
    Generate scene data given dataset (train or dev)

    Args:
        root_path: Clarity root path
        metafile_path: scene metafile path
        scene_folder: folder containing generated scenes
        num_channels: number of channels
    """
    with open(metafile_path, encoding="utf-8") as fp:
        scenes = json.load(fp)

    Path(scene_folder).mkdir(parents=True, exist_ok=True)

    # BUG FIX: the renderer previously hard-coded num_channels=3, silently
    # ignoring the num_channels argument that check_scene_exists() below
    # (and the `num_channels` config value) uses. Pass the parameter through
    # so the two stay consistent if the config is ever changed.
    renderer = Renderer(
        input_path=root_path, output_path=scene_folder, num_channels=num_channels
    )
    for scene in tqdm(scenes):
        if check_scene_exists(scene, scene_folder, num_channels):
            # Lazy %-style args avoid formatting when the level is disabled.
            logging.info("Skipping processed scene %s.", scene["scene"])
        else:
            renderer.render(
                pre_samples=scene["pre_samples"],
                post_samples=scene["post_samples"],
                dataset=scene["dataset"],
                target_id=scene["target"]["name"],
                noise_type=scene["interferer"]["type"],
                interferer_id=scene["interferer"]["name"],
                room=scene["room"]["name"],
                scene=scene["scene"],
                offset=scene["interferer"]["offset"],
                snr_dB=scene["SNR"],
            )


@hydra.main(config_path=".", config_name="data_config", version_base=None)
def run(cfg: DictConfig) -> None:
    """Render scenes for every dataset (train/dev) listed in the config."""
    for dataset in cfg["datasets"]:
        prepare_data(
            cfg.input_path,
            cfg["datasets"][dataset]["metafile_path"],
            cfg["datasets"][dataset]["scene_folder"],
            cfg.num_channels,
        )


# pylint: disable=no-value-for-parameter
if __name__ == "__main__":
    run()
import logging
from pathlib import Path

import hydra
from omegaconf import DictConfig

from clarity.data.scene_builder_cec2 import RoomBuilder, SceneBuilder, set_random_seed

logger = logging.getLogger(__name__)


def instantiate_scenes(cfg):
    """Instantiate additional training scenes and save them to one metadata file.

    Reads room definitions from rooms.train.json and, for each dataset listed
    in cfg.scene_datasets, builds scenes and writes them to
    cfg.path.additional_data_file.
    """
    room_builder = RoomBuilder()
    # Seed the builder's RNG so additional data generation is reproducible.
    set_random_seed(cfg.random_seed)
    # NOTE(review): rooms.train.json is used for every dataset in the loop —
    # presumably intentional since this script only augments training data;
    # confirm if cfg.scene_datasets can contain non-train entries.
    room_file = Path(cfg.path.metadata_dir) / "rooms.train.json"
    for dataset in cfg.scene_datasets:
        # NOTE(review): the guard checks the single additional_data_file, not a
        # per-dataset file, so after the first iteration writes it, all later
        # datasets are skipped — and the skip message below names
        # scenes.<dataset>.json, which is not the file actually checked.
        # Confirm whether multiple datasets are ever expected here.
        if not Path(cfg.path.additional_data_file).exists():
            logger.info(f"instantiate scenes for {dataset} set")
            room_builder.load(str(room_file))
            scene_builder = SceneBuilder(
                rb=room_builder,
                scene_datasets=cfg.scene_datasets[dataset],
                target=cfg.target,
                interferer=cfg.interferer,
                snr_range=cfg.snr_range[dataset],
                listener=cfg.listener,
                shuffle_rooms=cfg.shuffle_rooms,
            )
            scene_builder.instantiate_scenes(dataset=dataset)
            scene_builder.save_scenes(cfg.path.additional_data_file)
        else:
            logger.info(f"scenes.{dataset}.json has existed, skip")


@hydra.main(config_path=".", config_name="additional_data_config", version_base=None)
def run(cfg: DictConfig) -> None:
    """Hydra entry point: instantiate the additional training scenes."""
    logger.info("Instantiating scenes for additional training data")
    instantiate_scenes(cfg)


# pylint: disable=no-value-for-parameter
if __name__ == "__main__":
    run()
{dataset} set...") 15 | file_path = cfg.scene_renderer[dataset].metadata.scene_definitions 16 | with open(file_path, encoding="utf-8") as fp: 17 | scenes = json.load(fp) 18 | 19 | starting_scene = ( 20 | cfg.scene_renderer[dataset].chunk_size * cfg.render_starting_chunk 21 | ) 22 | n_scenes = ( 23 | cfg.scene_renderer[dataset].chunk_size * cfg.render_n_chunk_to_process 24 | ) 25 | scenes = scenes[starting_scene : starting_scene + n_scenes] 26 | 27 | scene_renderer = SceneRenderer( 28 | cfg.scene_renderer[dataset].paths, 29 | cfg.scene_renderer[dataset].metadata, 30 | **cfg.render_params, 31 | ) 32 | scene_renderer.render_scenes(scenes) 33 | 34 | 35 | @hydra.main(config_path=".", config_name="additional_data_config", version_base=None) 36 | def run(cfg: DictConfig) -> None: 37 | logger.info("Rendering scenes") 38 | render_scenes(cfg) 39 | 40 | 41 | # pylint: disable=no-value-for-parameter 42 | if __name__ == "__main__": 43 | run() 44 | -------------------------------------------------------------------------------- /recipes/cec2/baseline/data_generation/render_additional_scenes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=clarity 3 | #SBATCH --partition=clarity 4 | 5 | module load Anaconda3/5.3.0 6 | module load CUDA/10.2.89-GCC-8.3.0 7 | source activate clarity 8 | 9 | srun --export=ALL python render_additional_scenes.py 'render_starting_chunk=range(0, 1000, 200)' --multirun 10 | -------------------------------------------------------------------------------- /recipes/cec2/baseline/exp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cec2/baseline/exp/__init__.py -------------------------------------------------------------------------------- /recipes/cec2/data_preparation/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cec2/data_preparation/__init__.py -------------------------------------------------------------------------------- /recipes/cec2/data_preparation/build_scenes.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | 4 | import hydra 5 | from omegaconf import DictConfig 6 | 7 | from clarity.data.scene_builder_cec2 import RoomBuilder, SceneBuilder, set_random_seed 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | def build_rooms_from_rpf(cfg): 13 | room_builder = RoomBuilder() 14 | for dataset in cfg.room_datasets: 15 | room_file = Path(cfg.path.metadata_dir) / f"rooms.{dataset}.json" 16 | if not room_file.exists(): 17 | room_builder.build_from_rpf(**cfg.room_datasets[dataset]) 18 | room_builder.save_rooms(str(room_file)) 19 | else: 20 | logger.info(f"rooms.{dataset}.json exists, skip") 21 | 22 | 23 | def instantiate_scenes(cfg): 24 | room_builder = RoomBuilder() 25 | set_random_seed(cfg.random_seed) 26 | for dataset in cfg.scene_datasets: 27 | scene_file = Path(cfg.path.metadata_dir) / f"scenes.{dataset}.json" 28 | if not scene_file.exists(): 29 | logger.info(f"instantiate scenes for {dataset} set") 30 | room_file = Path(cfg.path.metadata_dir) / f"rooms.{dataset}.json" 31 | room_builder.load(str(room_file)) 32 | scene_builder = SceneBuilder( 33 | rb=room_builder, 34 | scene_datasets=cfg.scene_datasets[dataset], 35 | target=cfg.target, 36 | interferer=cfg.interferer, 37 | snr_range=cfg.snr_range[dataset], 38 | listener=cfg.listener, 39 | shuffle_rooms=cfg.shuffle_rooms, 40 | ) 41 | scene_builder.instantiate_scenes(dataset=dataset) 42 | scene_builder.save_scenes(str(scene_file)) 43 | else: 44 | logger.info(f"scenes.{dataset}.json exists, skip") 45 | 46 | 47 | @hydra.main(config_path=".", 
config_name="config", version_base=None) 48 | def run(cfg: DictConfig) -> None: 49 | logger.info("Building rooms") 50 | build_rooms_from_rpf(cfg) 51 | logger.info("Instantiating scenes") 52 | instantiate_scenes(cfg) 53 | 54 | 55 | # pylint: disable=no-value-for-parameter 56 | if __name__ == "__main__": 57 | run() 58 | -------------------------------------------------------------------------------- /recipes/cec2/data_preparation/hydra/launcher/cec2_submitit_local.yaml: -------------------------------------------------------------------------------- 1 | # Submitit configuration for running data preparation stage locally 2 | 3 | defaults: 4 | - submitit_local 5 | 6 | cpus_per_task: 1 7 | tasks_per_node: 2 8 | mem_gb: 4 9 | nodes: 1 10 | -------------------------------------------------------------------------------- /recipes/cec2/data_preparation/hydra/launcher/cec2_submitit_slurm.yaml: -------------------------------------------------------------------------------- 1 | # Submitit configuration for running data preparation stage on slurm cluster 2 | 3 | defaults: 4 | - submitit_slurm 5 | 6 | mem_per_cpu: 4GB 7 | tasks_per_node: 1 8 | timeout_min: 180 9 | additional_parameters: 10 | account: clarity 11 | partition: clarity 12 | setup: ['module load Anaconda3/5.3.0', 'source activate clarity', 'export SLURM_EXPORT_ENV=ALL'] -------------------------------------------------------------------------------- /recipes/cec2/data_preparation/render_scenes.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | import hydra 5 | from omegaconf import DictConfig 6 | 7 | from clarity.data.scene_renderer_cec2 import SceneRenderer 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | def render_scenes(cfg): 13 | for dataset in cfg.scene_renderer: 14 | logger.info(f"Beginning scene generation for {dataset} set...") 15 | with open( 16 | cfg.scene_renderer[dataset].metadata.scene_definitions, 17 | 
encoding="utf-8", 18 | ) as fp: 19 | scenes = json.load(fp) 20 | 21 | starting_scene = ( 22 | cfg.scene_renderer[dataset].chunk_size * cfg.render_starting_chunk 23 | ) 24 | n_scenes = ( 25 | cfg.scene_renderer[dataset].chunk_size * cfg.render_n_chunk_to_process 26 | ) 27 | scenes = scenes[starting_scene : starting_scene + n_scenes] 28 | 29 | scene_renderer = SceneRenderer( 30 | cfg.scene_renderer[dataset].paths, 31 | cfg.scene_renderer[dataset].metadata, 32 | **cfg.render_params, 33 | ) 34 | scene_renderer.render_scenes(scenes) 35 | 36 | 37 | @hydra.main(config_path=".", config_name="config", version_base=None) 38 | def run(cfg: DictConfig) -> None: 39 | logger.info("Rendering scenes") 40 | render_scenes(cfg) 41 | 42 | 43 | # pylint: disable=no-value-for-parameter 44 | if __name__ == "__main__": 45 | run() 46 | -------------------------------------------------------------------------------- /recipes/cec2/data_preparation/render_scenes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=clarity 3 | #SBATCH --partition=clarity 4 | 5 | module load Anaconda3/5.3.0 6 | module load CUDA/10.2.89-GCC-8.3.0 7 | source activate clarity 8 | 9 | srun --export=ALL python render_scenes.py 'render_starting_chunk=range(0, 500, 10)' --multirun -------------------------------------------------------------------------------- /recipes/cec3/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cec3/__init__.py -------------------------------------------------------------------------------- /recipes/cec3/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cec3/baseline/__init__.py 
-------------------------------------------------------------------------------- /recipes/cec3/baseline/config.yaml: -------------------------------------------------------------------------------- 1 | task: ??? # This can be set to 'task1', 'task2' or 'task3' 2 | 3 | path: 4 | root: ??? # root folder for clarity data 5 | exp: ??? # folder to store enhanced signals and final results 6 | scenes_folder: ${path.root}/${task}/clarity_data/dev/scenes 7 | metadata_dir: ${path.root}/${task}/clarity_data/metadata 8 | scenes_listeners_file: ${path.metadata_dir}/scenes_listeners.dev.json 9 | listeners_file: ${path.metadata_dir}/listeners.json 10 | scenes_file: ${path.metadata_dir}/scenes.dev.json 11 | 12 | nalr: 13 | nfir: 220 14 | sample_rate: 48000 15 | 16 | compressor: 17 | threshold: 0.35 18 | attenuation: 0.1 19 | attack: 50 20 | release: 1000 21 | rms_buffer_size: 0.064 22 | 23 | soft_clip: True 24 | 25 | evaluate: 26 | set_random_seed: True 27 | small_test: False 28 | first_scene: 0 29 | n_scenes: 0 30 | 31 | # hydra config 32 | hydra: 33 | run: 34 | dir: ${path.exp} 35 | sweep: 36 | dir: ${path.exp}/multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} 37 | subdir: ${hydra.job.num} 38 | 39 | defaults: 40 | - override hydra/launcher: cec3_submitit_local 41 | -------------------------------------------------------------------------------- /recipes/cec3/baseline/hydra/launcher/cec3_submitit_local.yaml: -------------------------------------------------------------------------------- 1 | # Submitit configuration for running data preparation stage locally 2 | 3 | defaults: 4 | - submitit_local 5 | 6 | cpus_per_task: 1 7 | tasks_per_node: 2 8 | mem_gb: 4 9 | nodes: 1 10 | -------------------------------------------------------------------------------- /recipes/cec3/baseline/hydra/launcher/cec3_submitit_slurm.yaml: -------------------------------------------------------------------------------- 1 | # Submitit configuration for running data preparation stage on slurm cluster 2 | # 
(Example configuration. Edit to fit your HPC set-up.) 3 | 4 | defaults: 5 | - submitit_slurm 6 | 7 | mem_per_cpu: 4GB 8 | tasks_per_node: 1 9 | timeout_min: 180 10 | additional_parameters: 11 | account: clarity 12 | partition: clarity 13 | setup: 14 | [ 15 | "module load Anaconda3/5.3.0 libsndfile/1.0.28-GCCcore-9.3.0", 16 | "source activate clarity", 17 | "export SLURM_EXPORT_ENV=ALL", 18 | ] 19 | -------------------------------------------------------------------------------- /recipes/cpc1/README.md: -------------------------------------------------------------------------------- 1 | # The 1st Clarity Prediction Challenge (CPC1) 2 | 3 | Clarity challenge code for the 1st prediction challenge (CPC1). 4 | 5 | There are two traks in CPC1: 6 | 7 | - **closed-set**: the evaluation systems and listeners are covered in the training set, the signals and listener repsonses are provieded in `CPC1.train.json` 8 | - **open-set**: the evaluation systems and listeners are not seen in the training set, the signals and listener responses are provieded in `CPC1_indep.train.json` 9 | 10 | For more information about the CPC1 please visit [claritychallenge.org/docs/cpc1/cpc1_intro](https://claritychallenge.org/docs/cpc1/cpc1_intro). 11 | 12 | ## Data structure 13 | 14 | To download the CPC1 data, please visit [here](https://mab.to/R6H84YNf74p5U). 15 | 16 | **clarity_CPC1_data.v1_1** contains the training data: 17 | 18 | ```text 19 | clarity_data 20 | | 21 | └───HA_outputs 22 | | | 23 | | └───train 3.8G 24 | | | 25 | | └───train_indep 2.8G 26 | | 27 | └───scenes 12.1G 28 | 29 | metadata 30 | |CPC1.train.json 31 | |CPC1.train_indep.json 32 | |listener_data.CPC1_train.xlsx 33 | listeners.CPC1_train.json 34 | |scenes.CPC1_train.json 35 | ``` 36 | 37 | **clarity_CPC1_data.test.v1** follows the same structure as **clarity_CPC1_data.v1_1**, except that the listener responses (i.e. test labels) are not included. The test listener responses are in the `test_listener_responses`. 
38 | 39 | ## Baseline 40 | 41 | The baseline folder provides the code of the Cambridge Auditory Group MSBG hearing loss model and MBSTOI, see [CEC1](../cec1/baseline). Run `run.py` to generate the predicted intelligibility, and then run `compute_scores.py` to apply logistic fitting and compute the evaluation scores, including RMSE, normalised cross-correlation, Kendall's Tau coefficient. 42 | 43 | ## Citing CPC1 44 | 45 | ```text 46 | @inproceedings{barker2022the, 47 | title={The 1st Clarity Prediction Challenge: A machine learning challenge for hearing aid intelligibility prediction}, 48 | author={Jon Barker, Michael Akeroyd, Trevor J. Cox, John F. Culling, Jennifer Firth, Simone Graetzer, Holly Griffiths, Lara Harris, Graham Naylor, Zuzanna Podwinska, Eszter Porter and Rhoddy Viveros Munoz}, 49 | year={2022} 50 | } 51 | 52 | ``` 53 | -------------------------------------------------------------------------------- /recipes/cpc1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cpc1/__init__.py -------------------------------------------------------------------------------- /recipes/cpc1/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cpc1/baseline/__init__.py -------------------------------------------------------------------------------- /recipes/cpc1/baseline/results.json: -------------------------------------------------------------------------------- 1 | {"closed_set scores:": {"RMSE": 28.523881198335246, "Std": 0.5796351115705066, "NCC": 0.6208212304051433, "KT": 0.3976092440566517}, "open_set scores:": {"RMSE": 36.512151185224724, "Std": 1.344932258036841, "NCC": 0.5290826426953844, "KT": 0.39116377202829106}} 
-------------------------------------------------------------------------------- /recipes/cpc1/e029_sheffield/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cpc1/e029_sheffield/__init__.py -------------------------------------------------------------------------------- /recipes/cpc1/e029_sheffield/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ??? 3 | cpc1_train_data: ${path.root}/clarity_CPC1_data_train/ 4 | cpc1_test_data: ${path.root}/clarity_CPC1_data_test/ 5 | exp_folder: ${path.root}/e029 6 | 7 | cpc1_track: closed # "closed" or "open" 8 | dev_percent: 0.3 # amount of scenes for dev set 9 | 10 | MSBGEar: # hyperparameters for MSBG ear 11 | src_pos: ff 12 | sample_rate: 44100 13 | equiv_0db_spl: 100 14 | ahr: 20 15 | 16 | asr_config: transformer_cpc1.yaml 17 | 18 | hydra: 19 | output_subdir: Null 20 | run: 21 | dir: . 22 | job: 23 | chdir: True 24 | -------------------------------------------------------------------------------- /recipes/cpc1/e032_sheffield/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cpc1/e032_sheffield/__init__.py -------------------------------------------------------------------------------- /recipes/cpc1/e032_sheffield/config.yaml: -------------------------------------------------------------------------------- 1 | path: 2 | root: ??? 
3 | cpc1_train_data: ${path.root}/clarity_CPC1_data_train/ 4 | cpc1_test_data: ${path.root}/clarity_CPC1_data_test/ 5 | exp_folder: ${path.root}/e032 6 | 7 | cpc1_track: closed # "closed" or "open" 8 | dev_percent: 0.3 # amount of scenes for dev set 9 | 10 | MSBGEar: # hyperparameters for MSBG ear 11 | src_pos: ff 12 | sample_rate: 44100 13 | equiv_0db_spl: 100 14 | ahr: 20 15 | 16 | asr_config: transformer_cpc1.yaml 17 | 18 | hydra: 19 | output_subdir: Null 20 | run: 21 | dir: . 22 | job: 23 | chdir: True 24 | -------------------------------------------------------------------------------- /recipes/cpc1/test_listener_responses/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cpc1/test_listener_responses/__init__.py -------------------------------------------------------------------------------- /recipes/cpc2/README.md: -------------------------------------------------------------------------------- 1 | # The 2nd Clarity Prediction Challenge (CPC2) 2 | 3 | Clarity challenge code for the 2nd Clarity Prediction Challenge (CPC2). 4 | 5 | For more information about the CPC2 please [visit](https://claritychallenge.github.io/) 6 | 7 | The directory contains [baseline code](baseline/README.md) and will be updated with any submitted solutions. 
8 | -------------------------------------------------------------------------------- /recipes/cpc2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cpc2/__init__.py -------------------------------------------------------------------------------- /recipes/cpc2/baseline/.gitignore: -------------------------------------------------------------------------------- 1 | clarity_CPC2_data_demo 2 | -------------------------------------------------------------------------------- /recipes/cpc2/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cpc2/baseline/__init__.py -------------------------------------------------------------------------------- /recipes/cpc2/baseline/config.yaml: -------------------------------------------------------------------------------- 1 | dataset: ??? 2 | 3 | path: 4 | clarity_data_dir: ../clarity_CPC2_data_demo 5 | metadata_dir: ${path.clarity_data_dir}/clarity_data/metadata 6 | signal_dir: ${path.clarity_data_dir}/clarity_data/HA_outputs/signals/CEC2 7 | scene_dir: ${path.clarity_data_dir}/clarity_data/scenes/CEC2 8 | 9 | compute_haspi: 10 | batch: 1 11 | n_batches: 1 12 | set_random_seed: True 13 | 14 | hydra: 15 | run: 16 | dir: exp 17 | job: 18 | chdir: True 19 | -------------------------------------------------------------------------------- /recipes/cpc3/README.md: -------------------------------------------------------------------------------- 1 | # The 3rd Clarity Prediction Challenge (C3C2) 2 | 3 | Clarity challenge code for the 3rd Clarity Prediction Challenge (CPC3). 
4 | 5 | For more information about the CPC3 please [visit](https://claritychallenge.github.io/) 6 | 7 | The directory contains [baseline code](baseline/README.md) and will be updated with any submitted solutions. 8 | -------------------------------------------------------------------------------- /recipes/cpc3/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cpc3/__init__.py -------------------------------------------------------------------------------- /recipes/cpc3/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/cpc3/baseline/__init__.py -------------------------------------------------------------------------------- /recipes/cpc3/baseline/config.yaml: -------------------------------------------------------------------------------- 1 | clarity_data_root: ../clarity_CPC3_data # relative to the exp/ directory 2 | dataset: clarity_demo_data # For testing - use 'clarity_data' for full dataset 3 | split: train # When using the training set 4 | 5 | compute_haspi: 6 | batch: 1 7 | n_batches: 1 8 | set_random_seed: True 9 | 10 | hydra: 11 | run: 12 | dir: exp 13 | job: 14 | chdir: True 15 | -------------------------------------------------------------------------------- /recipes/cpc3/baseline/predict_dev.py: -------------------------------------------------------------------------------- 1 | """Make intelligibility predictions from HASPI scores.""" 2 | 3 | from __future__ import annotations 4 | 5 | import logging 6 | 7 | import hydra 8 | from omegaconf import DictConfig 9 | 10 | from recipes.cpc3.baseline.shared_predict_utils import ( 11 | LogisticModel, 12 | load_dataset_with_haspi, 13 | ) 14 | 15 | log = logging.getLogger(__name__) 16 | 17 | 18 | # pylint: 
disable = no-value-for-parameter 19 | @hydra.main(config_path=".", config_name="config", version_base=None) 20 | def predict_dev(cfg: DictConfig): 21 | """Predict intelligibility from HASPI scores.""" 22 | 23 | # Load the data 24 | log.info("Loading dataset...") 25 | records_train_df = load_dataset_with_haspi(cfg, "train") 26 | records_dev_df = load_dataset_with_haspi(cfg, "dev") 27 | 28 | # Compute the logistic fit 29 | log.info("Making the fitting model...") 30 | model = LogisticModel() 31 | model.fit(records_train_df.haspi_score, records_train_df.correctness) 32 | 33 | # Make predictions for all items in the dev data 34 | log.info("Starting predictions...") 35 | records_dev_df["predicted_correctness"] = model.predict(records_dev_df.haspi_score) 36 | 37 | # Save results to CSV file 38 | 39 | output_file = f"{cfg.dataset}.dev.predict.csv" 40 | records_dev_df[["signal", "predicted_correctness"]].to_csv( 41 | output_file, 42 | index=False, 43 | header=["signal_ID", "intelligibility_score"], 44 | mode="w", 45 | ) 46 | log.info(f"Predictions saved to {output_file}") 47 | 48 | 49 | if __name__ == "__main__": 50 | predict_dev() 51 | -------------------------------------------------------------------------------- /recipes/icassp_2023/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/icassp_2023/__init__.py -------------------------------------------------------------------------------- /recipes/icassp_2023/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/recipes/icassp_2023/baseline/__init__.py -------------------------------------------------------------------------------- /recipes/icassp_2023/baseline/config.yaml: 
-------------------------------------------------------------------------------- 1 | path: 2 | root: ./ # root folder of the project 3 | metadata_dir: ${path.root}/clarity_data/metadata 4 | scenes_listeners_file: ${path.metadata_dir}/scenes_listeners.dev.json 5 | listeners_file: ${path.metadata_dir}/listeners.json 6 | scenes_folder: ${path.root}/clarity_data/dev/scenes 7 | exp_folder: ./exp # folder to store enhanced signals and final results 8 | 9 | nalr: 10 | nfir: 220 11 | sample_rate: 44100 12 | 13 | compressor: 14 | threshold: 0.35 15 | attenuation: 0.1 16 | attack: 50 17 | release: 1000 18 | rms_buffer_size: 0.064 19 | 20 | soft_clip: True 21 | 22 | evaluate: 23 | set_random_seed: True 24 | small_test: False 25 | 26 | # hydra config 27 | hydra: 28 | run: 29 | dir: ${path.exp_folder} 30 | job: 31 | chdir: True 32 | -------------------------------------------------------------------------------- /recipes/icassp_2023/baseline/enhance.py: -------------------------------------------------------------------------------- 1 | """Run the dummy enhancement.""" 2 | 3 | import json 4 | import logging 5 | import pathlib 6 | 7 | import hydra 8 | import numpy as np 9 | from omegaconf import DictConfig 10 | from scipy.io import wavfile 11 | from tqdm import tqdm 12 | 13 | from clarity.utils.audiogram import Listener 14 | from recipes.icassp_2023.baseline.evaluate import make_scene_listener_list 15 | 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | @hydra.main(config_path=".", config_name="config", version_base=None) 20 | def enhance(cfg: DictConfig) -> None: 21 | """Run the dummy enhancement.""" 22 | 23 | enhanced_folder = pathlib.Path("enhanced_signals") 24 | enhanced_folder.mkdir(parents=True, exist_ok=True) 25 | 26 | with open(cfg.path.scenes_listeners_file, encoding="utf-8") as fp: 27 | scenes_listeners = json.load(fp) 28 | 29 | listener_dict = Listener.load_listener_dict(cfg.path.listeners_file) 30 | 31 | # Make list of all scene listener pairs that will be run 
32 | scene_listener_pairs = make_scene_listener_list( 33 | scenes_listeners, cfg.evaluate.small_test 34 | ) 35 | 36 | for scene, listener_id in tqdm(scene_listener_pairs): 37 | sample_rate, signal = wavfile.read( 38 | pathlib.Path(cfg.path.scenes_folder) / f"{scene}_mix_CH1.wav" 39 | ) 40 | 41 | # Convert to 32-bit floating point scaled between -1 and 1 42 | signal = (signal / 32768.0).astype(np.float32) 43 | 44 | # pylint: disable=unused-variable 45 | listener = listener_dict[listener_id] # noqa: F841 46 | 47 | # Note: The audiograms are stored in the listener object, 48 | # but they are not needed for the baseline 49 | 50 | # Baseline just reads the signal from the front microphone pair 51 | # and write it out as the enhanced signal 52 | 53 | wavfile.write( 54 | enhanced_folder / f"{scene}_{listener_id}_enhanced.wav", sample_rate, signal 55 | ) 56 | 57 | 58 | # pylint: disable=no-value-for-parameter 59 | if __name__ == "__main__": 60 | enhance() 61 | -------------------------------------------------------------------------------- /recipes/icassp_2023/baseline/report_score.py: -------------------------------------------------------------------------------- 1 | """Run the dummy enhancement.""" 2 | 3 | import json 4 | import logging 5 | 6 | import hydra 7 | import pandas as pd 8 | from omegaconf import DictConfig 9 | 10 | from recipes.icassp_2023.baseline.evaluate import make_scene_listener_list 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | @hydra.main(config_path=".", config_name="config", version_base=None) 16 | def report_score(cfg: DictConfig) -> None: 17 | """Run the dummy enhancement.""" 18 | 19 | with open(cfg.path.scenes_listeners_file, encoding="utf-8") as fp: 20 | scenes_listeners = json.load(fp) 21 | 22 | results_df = pd.read_csv("scores.csv") 23 | 24 | # Make list of all scene listener pairs that are expected in results file 25 | scene_listener_pairs = make_scene_listener_list( 26 | scenes_listeners, cfg.evaluate.small_test 27 | ) 28 | 
selection_df = pd.DataFrame(scene_listener_pairs, columns=["scene", "listener"]) 29 | 30 | # Select the expected scene listener pairs from the results file 31 | selected_results_df = pd.merge(results_df, selection_df, on=["scene", "listener"]) 32 | 33 | if len(selected_results_df) != len(selection_df): 34 | print("The following results were not found:") 35 | difference = pd.concat( 36 | [selected_results_df[["scene", "listener"]], selection_df] 37 | ).drop_duplicates(keep=False) 38 | print(difference) 39 | else: 40 | # All expected results were found so report the mean score 41 | print(f"Scores based on {len(selected_results_df)} scenes.") 42 | print(selected_results_df[["haspi", "hasqi", "combined"]].mean(axis=0)) 43 | 44 | 45 | # pylint: disable=no-value-for-parameter 46 | if __name__ == "__main__": 47 | report_score() 48 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """Fixtures for testing.""" 2 | 3 | from __future__ import annotations 4 | 5 | from pathlib import Path 6 | 7 | import numpy as np 8 | import pytest 9 | 10 | BASE_DIR = Path.cwd() 11 | RESOURCES = BASE_DIR / "tests" / "resources" 12 | 13 | SEED = 564687 14 | rng = np.random.default_rng(SEED) 15 | 16 | 17 | @pytest.fixture 18 | def make_random_matrix(): 19 | """Generate a random matrix for use in tests. 
20 | 21 | The fixture returns a function that can be called to generate a random matrix 22 | >>> def my_test(random_matrix): 23 | >>> matrix = random_matrix(seed=1234) 24 | or use the global seed 25 | >>> matrix = random_matrix() 26 | """ 27 | 28 | def _random_matrix(seed: int | None = None, size=(100, 100)) -> np.ndarray: 29 | if seed is not None: 30 | # Seed is supplied so use a generator with that seed... 31 | rng_to_use = np.random.default_rng(seed) 32 | else: 33 | # ... else use the global generator 34 | rng_to_use = rng 35 | return np.asarray(rng_to_use.random(size)) 36 | 37 | return _random_matrix 38 | 39 | 40 | @pytest.fixture 41 | def abs_tolerance(): 42 | """Fixture for absolute tolerance value.""" 43 | return 1e-7 44 | 45 | 46 | @pytest.fixture 47 | def rel_tolerance(): 48 | """Fixture for relative tolerance value.""" 49 | return 1e-7 50 | 51 | 52 | def pytest_configure() -> None: 53 | """Configure custom variables for pytest. 54 | 55 | **NB**: pytest automatically calls this hook when the conftest is loaded. 
56 | """ 57 | pytest.abs_tolerance = 1e-7 58 | pytest.rel_tolerance = 1e-7 59 | -------------------------------------------------------------------------------- /tests/enhancer/dnn/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/enhancer/dnn/__init__.py -------------------------------------------------------------------------------- /tests/enhancer/dnn/test_mc_conv_tasnet.py: -------------------------------------------------------------------------------- 1 | """Tests for enhancer.dnn.test_mc_conv_tasnet module""" 2 | -------------------------------------------------------------------------------- /tests/enhancer/dsp/test_filter.py: -------------------------------------------------------------------------------- 1 | """Tests for enhancer.dsp.filter module""" 2 | 3 | import pytest 4 | import torch 5 | 6 | from clarity.enhancer.dsp.filter import AudiometricFIR 7 | 8 | SAMPLE_RATE = 44100 # Sample rate of the signal 9 | NFIR = 220 # Number of FIR filter taps to use in tests 10 | 11 | 12 | @pytest.fixture(autouse=True) 13 | def use_torch(): 14 | """Fixture to ensure torch is used with a consistent seed and settings""" 15 | torch.manual_seed(0) 16 | torch.set_num_threads(1) 17 | torch.set_default_tensor_type(torch.FloatTensor) 18 | torch.set_printoptions(precision=10) 19 | 20 | 21 | def test_audiometric_filter_init(): 22 | """test construction of audiomatric filter""" 23 | fir_filter = AudiometricFIR(sample_rate=SAMPLE_RATE, nfir=NFIR) 24 | assert fir_filter.padding == NFIR // 2 25 | assert fir_filter.window_size == NFIR + 1 26 | 27 | 28 | def test_audiometric_filter_forward(): 29 | """test that the filter can be applied""" 30 | fir_filter = AudiometricFIR(sample_rate=SAMPLE_RATE, nfir=NFIR, device="cpu") 31 | audio = torch.randn(1, 1, 4410) 32 | filtered_audio = fir_filter(audio) 33 | assert filtered_audio.shape == 
audio.shape 34 | assert filtered_audio.cpu().detach().numpy().sum() == pytest.approx( 35 | -60.199371337890625, abs=1e-4 # <- had to relax this tolerance 36 | ) 37 | 38 | 39 | def test_audiometric_filter_forward_error(): 40 | """test that the filter throws error with invalid signal shapes""" 41 | fir_filter = AudiometricFIR(sample_rate=SAMPLE_RATE, nfir=NFIR) 42 | with pytest.raises(RuntimeError): 43 | fir_filter(torch.randn(2, 44100)) 44 | with pytest.raises(RuntimeError): 45 | fir_filter(torch.randn(1, 2, 44100)) 46 | -------------------------------------------------------------------------------- /tests/enhancer/gha/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/enhancer/gha/__init__.py -------------------------------------------------------------------------------- /tests/enhancer/multiband_compressor/test_compressor_qmul.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from clarity.enhancer.multiband_compressor.compressor_qmul import Compressor 5 | 6 | 7 | @pytest.fixture 8 | def default_compressor(): 9 | """Return a Compressor object with default parameters.""" 10 | return Compressor() 11 | 12 | 13 | @pytest.fixture 14 | def custom_compressor(): 15 | """Return a Compressor object with custom parameters.""" 16 | return Compressor( 17 | threshold=-30.0, 18 | ratio=4.0, 19 | attack=10.0, 20 | release=100.0, 21 | makeup_gain=1.25, 22 | sample_rate=44100.0, 23 | knee_width=10.0, 24 | ) 25 | 26 | 27 | @pytest.fixture 28 | def random_signal(): 29 | """Generate a random signal for testing.""" 30 | return np.random.randn(1, 1000) 31 | 32 | 33 | def test_initialization_default(default_compressor): 34 | """Test the initialization of the Compressor class.""" 35 | assert default_compressor.threshold == 0.0 36 | assert 
default_compressor.ratio == 1.0 37 | assert default_compressor.attack == 15.0 38 | assert default_compressor.release == 100.0 39 | assert default_compressor.makeup_gain == 0.0 40 | assert default_compressor.sample_rate == 44100.0 41 | assert default_compressor.knee_width == 0.0 42 | 43 | 44 | def test_initialization_custom(custom_compressor): 45 | """Test the initialization of the Compressor class with custom parameters.""" 46 | assert custom_compressor.threshold == -30.0 47 | assert custom_compressor.ratio == 4.0 48 | assert custom_compressor.attack == 10.0 49 | assert custom_compressor.release == 100.0 50 | assert custom_compressor.makeup_gain == 1.25 51 | assert custom_compressor.sample_rate == 44100.0 52 | assert custom_compressor.knee_width == 10.0 53 | 54 | 55 | def test_call_default(default_compressor, random_signal): 56 | """Test the call method of the Compressor class with default parameters.""" 57 | processed_signal = default_compressor(random_signal) 58 | assert processed_signal.shape == random_signal.shape 59 | 60 | 61 | def test_call_custom(custom_compressor, random_signal): 62 | """Test the call method of the Compressor class with custom parameters.""" 63 | processed_signal = custom_compressor(random_signal) 64 | assert processed_signal.shape == random_signal.shape 65 | -------------------------------------------------------------------------------- /tests/enhancer/test_compressor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from clarity.enhancer.compressor import Compressor 5 | 6 | DEFAULT_FS = 44100 7 | 8 | 9 | def test_compressor_set_attack(): 10 | """Test that the attack time is set correctly.""" 11 | c = Compressor() 12 | c.set_attack(1000) 13 | 14 | assert c.attack == 1.0 / DEFAULT_FS 15 | 16 | 17 | def test_compressor_set_attack_error(): 18 | """Test that the attack time raises divide by zero error if set to 0""" 19 | c = Compressor() 20 | with 
pytest.raises(ZeroDivisionError): 21 | c.set_attack(0) 22 | 23 | 24 | def test_compressor_set_release(): 25 | """Test that the release time is set correctly.""" 26 | c = Compressor() 27 | c.set_release(1000) 28 | 29 | assert c.release == 1.0 / DEFAULT_FS 30 | 31 | 32 | def test_compressor_set_release_error(): 33 | """Test that the release time raises divide by zero error if set to 0.""" 34 | c = Compressor() 35 | with pytest.raises(ZeroDivisionError): 36 | c.set_release(0) 37 | 38 | 39 | def test_compressor_process(): 40 | c = Compressor() 41 | signal = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) 42 | output, rms, comp_ratios = c.process(signal) 43 | 44 | assert len(output) == len(signal) 45 | assert np.all(rms >= 0.0) 46 | assert np.sum(rms) == pytest.approx( 47 | 0.9799197751960967, rel=pytest.rel_tolerance, abs=pytest.abs_tolerance 48 | ) 49 | assert len(comp_ratios) == len(signal) 50 | -------------------------------------------------------------------------------- /tests/enhancer/test_nalr.py: -------------------------------------------------------------------------------- 1 | """Tests for the enhancer.nalr module.""" 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from clarity.enhancer.nalr import NALR 7 | from clarity.utils.audiogram import Audiogram 8 | 9 | cfg_nalr = {"nfir": 220, "sample_rate": 44100} 10 | 11 | 12 | STANDARD_CFS = np.array([250, 500, 1000, 2000, 4000, 6000]) 13 | 14 | 15 | @pytest.mark.parametrize( 16 | "levels, cfs, expected", 17 | [ 18 | ( 19 | np.array([45, 45, 35, 45, 60, 65]), 20 | np.array([250, 500, 1000, 1500, 4000, 6000]), # <-- non default cfs 21 | 29.808253441926134, # was 29.493833... 
(New behaviour due to log freq axis) 22 | ), 23 | ( 24 | np.array([45, 45, 35, 45, 60, 65]), 25 | np.array([250, 500, 1000, 2000, 4000, 6000]), 26 | 28.846583644263408, 27 | ), 28 | ], 29 | ) 30 | def test_nalr(levels: np.ndarray, cfs: np.ndarray, expected: float) -> None: 31 | """Test that the NALR filter is built correctly.""" 32 | enhancer = NALR(**cfg_nalr) 33 | audiogram = Audiogram(levels=levels, frequencies=cfs) 34 | nalr_fir, _ = enhancer.build(audiogram) 35 | assert np.sum(np.abs(nalr_fir)) == pytest.approx( 36 | expected, rel=pytest.rel_tolerance, abs=pytest.abs_tolerance 37 | ) 38 | 39 | 40 | def test_nalr_default_cfs() -> None: 41 | """Test that the NALR filter is the same for the default and standard cfs.""" 42 | enhancer = NALR(**cfg_nalr) 43 | nalr_fir1, _ = enhancer.build( 44 | Audiogram(np.array([45, 45, 35, 45, 60, 65]), STANDARD_CFS) 45 | ) 46 | assert nalr_fir1 == pytest.approx( 47 | nalr_fir1, rel=pytest.rel_tolerance, abs=pytest.abs_tolerance 48 | ) 49 | -------------------------------------------------------------------------------- /tests/evaluator/haaqi/test_haaqi.py: -------------------------------------------------------------------------------- 1 | """Tests for haaqi module""" 2 | 3 | # pylint: disable=import-error 4 | import numpy as np 5 | import pytest 6 | 7 | from clarity.evaluator.haaqi import compute_haaqi, haaqi_v1 8 | from clarity.utils.audiogram import Audiogram 9 | 10 | 11 | def test_haaqi_v1() -> None: 12 | """Test for haaqi_v1 index""" 13 | np.random.seed(0) 14 | sample_rate = 16000 15 | x = np.random.uniform(-1, 1, int(sample_rate * 0.5)) # i.e. 
500 ms of audio 16 | y = np.random.uniform(-1, 1, int(sample_rate * 0.5)) 17 | 18 | hearing_loss = np.array([45, 45, 35, 45, 60, 65]) 19 | freqs = np.array([250, 500, 1000, 2000, 4000, 6000]) 20 | audiogram = Audiogram(levels=hearing_loss, frequencies=freqs) 21 | 22 | equalisation_mode = 1 23 | level1 = 65 24 | 25 | score, _, _, _ = haaqi_v1( 26 | x, sample_rate, y, sample_rate, audiogram, equalisation_mode, level1 27 | ) 28 | assert score == pytest.approx( 29 | 0.111290948, rel=pytest.rel_tolerance, abs=pytest.abs_tolerance 30 | ) 31 | 32 | 33 | @pytest.mark.parametrize( 34 | "levels,freqs,expected_result", 35 | [ 36 | ( 37 | np.array([10, 20, 30, 40, 50, 60]), 38 | np.array([250, 500, 1000, 2000, 4000, 6000]), 39 | 0.113759275, 40 | ), 41 | ( 42 | np.array([10, 20, 40, 50, 60]), 43 | np.array([250, 500, 2000, 4000, 6000]), # missing cfs, requires interp 44 | 0.113759275, 45 | ), 46 | ], 47 | ) 48 | def test_compute_haaqi(levels, freqs, expected_result): 49 | """Test for compute_haaqi function""" 50 | np.random.seed(42) 51 | 52 | sample_rate = 16000 53 | enh_signal = np.random.uniform(-1, 1, int(sample_rate * 0.5)) 54 | ref_signal = np.random.uniform(-1, 1, int(sample_rate * 0.5)) 55 | 56 | audiogram = Audiogram(levels=levels, frequencies=freqs) 57 | 58 | # Compute HAAQI score 59 | score = compute_haaqi( 60 | processed_signal=enh_signal, 61 | reference_signal=ref_signal, 62 | processed_sample_rate=sample_rate, 63 | reference_sample_rate=sample_rate, 64 | audiogram=audiogram, 65 | ) 66 | 67 | # Check that the score is a float between 0 and 1 68 | assert score == pytest.approx( 69 | expected_result, rel=pytest.rel_tolerance, abs=pytest.abs_tolerance 70 | ) 71 | -------------------------------------------------------------------------------- /tests/evaluator/mbstoi/test_mbstoi.py: -------------------------------------------------------------------------------- 1 | """Tests for mbstoi module""" 2 | 3 | # mbstoi 4 | 5 | import numpy as np 6 | import pytest 7 | 8 | from 
clarity.evaluator.mbstoi import mbstoi 9 | 10 | 11 | @pytest.mark.parametrize( 12 | "sr_signal, expected_score", 13 | [ 14 | (10000.0, 0.9275623756362125), # At signal at operating sample rate 15 | (9000.0, 0.9487599218003981), # Signal will need resampling 16 | ], 17 | ) 18 | def test_mbstoi(sr_signal, expected_score) -> None: 19 | """Test for mbstoi function""" 20 | np.random.seed(0) 21 | sig_len = 8000 22 | sample_rate = 10000.0 23 | left_clean = 100 * np.random.random(size=sig_len) 24 | right_clean = left_clean.copy() 25 | right_clean[4:] = right_clean[:-4] 26 | left_noisy = left_clean + 30 * np.random.random(size=sig_len) 27 | right_noisy = right_clean + 30 * np.random.random(size=sig_len) 28 | 29 | mbstoi_val = mbstoi( 30 | left_ear_clean=left_clean, 31 | right_ear_clean=right_clean, 32 | left_ear_noisy=left_noisy, 33 | right_ear_noisy=right_noisy, 34 | sr_signal=sr_signal, # signal sample rate 35 | sample_rate=sample_rate, # operating sample rate 36 | fft_size_in_samples=64, 37 | n_third_octave_bands=5, 38 | centre_freq_first_third_octave_hz=500, 39 | dyn_range=60, 40 | ) 41 | 42 | assert mbstoi_val == pytest.approx( 43 | expected_score, rel=pytest.rel_tolerance, abs=pytest.abs_tolerance 44 | ) 45 | -------------------------------------------------------------------------------- /tests/recipes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cad1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cad1/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cad1/task1/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cad1/task1/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cad1/task1/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cad1/task1/baseline/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cad1/task1/baseline/test_merge_batches.py: -------------------------------------------------------------------------------- 1 | """Tests for merge_batches module""" 2 | -------------------------------------------------------------------------------- /tests/recipes/cad1/task2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cad1/task2/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cad1/task2/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cad1/task2/baseline/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cad1/task2/baseline/test_audio_manager.py: -------------------------------------------------------------------------------- 1 | """Test for AudioManager module""" 2 | 3 | from pathlib import Path 4 | 5 | import numpy as np 6 | import pytest 7 | from scipy.io import wavfile 8 | 9 | from recipes.cad1.task2.baseline.audio_manager import AudioManager 10 | 11 | 12 | def 
test_save_audios(tmp_path): 13 | """Test save_audios method.""" 14 | np.random.seed(42) 15 | # Initialize an audio manager with temporary directory as output audio path 16 | audio_manager = AudioManager(output_audio_path=tmp_path.as_posix()) 17 | 18 | # Create sample audio data 19 | audio_data = np.random.randn(2, 44100) 20 | 21 | # Add audio data to audio manager 22 | audio_manager.add_audios_to_save("test_audio", audio_data) 23 | 24 | # Save audio 25 | audio_manager.save_audios() 26 | 27 | # Check if audio file was saved 28 | audio_file = Path(tmp_path) / "test_audio.wav" 29 | assert audio_file.is_file() 30 | 31 | # Check if audio data was saved correctly 32 | sample_rate, _ = wavfile.read(audio_file) 33 | assert sample_rate == pytest.approx( 34 | audio_manager.sample_rate, rel=pytest.rel_tolerance, abs=pytest.abs_tolerance 35 | ) 36 | 37 | 38 | @pytest.mark.skip(reason="Not implemented yet") 39 | def test_clip_audio(): 40 | """Test clip_audio method.""" 41 | 42 | 43 | @pytest.mark.skip(reason="Not implemented yet") 44 | def test_get_lufs_level(): 45 | """Test get_lufs_level method.""" 46 | 47 | 48 | @pytest.mark.skip(reason="Not implemented yet") 49 | def test_scale_to_lufs(): 50 | """Test scale_to_lufs method.""" 51 | -------------------------------------------------------------------------------- /tests/recipes/cad1/task2/baseline/test_baseline_utils.py: -------------------------------------------------------------------------------- 1 | """Test for baseline_utils.py""" 2 | 3 | from pathlib import Path 4 | 5 | # pylint: disable=import-error 6 | import librosa 7 | import numpy as np 8 | import pytest 9 | from omegaconf import OmegaConf 10 | 11 | from recipes.cad1.task2.baseline.baseline_utils import ( 12 | load_listeners_and_scenes, 13 | read_mp3, 14 | ) 15 | 16 | # load_hrtf,; make_scene_listener_list, 17 | 18 | BASE_DIR = Path.cwd() 19 | RESOURCES = BASE_DIR / "tests" / "resources" / "recipes" / "cad1" / "task2" 20 | 21 | 22 | def test_read_mp3(): 23 | 
"""Test read_mp3()""" 24 | signal, sample_rate = read_mp3(librosa.example("brahms")) 25 | assert isinstance(signal, np.ndarray) 26 | assert isinstance(sample_rate, int) 27 | 28 | 29 | @pytest.mark.skip(reason="Not implemented yet") 30 | def test_load_hrtf(): 31 | """Test for load_hrtf function""" 32 | 33 | 34 | def test_load_listeners_and_scenes(): 35 | """Test load_listeners_and_scenes()""" 36 | config = OmegaConf.create( 37 | { 38 | "path": { 39 | "scenes_file": (RESOURCES / "scenes.json").as_posix(), 40 | "listeners_file": (RESOURCES / "listeners.json").as_posix(), 41 | "scenes_listeners_file": ( 42 | RESOURCES / "scenes_listeners.json" 43 | ).as_posix(), 44 | }, 45 | "evaluate": {"split": "train"}, 46 | } 47 | ) 48 | scenes, listener_audiograms, scene_listeners = load_listeners_and_scenes(config) 49 | assert isinstance(scenes, dict) 50 | assert isinstance(listener_audiograms, dict) 51 | assert isinstance(scene_listeners, dict) 52 | 53 | 54 | @pytest.mark.skip(reason="Not implemented yet") 55 | def test_make_scene_listener_list(): 56 | """Test make_scene_listener_list()""" 57 | -------------------------------------------------------------------------------- /tests/recipes/cad1/task2/baseline/test_car_scene_acoustics.py: -------------------------------------------------------------------------------- 1 | """Tests for car_scene_acoustics module""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.mark.skip(reason="Not implemented yet") 7 | def test_apply_car_acoustics_to_signal(): 8 | """Test the function apply_car_acoustics_to_signal""" 9 | 10 | 11 | # many other methods in CarSceneAcoustics 12 | # plan to test the minimum required methods to get 100% coverage 13 | -------------------------------------------------------------------------------- /tests/recipes/cad1/task2/baseline/test_enhance_task2.py: -------------------------------------------------------------------------------- 1 | """Test the enhance module.""" 2 | 3 | # pylint: disable=import-error 4 | 5 | from 
pathlib import Path 6 | 7 | import numpy as np 8 | import pyloudnorm as pyln 9 | import pytest 10 | from omegaconf import DictConfig 11 | 12 | from clarity.utils.audiogram import Audiogram, Listener 13 | from recipes.cad1.task2.baseline.enhance import enhance_song 14 | 15 | BASE_DIR = Path.cwd() 16 | RESOURCES = BASE_DIR / "tests" / "resources" / "recipes" / "cad1" / "task2" 17 | 18 | 19 | def test_enhance_song(): 20 | """Test the enhance_song function.""" 21 | np.random.seed(42) 22 | 23 | # Set the sample rate and gain 24 | duration = 0.5 25 | 26 | config = DictConfig( 27 | { 28 | "sample_rate": 16000, 29 | "enhance": {"min_level": -11, "max_level": -19, "average_level": -14}, 30 | } 31 | ) 32 | 33 | levels = np.array([20, 30, 35, 45, 50, 60, 65, 60]) 34 | frequencies = np.array([250, 500, 1000, 2000, 3000, 4000, 6000, 8000]) 35 | audiogram = Audiogram(levels=levels, frequencies=frequencies) 36 | 37 | listener = Listener(audiogram, audiogram) 38 | # Create a sample waveform 39 | waveform = np.random.rand(2, int(config.sample_rate * duration)) 40 | 41 | # Call the function 42 | out_left, out_right = enhance_song(waveform, listener, config) 43 | 44 | expected_left = np.load( 45 | RESOURCES / "test_enhance.enhance_song_left.npy", allow_pickle=True 46 | ) 47 | expected_right = np.load( 48 | RESOURCES / "test_enhance.enhance_song_right.npy", allow_pickle=True 49 | ) 50 | 51 | # Check that the output is not equal to the input 52 | np.testing.assert_array_almost_equal(out_left, expected_left) 53 | np.testing.assert_array_almost_equal(out_right, expected_right) 54 | 55 | # Check that the output has the correct loudness 56 | meter = pyln.Meter(config.sample_rate) 57 | 58 | out_loudness = meter.integrated_loudness(np.array([out_left, out_right]).T) 59 | assert np.isclose(out_loudness, -14, atol=0.1) 60 | 61 | 62 | @pytest.mark.skip(reason="Not implemented yet") 63 | def test_enhance(): 64 | """Test enhance function.""" 65 | 
-------------------------------------------------------------------------------- /tests/recipes/cad1/task2/baseline/test_evaluate.py: -------------------------------------------------------------------------------- 1 | """Tests for evaluate module""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.mark.skip(reason="Not implemented yet") 7 | def test_run_calculate_audio_quality(): 8 | """Test the function run_calculate_audio_quality""" 9 | -------------------------------------------------------------------------------- /tests/recipes/cad1/task2/baseline/test_merge_batches.py: -------------------------------------------------------------------------------- 1 | """Test for merge_batches module""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.mark.skip(reason="Not implemented yet") 7 | def test_join_batches(): 8 | """Test the function join_batches""" 9 | -------------------------------------------------------------------------------- /tests/recipes/cad1/task2/data_preparation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cad1/task2/data_preparation/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cad_icassp_2024/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cad_icassp_2024/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cad_icassp_2024/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cad_icassp_2024/baseline/__init__.py 
-------------------------------------------------------------------------------- /tests/recipes/cec1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cec1/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cec1/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cec1/baseline/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cec1/data_preparation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cec1/data_preparation/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cec1/e009_sheffield/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cec1/e009_sheffield/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cec1/e009_sheffield/test_train.py: -------------------------------------------------------------------------------- 1 | """Tests for cec1 e009 train module""" 2 | 3 | import logging 4 | from pathlib import Path 5 | 6 | import hydra 7 | import numpy as np 8 | import pytest 9 | import torch 10 | 11 | from recipes.cec1.e009_sheffield.train import train_amp, train_den 12 | 13 | 14 | @pytest.mark.slow 15 | def test_run(tmp_path): 16 | """Test for the run function.""" 17 | np.random.seed(0) 18 | torch.manual_seed(0) 
19 | 20 | hydra.core.global_hydra.GlobalHydra.instance().clear() 21 | hydra.initialize( 22 | config_path="../../../../recipes/cec1/e009_sheffield", job_name="test_cec1_e009" 23 | ) 24 | 25 | logging.getLogger("pytorch_lightning").setLevel(logging.ERROR) 26 | 27 | hydra_cfg = hydra.compose( 28 | config_name="config", 29 | # Override settings to make a fast training test 30 | overrides=[ 31 | "path.cec1_root=tests/test_data/recipes/cec1/e009_sheffield", 32 | f"path.exp_folder={tmp_path}", 33 | # Disable multiprocessing for testing (faster) 34 | "train_loader.num_workers=0", 35 | "dev_loader.num_workers=0", 36 | "test_loader.num_workers=0", 37 | "train_loader.batch_size=1", 38 | "train_dataset.wav_sample_len=1.0", 39 | "den_trainer.epochs=1", 40 | "amp_trainer.epochs=1", 41 | "fir.nfir=32", 42 | "mc_conv_tasnet.H=64", 43 | "mc_conv_tasnet.B=32", 44 | # The validation sanity check step is slow, so disable it 45 | "amp_trainer.num_sanity_val_steps=0", 46 | ], 47 | ) 48 | 49 | train_den(hydra_cfg, ear="left") 50 | hydra_cfg.downsample_factor = 40 51 | train_amp(hydra_cfg, ear="left") 52 | 53 | expected_files = [ 54 | "left_amp/checkpoints/epoch=0-step=1.ckpt", 55 | "left_amp/best_k_models.json", 56 | "left_amp/best_model.pth", 57 | "left_den/checkpoints/epoch=0-step=1.ckpt", 58 | "left_den/best_k_models.json", 59 | "left_den/best_model.pth", 60 | ] 61 | for filename in expected_files: 62 | assert (Path(tmp_path) / filename).exists() 63 | -------------------------------------------------------------------------------- /tests/recipes/cec2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cec2/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cec2/baseline/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cec2/baseline/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cec2/baseline/data_generation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cec2/baseline/data_generation/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cec2/baseline/data_generation/test_build_additional_scenes.py: -------------------------------------------------------------------------------- 1 | # Tests for build_additional_scenes module 2 | 3 | import json 4 | from unittest.mock import patch 5 | 6 | import hydra 7 | 8 | from recipes.cec2.baseline.data_generation.build_additional_scenes import ( 9 | instantiate_scenes, 10 | ) 11 | 12 | 13 | def not_tqdm(iterable): 14 | """ 15 | Replacement for tqdm that just passes back the iterable. 16 | 17 | Useful for silencing `tqdm` in tests. 18 | """ 19 | return iterable 20 | 21 | 22 | @patch( 23 | "clarity.data.scene_builder_cec2.tqdm", 24 | not_tqdm, 25 | ) 26 | def test_instantiate_scenes(tmp_path): 27 | """Test instantiate_scenes function.""" 28 | 29 | hydra.core.global_hydra.GlobalHydra.instance().clear() 30 | hydra.initialize( 31 | config_path="../../../../../recipes/cec2/baseline/data_generation/", 32 | job_name="test_cec2", 33 | ) 34 | hydra_cfg = hydra.compose( 35 | config_name="additional_data_config", 36 | overrides=[ 37 | "path.root=.", 38 | "path.metadata_dir=tests/test_data/metadata", 39 | f"path.additional_data_file={tmp_path}/scenes.test.json", 40 | "scene_datasets.train.n_scenes=100", 41 | ], 42 | ) 43 | 44 | instantiate_scenes(hydra_cfg) 45 | 46 | # Check that the output file exists... 
47 | filename = tmp_path / "scenes.test.json" 48 | assert filename.exists() 49 | 50 | with open(filename, encoding="utf-8") as fp: 51 | scenes = json.load(fp) 52 | 53 | # ... then check there are the correct number of scenes 54 | # and that all scenes have the correct keys 55 | assert len(scenes) == 100 56 | scene_keys = { 57 | "dataset", 58 | "room", 59 | "scene", 60 | "target", 61 | "duration", 62 | "interferers", 63 | "SNR", 64 | "listener", 65 | } 66 | for scene in scenes: 67 | assert scene.keys() == scene_keys 68 | -------------------------------------------------------------------------------- /tests/recipes/cec2/baseline/data_generation/test_render_additional_scenes.py: -------------------------------------------------------------------------------- 1 | # Tests for render_additional_scenes module 2 | # 3 | import json 4 | from unittest.mock import MagicMock, patch 5 | 6 | import hydra 7 | 8 | from recipes.cec2.baseline.data_generation.render_additional_scenes import run 9 | 10 | 11 | @patch("recipes.cec2.baseline.data_generation.render_additional_scenes.SceneRenderer") 12 | def test_render_scenes(mock_sr, tmp_path): 13 | """Test render_scenes function.""" 14 | 15 | # Mock out the SceneRenderer as we don't want to actually render 16 | # any scenes - it's very slow and tested elsewhere in the unit tests 17 | scene_renderer_instance = MagicMock() 18 | mock_sr.return_value = scene_renderer_instance 19 | 20 | # Using the config file from the actual recipe, but overriding 21 | # just the paths to the scene data and the output directory 22 | hydra.core.global_hydra.GlobalHydra.instance().clear() 23 | hydra.initialize( 24 | config_path="../../../../../recipes/cec2/baseline/data_generation/", 25 | job_name="test_cec2", 26 | ) 27 | hydra_cfg = hydra.compose( 28 | config_name="additional_data_config", 29 | overrides=[ 30 | "path.root=tests/test_data", 31 | "path.metadata_dir=tests/test_data/metadata", 32 | ( 33 | "scene_renderer.train.metadata.scene_definitions=" 34 | 
"tests/test_data/metadata/scenes.test.json" 35 | ), 36 | f"scene_renderer.train.paths.scenes={tmp_path}", 37 | ], 38 | ) 39 | 40 | run(hydra_cfg) 41 | 42 | # Check the the scene renderer was instantiated 43 | assert mock_sr.call_count == 1 44 | assert scene_renderer_instance.render_scenes.call_count == 1 45 | 46 | # Read the scene data directly 47 | with open("tests/test_data/metadata/scenes.test.json", encoding="utf-8") as fp: 48 | expected_scene_data = json.load(fp) 49 | 50 | # Check that the scene data was delivered to the SceneRenderer 51 | assert ( 52 | scene_renderer_instance.render_scenes.call_args.args[0] == expected_scene_data 53 | ) 54 | -------------------------------------------------------------------------------- /tests/recipes/cec2/baseline/test_enhance.py: -------------------------------------------------------------------------------- 1 | """Tests for cec2 baseline enhance module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from pathlib import Path 6 | from unittest.mock import patch 7 | 8 | import hydra 9 | import numpy as np 10 | import pytest 11 | from omegaconf import DictConfig 12 | 13 | from clarity.utils.file_io import read_signal 14 | from recipes.cec2.baseline.enhance import enhance 15 | 16 | 17 | @pytest.fixture() 18 | def hydra_cfg(tmp_path: Path): 19 | """Fixture for hydra config.""" 20 | hydra.core.global_hydra.GlobalHydra.instance().clear() 21 | hydra.initialize( 22 | config_path=".../../../../../../recipes/cec2/baseline", job_name="test_cec2" 23 | ) 24 | cfg = hydra.compose( 25 | config_name="config", 26 | overrides=[ 27 | "path.root=.", 28 | f"path.exp_folder={tmp_path}", 29 | "path.metadata_dir=tests/test_data/metadata", 30 | "path.scenes_listeners_file=" 31 | "tests/test_data/metadata/scenes_listeners.1.json", 32 | "path.listeners_file=tests/test_data/metadata/listeners.json", 33 | "path.scenes_folder=tests/test_data/scenes", 34 | ], 35 | ) 36 | return cfg 37 | 38 | 39 | def not_tqdm(iterable): 40 | """ 41 | Replacement 
for tqdm that just passes back the iterable. 42 | 43 | Useful for silencing `tqdm` in tests. 44 | """ 45 | return iterable 46 | 47 | 48 | @patch("recipes.cec2.baseline.enhance.tqdm", not_tqdm) 49 | def test_enhance(tmp_path: Path, hydra_cfg: DictConfig) -> None: 50 | """Test run_HL_processing function.""" 51 | np.random.seed(0) 52 | 53 | # Run the enhance function 54 | enhance(hydra_cfg) 55 | 56 | # Check that the output signal is correct 57 | filename = tmp_path / "enhanced_signals" / "S06001_L0064_HA-output.wav" 58 | assert filename.exists() 59 | signal = read_signal(filename) 60 | assert np.sum(np.abs(signal)) == pytest.approx( 61 | 78939.73132324219, rel=pytest.rel_tolerance, abs=pytest.abs_tolerance 62 | ) 63 | -------------------------------------------------------------------------------- /tests/recipes/cec2/data_preparation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cec2/data_preparation/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cec2/data_preparation/test_render_scenes.py: -------------------------------------------------------------------------------- 1 | # Tests for render_scenes module 2 | # 3 | import json 4 | from unittest.mock import MagicMock, patch 5 | 6 | import hydra 7 | 8 | from recipes.cec2.data_preparation.render_scenes import run 9 | 10 | 11 | @patch("recipes.cec2.data_preparation.render_scenes.SceneRenderer") 12 | def test_render_scenes(mock_sr, tmp_path): 13 | """Test render_scenes function.""" 14 | 15 | # Mock out the SceneRenderer as we don't want to actually render 16 | # any scenes - it's very slow and tested elsewhere in the unit tests 17 | scene_renderer_instance = MagicMock() 18 | mock_sr.return_value = scene_renderer_instance 19 | 20 | # Using the config file from the actual recipe, but overriding 21 | # 
just the paths to the scene data and the output directory 22 | hydra.core.global_hydra.GlobalHydra.instance().clear() 23 | hydra.initialize( 24 | config_path="../../../../recipes/cec2/data_preparation/", 25 | job_name="test_cec2", 26 | ) 27 | hydra_cfg = hydra.compose( 28 | config_name="config", 29 | overrides=[ 30 | "path.root=tests/test_data", 31 | "path.metadata_dir=tests/test_data/metadata", 32 | ( 33 | "scene_renderer.train.metadata.scene_definitions=" 34 | "tests/test_data/metadata/scenes.test.json" 35 | ), 36 | ( 37 | "scene_renderer.dev.metadata.scene_definitions=" 38 | "tests/test_data/metadata/scenes.test.json" 39 | ), 40 | ( 41 | "scene_renderer.demo.metadata.scene_definitions=" 42 | "tests/test_data/metadata/scenes.test.json" 43 | ), 44 | f"scene_renderer.train.paths.scenes={tmp_path}", 45 | ], 46 | ) 47 | 48 | run(hydra_cfg) 49 | 50 | # Check the the scene renderer was instantiated 51 | assert mock_sr.call_count == 3 52 | assert scene_renderer_instance.render_scenes.call_count == 3 53 | 54 | # Read the scene data directly 55 | with open("tests/test_data/metadata/scenes.test.json", encoding="utf-8") as fp: 56 | expected_scene_data = json.load(fp) 57 | 58 | # Check that the scene data was delivered to the SceneRenderer 59 | assert ( 60 | scene_renderer_instance.render_scenes.call_args.args[0] == expected_scene_data 61 | ) 62 | -------------------------------------------------------------------------------- /tests/recipes/cpc1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cpc1/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cpc1/baseline/__init__.py: -------------------------------------------------------------------------------- 
"""Tests for cpc1 compute_scores module"""

import json
from pathlib import Path

import hydra
import pytest

from recipes.cpc1.baseline.compute_scores import run


@pytest.fixture()
def hydra_cfg():
    """Compose the cpc1 baseline hydra config, pointed at the test data.

    Clears any global hydra state first so the fixture is safe regardless
    of which test initialised hydra before this one.
    """
    hydra.core.global_hydra.GlobalHydra.instance().clear()
    hydra.initialize(
        config_path="../../../../recipes/cpc1/baseline/", job_name="test_cpc1"
    )
    cfg = hydra.compose(
        config_name="config",
        overrides=[
            "train_path.root=tests/test_data/recipes/cpc1",
            (
                "train_path.scenes_file="
                "${train_path.root}/clarity_CPC1_data/metadata/CPC1.train.4.json"
            ),
            (
                "train_indep_path.scenes_file="
                "${train_path.root}/clarity_CPC1_data/metadata/CPC1.train_indep.4.json"
            ),
        ],
    )
    return cfg


def test_run(hydra_cfg):
    """Test run function.

    Runs the baseline scoring and checks the closed/open set metrics
    written to results.json against known-good values.
    """

    expected_results = {
        "closed_set scores:": {
            "RMSE": 80.68942294220068,
            "Std": 8.103245804533998,
            "NCC": 0.2255349034109652,
            "KT": -0.3333333333333334,
        },
        "open_set scores:": {
            "RMSE": 62.32533032225741,
            "Std": 18.61907164264097,
            "NCC": 0.7820801834042259,
            "KT": 1.0,
        },
    }

    run(hydra_cfg)

    # run() writes results.json into the current working directory.
    results_file = Path("results.json")
    try:
        with results_file.open(encoding="utf-8") as f:
            results = json.load(f)

        # TODO: Find out what is causing results to be rounded to 4 dp
        # Need the abs=1e-4 because sometimes the results are being
        # printed rounded to 4 decimal places, and sometimes not.
        # Depends on the order the tests are run in.
        for test_set in ["closed_set scores:", "open_set scores:"]:
            for metric in ["RMSE", "Std", "NCC", "KT"]:
                assert results[test_set][metric] == pytest.approx(
                    expected_results[test_set][metric], abs=1e-4
                )
    finally:
        # Clean up even when an assertion fails, so a stale results.json
        # cannot leak into subsequent test runs.
        if results_file.exists():
            results_file.unlink()
from pathlib import Path
from unittest.mock import patch

import hydra

from recipes.cpc1.e029_sheffield.prepare_data import run


def not_tqdm(iterable):
    """
    Replacement for tqdm that just passes back the iterable.

    Useful for silencing `tqdm` in tests.
    """
    return iterable


@patch("recipes.cpc1.e029_sheffield.prepare_data.tqdm", not_tqdm)
def test_run(tmp_path):
    """Test for the run function.

    Runs data preparation against the test fixtures and checks the
    expected HA output wav files were produced.
    """
    hydra.core.global_hydra.GlobalHydra.instance().clear()
    hydra.initialize(
        config_path="../../../../recipes/cpc1/e029_sheffield", job_name="test_cpc1_e029"
    )
    hydra_cfg = hydra.compose(
        config_name="config",
        overrides=[
            "path.root=tests/test_data/recipes/cpc1/e029_sheffield",
            f"path.exp_folder={tmp_path}",
        ],
    )

    root = Path("tests/test_data/recipes/cpc1/e029_sheffield/")
    expected_files = [
        "clarity_CPC1_data_test/clarity_data/HA_outputs/test/"
        "S08520_L0216_E001_HL-output.wav",
        "clarity_CPC1_data_train/clarity_data/HA_outputs/train/"
        "S08510_L0239_E001_HL-output.wav",
    ]
    run(hydra_cfg)

    try:
        for expected_file in expected_files:
            assert (root / expected_file).exists()
    finally:
        # Always remove generated outputs, even when an assertion fails,
        # so stale files do not affect subsequent runs.
        for expected_file in expected_files:
            output = root / expected_file
            if output.exists():
                output.unlink()
"""Tests for cpc1 e032_sheffield prepare_data module."""

# pylint: disable=all
# This file was causing pylint to crash, so I disabled it for now.

from pathlib import Path
from unittest.mock import patch

import hydra

from recipes.cpc1.e032_sheffield.prepare_data import run


def not_tqdm(iterable):
    """
    Replacement for tqdm that just passes back the iterable.

    Useful for silencing `tqdm` in tests.
    """
    return iterable


@patch("recipes.cpc1.e032_sheffield.prepare_data.tqdm", not_tqdm)
def test_run(tmp_path):
    """Test for the run function.

    Runs data preparation against the test fixtures and checks the
    expected HA output wav files were produced.
    """
    hydra.core.global_hydra.GlobalHydra.instance().clear()
    hydra.initialize(
        config_path="../../../../recipes/cpc1/e032_sheffield", job_name="test_cpc1_e032"
    )
    hydra_cfg = hydra.compose(
        config_name="config",
        overrides=[
            "path.root=tests/test_data/recipes/cpc1/e032_sheffield",
            f"path.exp_folder={tmp_path}",
        ],
    )

    root = Path("tests/test_data/recipes/cpc1/e032_sheffield/")
    expected_files = [
        "clarity_CPC1_data_test/clarity_data/HA_outputs/test/"
        "S08520_L0216_E001_HL-output.wav",
        "clarity_CPC1_data_train/clarity_data/HA_outputs/train/"
        "S08510_L0239_E001_HL-output.wav",
    ]
    run(hydra_cfg)

    try:
        for expected_file in expected_files:
            assert (root / expected_file).exists()
    finally:
        # Always remove generated outputs, even when an assertion fails,
        # so stale files do not affect subsequent runs.
        for expected_file in expected_files:
            output = root / expected_file
            if output.exists():
                output.unlink()
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cpc2/__init__.py -------------------------------------------------------------------------------- /tests/recipes/cpc2/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/cpc2/baseline/__init__.py -------------------------------------------------------------------------------- /tests/recipes/icassp_2023/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/icassp_2023/__init__.py -------------------------------------------------------------------------------- /tests/recipes/icassp_2023/baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/recipes/icassp_2023/baseline/__init__.py -------------------------------------------------------------------------------- /tests/recipes/icassp_2023/baseline/test_enhance.py: -------------------------------------------------------------------------------- 1 | """Tests for icassp_2023 cec2 enhance module.""" 2 | 3 | from __future__ import annotations 4 | 5 | from pathlib import Path 6 | from unittest.mock import patch 7 | 8 | import hydra 9 | import numpy as np 10 | import pytest 11 | from omegaconf import DictConfig 12 | 13 | from clarity.utils.file_io import read_signal 14 | from recipes.icassp_2023.baseline.enhance import enhance 15 | 16 | 17 | @pytest.fixture() 18 | def hydra_cfg(tmp_path: Path): 19 | """Fixture for hydra config.""" 20 | 
hydra.core.global_hydra.GlobalHydra.instance().clear() 21 | hydra.initialize( 22 | config_path="../../../../recipes/icassp_2023/baseline", 23 | job_name="test_icassp_2023", 24 | ) 25 | cfg = hydra.compose( 26 | config_name="config", 27 | overrides=[ 28 | "path.root=tests/test_data", 29 | f"path.exp_folder={tmp_path}", 30 | "path.metadata_dir=tests/test_data/metadata", 31 | "path.scenes_listeners_file=${path.metadata_dir}/scenes_listeners.1.json", 32 | "path.scenes_folder=${path.root}/scenes", 33 | ], 34 | ) 35 | return cfg 36 | 37 | 38 | def not_tqdm(iterable): 39 | """ 40 | Replacement for tqdm that just passes back the iterable. 41 | 42 | Useful for silencing `tqdm` in tests. 43 | """ 44 | return iterable 45 | 46 | 47 | @patch("recipes.icassp_2023.baseline.enhance.tqdm", not_tqdm) 48 | def test_enhance(hydra_cfg: DictConfig) -> None: 49 | """Test run_HL_processing function.""" 50 | np.random.seed(0) 51 | 52 | # Run the enhance function 53 | enhance(hydra_cfg) 54 | 55 | # Check that the output signal is correct 56 | filename = Path("enhanced_signals/S06001_L0064_enhanced.wav") 57 | assert filename.exists() 58 | signal = read_signal(filename) 59 | assert np.sum(np.abs(signal)) == pytest.approx( 60 | 125253.92190551758, rel=pytest.rel_tolerance, abs=pytest.abs_tolerance 61 | ) 62 | 63 | # Note, enhance.py writes results to where this test is run from, 64 | # so we need to clean up. 
65 | filename.unlink() 66 | filename.parent.rmdir() 67 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_CEC2_scene_builder.test_CEC2_room_builder.out: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "R06001", 4 | "dimensions": "4.4317x4.1x2.6", 5 | "target": { 6 | "position": [ 7 | -0.7000, 8 | 1.7000, 9 | 1.2000 10 | ], 11 | "view_vector": [ 12 | 0.2820, 13 | 0.9590, 14 | 0.0000 15 | ] 16 | }, 17 | "listener": { 18 | "position": [ 19 | -0.2000, 20 | 3.4000, 21 | 1.2000 22 | ], 23 | "view_vector": [ 24 | -0.2820, 25 | -0.9590, 26 | 0.0000 27 | ] 28 | }, 29 | "interferers": [ 30 | { 31 | "position": [ 32 | 0.9000, 33 | 3.0000, 34 | 1.2000 35 | ] 36 | }, 37 | { 38 | "position": [ 39 | -1.0000, 40 | 2.6000, 41 | 1.2000 42 | ] 43 | }, 44 | { 45 | "position": [ 46 | 0.9000, 47 | 1.4000, 48 | 1.2000 49 | ] 50 | } 51 | ] 52 | } 53 | ] 54 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_data_HOA_tools_cec2.test_P.out: -------------------------------------------------------------------------------- 1 | P value -0.000 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_data_HOA_tools_cec2.test_U.out: -------------------------------------------------------------------------------- 1 | U value 0.000 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_data_HOA_tools_cec2.test_V.out: -------------------------------------------------------------------------------- 1 | V[0] value -0.000 2 | V[1] value 0.000 3 | V[2] value 0.000 4 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_data_HOA_tools_cec2.test_W.out: 
-------------------------------------------------------------------------------- 1 | W[0] value 0.000 2 | W[1] value 0.000 3 | W[2] value 0.000 4 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_data_HOA_tools_cec2.test_centred_element.out: -------------------------------------------------------------------------------- 1 | centered element 0.360 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_data_HOA_tools_cec2.test_compute_UVW_coefficients.out: -------------------------------------------------------------------------------- 1 | UVW coefficients 0.577, -0.289, -0.000 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_data_HOA_tools_cec2.test_compute_band_rotation.out: -------------------------------------------------------------------------------- 1 | Band rotations [[ 1.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 2 | 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 3 | 0.00000000e+00] 4 | [ 0.00000000e+00 7.07106781e-01 0.00000000e+00 7.07106781e-01 5 | 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 6 | 0.00000000e+00] 7 | [ 0.00000000e+00 0.00000000e+00 1.00000000e+00 0.00000000e+00 8 | 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 9 | 0.00000000e+00] 10 | [ 0.00000000e+00 -7.07106781e-01 0.00000000e+00 7.07106781e-01 11 | 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 12 | 0.00000000e+00] 13 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 14 | -2.22044605e-16 0.00000000e+00 0.00000000e+00 0.00000000e+00 15 | 1.00000000e+00] 16 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 17 | 0.00000000e+00 7.07106781e-01 0.00000000e+00 7.07106781e-01 18 | 0.00000000e+00] 19 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 20 | 0.00000000e+00 
0.00000000e+00 1.00000000e+00 0.00000000e+00 21 | 0.00000000e+00] 22 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 23 | 0.00000000e+00 -7.07106781e-01 0.00000000e+00 7.07106781e-01 24 | 0.00000000e+00] 25 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 26 | -1.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 27 | -2.22044605e-16]], [array([[1.]]), array([[ 0.70710678, 0. , 0.70710678], 28 | [ 0. , 1. , 0. ], 29 | [-0.70710678, 0. , 0.70710678]]), array([[-2.22044605e-16, 0.00000000e+00, 0.00000000e+00, 30 | 0.00000000e+00, 1.00000000e+00], 31 | [ 0.00000000e+00, 7.07106781e-01, 0.00000000e+00, 32 | 7.07106781e-01, 0.00000000e+00], 33 | [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 34 | 0.00000000e+00, 0.00000000e+00], 35 | [ 0.00000000e+00, -7.07106781e-01, 0.00000000e+00, 36 | 7.07106781e-01, 0.00000000e+00], 37 | [-1.00000000e+00, 0.00000000e+00, 0.00000000e+00, 38 | 0.00000000e+00, -2.22044605e-16]])] 39 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_data_HOA_tools_cec2.test_compute_rotation_matrix.out: -------------------------------------------------------------------------------- 1 | Rotation matrix [[ 1.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 2 | 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 3 | 0.00000000e+00] 4 | [ 0.00000000e+00 7.07106781e-01 0.00000000e+00 7.07106781e-01 5 | 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 6 | 0.00000000e+00] 7 | [ 0.00000000e+00 0.00000000e+00 1.00000000e+00 0.00000000e+00 8 | 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 9 | 0.00000000e+00] 10 | [ 0.00000000e+00 -7.07106781e-01 0.00000000e+00 7.07106781e-01 11 | 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 12 | 0.00000000e+00] 13 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 14 | -2.22044605e-16 0.00000000e+00 0.00000000e+00 0.00000000e+00 15 | 
1.00000000e+00] 16 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 17 | 0.00000000e+00 7.07106781e-01 0.00000000e+00 7.07106781e-01 18 | 0.00000000e+00] 19 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 20 | 0.00000000e+00 0.00000000e+00 1.00000000e+00 0.00000000e+00 21 | 0.00000000e+00] 22 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 23 | 0.00000000e+00 -7.07106781e-01 0.00000000e+00 7.07106781e-01 24 | 0.00000000e+00] 25 | [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 26 | -1.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 27 | -2.22044605e-16]] 28 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_engine_losses.test_sisnr_loss.out: -------------------------------------------------------------------------------- 1 | SISNR loss 36.5829 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_engine_losses.test_snr_loss.out: -------------------------------------------------------------------------------- 1 | SNR loss 2.90764 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_engine_losses.test_stoi_level_loss.out: -------------------------------------------------------------------------------- 1 | STOI level loss 0.0169409 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_engine_losses.test_stoi_loss.out: -------------------------------------------------------------------------------- 1 | STOI loss 0.0164208 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_enhancers.test_GHA.out: -------------------------------------------------------------------------------- 1 | signal output: 2 | prerelease_combination4_smooth 3 | 
-------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_enhancers.test_GHA_inputs.out: -------------------------------------------------------------------------------- 1 | signal output: 2 | [[-0.30786133 -0.39199829 -0.34420776 -0.35498047] 3 | [-0.30331421 -0.4085083 -0.33901978 -0.37487793] 4 | [-0.29840088 -0.42907715 -0.33230591 -0.40112305] 5 | [-0.29516602 -0.4453125 -0.32678223 -0.41842651] 6 | [-0.29556274 -0.45758057 -0.32110596 -0.43707275] 7 | [-0.29144287 -0.47451782 -0.31506348 -0.46224976] 8 | [-0.28607178 -0.48565674 -0.31271362 -0.48049927] 9 | [-0.28259277 -0.49224854 -0.31155396 -0.49224854] 10 | [-0.28088379 -0.49679565 -0.30471802 -0.50222778] 11 | [-0.28219604 -0.49954224 -0.29943848 -0.51016235]] 12 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_enhancers.test_dsp_filter.out: -------------------------------------------------------------------------------- 1 | signal output: 2 | [[[0.5568 0.862 0.0993 0.1481 0.3449 0.7114 0.5499 1.0058 0.5112 0.7095]]] 3 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_enhancers.test_gha_audiogram.out: -------------------------------------------------------------------------------- 1 | Audiogram original: 2 | [ 250 500 1000 2000 4000 8000] 3 | [2. 1. 2. 2. 2. 1.] 4 | [2. 1. 1. 2. 1. 2.] 5 | ['NOTHING', 'NOTHING'] 6 | Audiogram new: 7 | [ 500 1000 2000] 8 | [1. 2. 2.] 9 | [1. 1. 2.] 10 | ['NOTHING', 'NOTHING'] 11 | Audiogram original: 12 | [ 250 500 1000 2000 4000 8000] 13 | [16. 14. 15. 16. 17. 15.] 14 | [17. 14. 13. 17. 14. 16.] 15 | ['MILD', 'MILD'] 16 | Audiogram new: 17 | [ 500 1000 2000] 18 | [14. 15. 16.] 19 | [14. 13. 17.] 20 | ['MILD', 'MILD'] 21 | Audiogram original: 22 | [ 250 500 1000 2000 4000 8000] 23 | [47. 43. 46. 47. 50. 45.] 24 | [50. 41. 40. 52. 42. 47.] 
25 | ['MODERATE', 'MODERATE'] 26 | Audiogram new: 27 | [ 500 1000 2000] 28 | [43. 46. 47.] 29 | [41. 40. 52.] 30 | ['MODERATE', 'MODERATE'] 31 | Audiogram original: 32 | [ 250 500 1000 2000 4000 8000] 33 | [62. 58. 61. 63. 67. 60.] 34 | [66. 54. 53. 69. 56. 63.] 35 | ['SEVERE', 'SEVERE'] 36 | Audiogram new: 37 | [ 500 1000 2000] 38 | [58. 61. 63.] 39 | [54. 53. 69.] 40 | ['SEVERE', 'SEVERE'] 41 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_evaluators_msbg.test_firwin2.out: -------------------------------------------------------------------------------- 1 | [-2.14597e-05 1.26870e-05 -8.63342e-06 9.71205e-07 1.36178e-06 2 | -2.55385e-05 3.00180e-06 -6.50707e-05 -1.07068e-05 -1.09422e-04 3 | -4.54185e-05 -1.42841e-04 -1.00592e-04 -1.45943e-04 -1.64998e-04 4 | -1.05354e-04 -2.16263e-04 -2.60389e-05 -2.27386e-04 6.01322e-05 5 | -1.80258e-04 9.68517e-05 -8.18485e-05 2.09897e-05 2.49435e-05 6 | -2.07307e-04 6.45390e-05 -5.72635e-04 -4.89200e-05 -9.86453e-04 7 | -3.72806e-04 -1.29854e-03 -8.93188e-04 -1.34463e-03 -1.49744e-03 8 | -1.02279e-03 -1.98254e-03 -3.75258e-04 -2.11128e-03 3.57610e-04 9 | -1.71309e-03 7.44325e-04 -8.07276e-04 2.49590e-04 2.88409e-04 10 | -1.60264e-03 9.04779e-04 -5.02154e-03 5.27227e-05 -9.74681e-03 11 | -3.46039e-03 -1.48997e-02 -1.08722e-02 -1.89030e-02 -2.33702e-02 12 | -1.93952e-02 -4.22927e-02 -1.28007e-02 -7.03307e-02 7.92937e-03 13 | -1.17328e-01 6.80084e-02 -2.52154e-01 5.85211e-01 5.85211e-01 14 | -2.52154e-01 6.80084e-02 -1.17328e-01 7.92937e-03 -7.03307e-02 15 | -1.28007e-02 -4.22927e-02 -1.93952e-02 -2.33702e-02 -1.89030e-02 16 | -1.08722e-02 -1.48997e-02 -3.46039e-03 -9.74681e-03 5.27227e-05 17 | -5.02154e-03 9.04779e-04 -1.60264e-03 2.88409e-04 2.49590e-04 18 | -8.07276e-04 7.44325e-04 -1.71309e-03 3.57610e-04 -2.11128e-03 19 | -3.75258e-04 -1.98254e-03 -1.02279e-03 -1.49744e-03 -1.34463e-03 20 | -8.93188e-04 -1.29854e-03 -3.72806e-04 -9.86453e-04 
-4.89200e-05 21 | -5.72635e-04 6.45390e-05 -2.07307e-04 2.49435e-05 2.09897e-05 22 | -8.18485e-05 9.68517e-05 -1.80258e-04 6.01322e-05 -2.27386e-04 23 | -2.60389e-05 -2.16263e-04 -1.05354e-04 -1.64998e-04 -1.45943e-04 24 | -1.00592e-04 -1.42841e-04 -4.54185e-05 -1.09422e-04 -1.07068e-05 25 | -6.50707e-05 3.00180e-06 -2.55385e-05 1.36178e-06 9.71205e-07 26 | -8.63342e-06 1.26870e-05 -2.14597e-05] 27 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_evaluators_msbg.test_gen_eh2008_speech_noise.out: -------------------------------------------------------------------------------- 1 | [-2.4544 -2.28809 -2.2546 -2.645 -2.7897 -2.62201 -2.68118 -2.93731 2 | -2.89048] 3 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_evaluators_msbg.test_gen_tone.out: -------------------------------------------------------------------------------- 1 | [-7.63033e+00 -6.76347e+00 -5.86230e+00 -4.93139e+00 -3.97547e+00 2 | -2.99939e+00 -2.00808e+00 -1.00659e+00 -7.76099e-13] 3 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_evaluators_msbg.test_measure_rms.out: -------------------------------------------------------------------------------- 1 | 10.7239970, [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 2 | 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 3 | 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 4 | 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 5 | 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 6 | 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 7 | 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 8 | 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 9 | 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 10 | 162 163 164 165 166 167 168 169 170 171 172 173 174 
175 176 177 178 179 11 | 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 12 | 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 13 | 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 14 | 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 15 | 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 16 | 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 17 | 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 18 | 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 19 | 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 20 | 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 21 | 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 22 | 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 23 | 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 24 | 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 25 | 432 433 434 435 436 437 438 439 440], -0.0008829, 20.0000000 26 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_evaluators_msbg.test_pad.out: -------------------------------------------------------------------------------- 1 | [1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 2 | 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 3 | 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 4 | 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 5 | 0. 0. 0. 0.] 
6 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_full_CEC1_pipeline.test_full_cec1_pipeline.out: -------------------------------------------------------------------------------- 1 | Enhanced audio MBSTOI score is 0.2188711 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_full_CEC2_pipeline.test_full_cec2_pipeline.out: -------------------------------------------------------------------------------- 1 | Enhanced audio HASPI score is 0.74915 2 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_mc_conv_tasnet.test_convtasnet.out: -------------------------------------------------------------------------------- 1 | ctn output: 2 | [[ 0.001889 -0.008292] 3 | [ 0.001597 -0.008242] 4 | [ 0.001265 -0.008123] 5 | ... 6 | [-0.001017 -0.000657] 7 | [-0.000668 -0.000418] 8 | [-0.000345 -0.0002 ]] 9 | -------------------------------------------------------------------------------- /tests/regression/_regtest_outputs/test_mc_conv_tasnet.test_overlap_add.out: -------------------------------------------------------------------------------- 1 | overlap add output: 2 | [ 0. 0.00999983 0.01999867 ... 
# Regression test
# Use scene builder to build random scene

import json
from pathlib import Path

from omegaconf import OmegaConf

from clarity.data.scene_builder_cec2 import RoomBuilder, SceneBuilder, set_random_seed


def test_CEC2_scene_builder(regtest):
    """Regression test for CEC2 scene builder"""
    cfg = OmegaConf.load("tests/test_data/configs/test_CEC2_scene_builder.yaml")
    cfg.path.root = "tests"
    set_random_seed(cfg.random_seed)
    dataset = "train"
    room_builder = RoomBuilder()

    room_file = Path(cfg.path.metadata_dir) / f"rooms.{dataset}.json"
    room_builder.load(room_file)
    scene_builder = SceneBuilder(
        rb=room_builder,
        scene_datasets=cfg.scene_datasets[dataset],
        target=cfg.target,
        interferer=cfg.interferer,
        snr_range=cfg.snr_range[dataset],
        listener=cfg.listener,
        shuffle_rooms=cfg.shuffle_rooms,
    )
    scene_builder.instantiate_scenes(dataset=dataset)

    # All regression output goes through regtest; the stray debug
    # print(len(...)) that polluted stdout has been removed.
    with regtest:
        print(json.dumps(scene_builder.scenes, indent=4))


# Regression tests for the engine losses

import torch

from clarity.engine.losses import SISNRLoss, SNRLoss, STOILevelLoss, STOILoss


def test_sisnr_loss(regtest):
    """Regression test for the SI-SNR loss on fixed random signals."""
    torch.manual_seed(0)
    torch.set_num_threads(1)
    si_snr_loss = SISNRLoss()
    x = torch.randn(10, 1000)
    y = torch.randn(10, 1000)
    loss = si_snr_loss.forward(x, y)

    regtest.write(f"SISNR loss {loss:0.4f}\n")


def test_snr_loss(regtest):
    """Regression test for the SNR loss on fixed random signals."""
    torch.manual_seed(0)
    torch.set_num_threads(1)
    snr_loss = SNRLoss()
    x = torch.randn(10, 1000)
    y = torch.randn(10, 1000)
    loss = snr_loss.forward(x, y)

    regtest.write(f"SNR loss {loss:0.5f}\n")


def test_stoi_loss(regtest):
    """Regression test for the STOI loss on fixed random signals."""
    torch.manual_seed(0)
    torch.set_num_threads(1)
    stoi_loss = STOILoss(sr=16000)
    x = torch.randn(2, 16000)
    y = torch.randn(2, 16000)
    loss = stoi_loss.forward(x, y)

    regtest.write(f"STOI loss {loss:0.7f}\n")


def test_stoi_level_loss(regtest):
    """Regression test for the level-aware STOI loss on fixed random signals."""
    torch.manual_seed(0)
    torch.set_num_threads(1)
    stoi_level_loss = STOILevelLoss(sr=16000, alpha=0.5)
    x = torch.randn(2, 16000)
    y = torch.randn(2, 16000)
    loss = stoi_level_loss.forward(x, y)

    regtest.write(f"STOI level loss {loss:0.7f}\n")
| import numpy as np 2 | import pytest 3 | 4 | from clarity.evaluator.msbg.msbg_utils import ( 5 | firwin2, 6 | gen_eh2008_speech_noise, 7 | gen_tone, 8 | measure_rms, 9 | pad, 10 | ) 11 | 12 | # pylint: disable=W0613 13 | # pylint false positives due to fixtures. pylint-pytest does not seem to work :( 14 | 15 | 16 | @pytest.fixture(name="use_numpy") 17 | def fixture_use_numpy(): 18 | """Set numpy seed and print options for each test""" 19 | np.random.seed(0) 20 | np_print_opts = np.get_printoptions() 21 | np.set_printoptions(precision=5, threshold=1000) 22 | yield 23 | np.set_printoptions(**np_print_opts) 24 | 25 | 26 | def test_gen_eh2008_speech_noise(regtest, use_numpy): 27 | signal = gen_eh2008_speech_noise(0.1, 44100.0, 0.0) 28 | with regtest: 29 | print(signal[-9:]) 30 | 31 | 32 | def test_gen_tone(regtest, use_numpy): 33 | signal = gen_tone(500, 0.1, 44100.0, 20.0) 34 | with regtest: 35 | print(signal[-9:]) 36 | 37 | 38 | def test_firwin2(regtest, use_numpy): 39 | params = firwin2(128, [0.0, 0.1, 0.9, 1.0], [0, 1.0, 1.0, 0.0]) 40 | with regtest: 41 | print(params) 42 | 43 | 44 | def test_pad(regtest, use_numpy): 45 | padded = pad(np.array([1.0, 1.0, 1.0, 1.0]), 100) 46 | with regtest: 47 | print(padded) 48 | 49 | 50 | def test_measure_rms(use_numpy, regtest): 51 | signal = gen_tone(500, 0.05, 44100.0, 20.0) 52 | noise = gen_eh2008_speech_noise(0.05, 44100.0, 0.0) 53 | rms, idx, rel_dB_thresh, active = measure_rms(signal + noise, 44100, 0.0, 10.0) 54 | with regtest: 55 | print(f"{rms:.7f}, {idx}, {rel_dB_thresh:.7f}, {active:.7f}") 56 | -------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task1/test_enhance.test_apply_baseline_ha.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad1/task1/test_enhance.test_apply_baseline_ha.npy 
-------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task1/test_enhance.test_decompose_signal_demucs.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad1/task1/test_enhance.test_decompose_signal_demucs.npy -------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task1/test_enhance.test_decompose_signal_openunmix.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad1/task1/test_enhance.test_decompose_signal_openunmix.npy -------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task1/test_enhance.test_process_stems_for_listener.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad1/task1/test_enhance.test_process_stems_for_listener.npy -------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task1/test_enhance.test_separate_sources.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad1/task1/test_enhance.test_separate_sources.npy -------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task2/listeners.json: -------------------------------------------------------------------------------- 1 | { 2 | "L5071": { 3 | "name": "L5071", 4 | "audiogram_cfs": [ 5 | 250, 6 | 500, 
7 | 1000, 8 | 2000, 9 | 3000, 10 | 4000, 11 | 6000, 12 | 8000 13 | ], 14 | "audiogram_levels_l": [ 15 | 15, 16 | 30, 17 | 40, 18 | 55, 19 | 55, 20 | 55, 21 | 70, 22 | 65 23 | ], 24 | "audiogram_levels_r": [ 25 | 15, 26 | 20, 27 | 35, 28 | 45, 29 | 40, 30 | 35, 31 | 50, 32 | 65 33 | ] 34 | }, 35 | "L5042": { 36 | "name": "L5042", 37 | "audiogram_cfs": [ 38 | 250, 39 | 500, 40 | 1000, 41 | 2000, 42 | 3000, 43 | 4000, 44 | 6000, 45 | 8000 46 | ], 47 | "audiogram_levels_l": [ 48 | 25, 49 | 25, 50 | 40, 51 | 50, 52 | 50, 53 | 65, 54 | 80, 55 | 80 56 | ], 57 | "audiogram_levels_r": [ 58 | 20, 59 | 20, 60 | 30, 61 | 45, 62 | 60, 63 | 70, 64 | 80, 65 | 80 66 | ] 67 | } 68 | } -------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task2/scenes.json: -------------------------------------------------------------------------------- 1 | { 2 | "SCENE01": { 3 | "split": "valid" 4 | }, 5 | "SCENE02": { 6 | "split": "valid" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task2/scenes_listeners.json: -------------------------------------------------------------------------------- 1 | { 2 | "SCENE1": ["L5071"], 3 | "SCENE2": ["L5042"] 4 | } -------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task2/test_build_scene_metadata.json_sample.json: -------------------------------------------------------------------------------- 1 | {"key1": "value1", "key2": "value2"} -------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task2/test_enhance.enhance_song_left.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad1/task2/test_enhance.enhance_song_left.npy 
-------------------------------------------------------------------------------- /tests/resources/recipes/cad1/task2/test_enhance.enhance_song_right.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad1/task2/test_enhance.enhance_song_right.npy -------------------------------------------------------------------------------- /tests/resources/recipes/cad_icassp_2024/test_enhance.test_decompose_signal_demucs.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad_icassp_2024/test_enhance.test_decompose_signal_demucs.npy -------------------------------------------------------------------------------- /tests/resources/recipes/cad_icassp_2024/test_enhance.test_decompose_signal_openunmix.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad_icassp_2024/test_enhance.test_decompose_signal_openunmix.npy -------------------------------------------------------------------------------- /tests/resources/recipes/cad_icassp_2024/test_enhance.test_process_remix_for_listener_w_compressor.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad_icassp_2024/test_enhance.test_process_remix_for_listener_w_compressor.npy -------------------------------------------------------------------------------- /tests/resources/recipes/cad_icassp_2024/test_enhance.test_process_remix_for_listener_wo_compressor.npy: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/recipes/cad_icassp_2024/test_enhance.test_process_remix_for_listener_wo_compressor.npy -------------------------------------------------------------------------------- /tests/resources/utils/test_carnoise.signal_generator.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/utils/test_carnoise.signal_generator.npy -------------------------------------------------------------------------------- /tests/resources/utils/test_source_separation_support.test_separate_sources.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/utils/test_source_separation_support.test_separate_sources.npy -------------------------------------------------------------------------------- /tests/resources/utils/test_source_separation_support.test_separate_sources_stereo.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/resources/utils/test_source_separation_support.test_separate_sources_stereo.npy -------------------------------------------------------------------------------- /tests/test_data/clarity_data/hrir: -------------------------------------------------------------------------------- 1 | ../hrir -------------------------------------------------------------------------------- /tests/test_data/clarity_data/metadata: -------------------------------------------------------------------------------- 1 | ../metadata 
-------------------------------------------------------------------------------- /tests/test_data/clarity_data/train/interferers: -------------------------------------------------------------------------------- 1 | ../../interferers -------------------------------------------------------------------------------- /tests/test_data/clarity_data/train/rooms: -------------------------------------------------------------------------------- 1 | ../../rooms -------------------------------------------------------------------------------- /tests/test_data/clarity_data/train/targets: -------------------------------------------------------------------------------- 1 | ../../targets -------------------------------------------------------------------------------- /tests/test_data/filetypes/valid.jsonl: -------------------------------------------------------------------------------- 1 | {"id": 1, "name": "xxx"} 2 | {"id": 2, "name": "yyy"} 3 | {"id": 3, "name": "zzz"} 4 | -------------------------------------------------------------------------------- /tests/test_data/hrir/HRIRs_MAT/VP_N6-BTE_fr.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/hrir/HRIRs_MAT/VP_N6-BTE_fr.mat -------------------------------------------------------------------------------- /tests/test_data/hrir/HRIRs_MAT/VP_N6-BTE_mid.mat: -------------------------------------------------------------------------------- 1 | VP_N6-BTE_fr.mat -------------------------------------------------------------------------------- /tests/test_data/hrir/HRIRs_MAT/VP_N6-BTE_rear.mat: -------------------------------------------------------------------------------- 1 | VP_N6-BTE_fr.mat -------------------------------------------------------------------------------- /tests/test_data/hrir/HRIRs_MAT/VP_N6-ED.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/hrir/HRIRs_MAT/VP_N6-ED.mat -------------------------------------------------------------------------------- /tests/test_data/interferers/music/1111967.low.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/interferers/music/1111967.low.mp3 -------------------------------------------------------------------------------- /tests/test_data/interferers/noise/CIN_fan_014.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/interferers/noise/CIN_fan_014.wav -------------------------------------------------------------------------------- /tests/test_data/interferers/speech/som_04766_05.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/interferers/speech/som_04766_05.wav -------------------------------------------------------------------------------- /tests/test_data/metadata/scenes.cec1.test.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "room": { 3 | "name": "R00001", 4 | "dimensions": "5.9x3.4186x2.9" 5 | }, 6 | "hrirfilename": "VP_N5-ED", 7 | "target": { 8 | "Positions": [ 9 | -0.5, 10 | 3.4, 11 | 1.2 12 | ], 13 | "ViewVectors": [ 14 | 0.291, 15 | -0.957, 16 | 0 17 | ], 18 | "name": "T010_G0N_02468", 19 | "nsamples": 109809 20 | }, 21 | "listener": { 22 | "Positions": [ 23 | 0.2, 24 | 1.1, 25 | 1.2 26 | ], 27 | "ViewVectors": [ 28 | -0.414, 29 | 0.91, 30 | 0 31 | ] 32 | }, 33 | 
"interferer": { 34 | "Positions": [ 35 | 0.4, 36 | 3.2, 37 | 1.2 38 | ], 39 | "name": "CIN_fan_014", 40 | "nsamples": 383670, 41 | "duration": 27, 42 | "type": "noise", 43 | "offset": 82115 44 | }, 45 | "azimuth_target_listener": -7.54, 46 | "azimuth_interferer_listener": -29.9, 47 | "scene": "S06001", 48 | "dataset": "test", 49 | "pre_samples": 88200, 50 | "post_samples": 44100, 51 | "SNR": 0.976 52 | }] -------------------------------------------------------------------------------- /tests/test_data/metadata/scenes.test.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "dataset": "dev", 3 | "room": "R06001", 4 | "scene": "S06001", 5 | "target": { 6 | "name": "T010_G0N_02468", 7 | "time_start": 78260, 8 | "time_end": 243635 9 | }, 10 | "duration": 287735, 11 | "interferers": [{ 12 | "position": 1, 13 | "time_start": 0, 14 | "time_end": 287735, 15 | "type": "noise", 16 | "name": "CIN_fan_014.wav", 17 | "offset": 7565 18 | }, 19 | { 20 | "position": 3, 21 | "time_start": 0, 22 | "time_end": 287735, 23 | "type": "speech", 24 | "name": "som_04766_05.wav", 25 | "offset": 5097 26 | } 27 | ], 28 | "SNR": 2.5580, 29 | "listener": { 30 | "rotation": [{ 31 | "sample": 90952.7142, 32 | "angle": -125.8235 33 | }, 34 | { 35 | "sample": 100246.7142, 36 | "angle": -112.0054 37 | } 38 | ], 39 | "hrir_filename": [ 40 | "VP_N6-ED", 41 | "VP_N6-BTE_fr", 42 | "VP_N6-BTE_mid", 43 | "VP_N6-BTE_rear" 44 | ] 45 | } 46 | }] -------------------------------------------------------------------------------- /tests/test_data/metadata/scenes_listeners.1.json: -------------------------------------------------------------------------------- 1 | { 2 | "S06001": [ 3 | "L0064" 4 | ] 5 | } -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/baseline/eval_signals/S06001_L0064_HL-mixoutput.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cec1/baseline/eval_signals/S06001_L0064_HL-mixoutput.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/baseline/eval_signals/S06001_L0064_HL-output.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cec1/baseline/eval_signals/S06001_L0064_HL-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/baseline/eval_signals/S06001_L0064_HLddf-output.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cec1/baseline/eval_signals/S06001_L0064_HLddf-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/baseline/eval_signals/S06001_flat0dB_HL-output.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cec1/baseline/eval_signals/S06001_flat0dB_HL-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/e009_sheffield/clarity_CEC1_data/clarity_data/dev/scenes: -------------------------------------------------------------------------------- 1 | ../../../../../../scenes -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/e009_sheffield/clarity_CEC1_data/clarity_data/metadata/listeners.json: -------------------------------------------------------------------------------- 1 | 
../../../../../../metadata/listeners.json -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/e009_sheffield/clarity_CEC1_data/clarity_data/metadata/scenes.dev.json: -------------------------------------------------------------------------------- 1 | scenes.train.json -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/e009_sheffield/clarity_CEC1_data/clarity_data/metadata/scenes.train.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "room": { 3 | "name": "R00001", 4 | "dimensions": "5.9x3.4186x2.9" 5 | }, 6 | "hrirfilename": "VP_N5-ED", 7 | "target": { 8 | "Positions": [ 9 | -0.5, 10 | 3.4, 11 | 1.2 12 | ], 13 | "ViewVectors": [ 14 | 0.291, 15 | -0.957, 16 | 0 17 | ], 18 | "name": "T010_G0N_02468", 19 | "nsamples": 109809 20 | }, 21 | "listener": { 22 | "Positions": [ 23 | 0.2, 24 | 1.1, 25 | 1.2 26 | ], 27 | "ViewVectors": [ 28 | -0.414, 29 | 0.91, 30 | 0 31 | ] 32 | }, 33 | "interferer": { 34 | "Positions": [ 35 | 0.4, 36 | 3.2, 37 | 1.2 38 | ], 39 | "name": "CIN_fan_014", 40 | "nsamples": 383670, 41 | "duration": 27, 42 | "type": "noise", 43 | "offset": 82115 44 | }, 45 | "azimuth_target_listener": -7.54, 46 | "azimuth_interferer_listener": -29.9, 47 | "scene": "S06001", 48 | "dataset": "test", 49 | "pre_samples": 88200, 50 | "post_samples": 44100, 51 | "SNR": 0.976 52 | }] -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/e009_sheffield/clarity_CEC1_data/clarity_data/train/scenes: -------------------------------------------------------------------------------- 1 | ../../../../../../scenes -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/e009_sheffield/clarity_CEC1_data_eval/clarity_data/eval/scenes: 
-------------------------------------------------------------------------------- 1 | ../../../../../../scenes -------------------------------------------------------------------------------- /tests/test_data/recipes/cec1/e009_sheffield/clarity_CEC1_data_eval/clarity_data/metadata/scenes_listeners.eval.json: -------------------------------------------------------------------------------- 1 | ../../../../../../metadata/scenes_listeners.1.json -------------------------------------------------------------------------------- /tests/test_data/recipes/cec2/baseline/cec2_si.csv: -------------------------------------------------------------------------------- 1 | scene,listener,score 2 | S06001,L0064,0.29 3 | S06002,L0064,0.49 4 | -------------------------------------------------------------------------------- /tests/test_data/recipes/cec2/baseline/eval_signals/S06001_L0064_HA-output.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cec2/baseline/eval_signals/S06001_L0064_HA-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/clarity_CPC1_data/clarity_data/HA_outputs/train/S08510_L0239_E001.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cpc1/clarity_CPC1_data/clarity_data/HA_outputs/train/S08510_L0239_E001.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/clarity_CPC1_data/clarity_data/scenes/S08510_mixed_CH0.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cpc1/clarity_CPC1_data/clarity_data/scenes/S08510_mixed_CH0.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/clarity_CPC1_data/clarity_data/scenes/S08510_target_anechoic.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cpc1/clarity_CPC1_data/clarity_data/scenes/S08510_target_anechoic.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/clarity_CPC1_data/metadata/CPC1.train.4.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "prompt": "i suppose you wouldn't be free for dinner this evening", 3 | "scene": "S08510", 4 | "n_words": 10, 5 | "hits": 1, 6 | "listener": "L0239", 7 | "system": "E001", 8 | "correctness": 10.0, 9 | "response": "freeze evening", 10 | "volume": 56, 11 | "signal": "S08510_L0239_E001" 12 | }, 13 | { 14 | "prompt": "i think he confirmed this on saturday", 15 | "scene": "S08529", 16 | "n_words": 7, 17 | "hits": 5, 18 | "listener": "L0216", 19 | "system": "E001", 20 | "correctness": 71.4285714286, 21 | "response": "i think you confirm this on saturday", 22 | "volume": 99, 23 | "signal": "S08529_L0216_E001" 24 | }, 25 | { 26 | "prompt": "the term mixed race is also inaccurate", 27 | "scene": "S08532", 28 | "n_words": 7, 29 | "hits": 0, 30 | "listener": "L0242", 31 | "system": "E001", 32 | "correctness": 0.0, 33 | "response": "", 34 | "volume": 75, 35 | "signal": "S08532_L0242_E001" 36 | }, 37 | { 38 | "prompt": "it will remain off the song sheet", 39 | "scene": "S08537", 40 | "n_words": 7, 41 | "hits": 0, 42 | "listener": "L0229", 43 | "system": "E001", 44 | "correctness": 0.0, 45 | "response": "", 
46 | "volume": 100, 47 | "signal": "S08537_L0229_E001" 48 | } 49 | ] -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/clarity_CPC1_data/metadata/CPC1.train.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "prompt": "i suppose you wouldn't be free for dinner this evening", 3 | "scene": "S08510", 4 | "n_words": 10, 5 | "hits": 1, 6 | "listener": "L0239", 7 | "system": "E001", 8 | "correctness": 10.0, 9 | "response": "freeze evening", 10 | "volume": 56, 11 | "signal": "S08510_L0239_E001" 12 | }] -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/clarity_CPC1_data/metadata/CPC1.train_indep.4.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "prompt": "i suppose you wouldn't be free for dinner this evening", 3 | "scene": "S08510", 4 | "n_words": 10, 5 | "hits": 1, 6 | "listener": "L0239", 7 | "system": "E001", 8 | "correctness": 10.0, 9 | "response": "freeze evening", 10 | "volume": 56, 11 | "signal": "S08510_L0239_E001" 12 | }, 13 | { 14 | "prompt": "i think he confirmed this on saturday", 15 | "scene": "S08529", 16 | "n_words": 7, 17 | "hits": 5, 18 | "listener": "L0216", 19 | "system": "E001", 20 | "correctness": 71.4285714286, 21 | "response": "i think you confirm this on saturday", 22 | "volume": 99, 23 | "signal": "S08529_L0216_E001" 24 | }, 25 | { 26 | "prompt": "the term mixed race is also inaccurate", 27 | "scene": "S08532", 28 | "n_words": 7, 29 | "hits": 0, 30 | "listener": "L0242", 31 | "system": "E001", 32 | "correctness": 0.0, 33 | "response": "", 34 | "volume": 75, 35 | "signal": "S08532_L0242_E001" 36 | }, 37 | { 38 | "prompt": "it will remain off the song sheet", 39 | "scene": "S08537", 40 | "n_words": 7, 41 | "hits": 0, 42 | "listener": "L0229", 43 | "system": "E001", 44 | "correctness": 0.0, 45 | "response": "", 46 | "volume": 100, 47 | 
"signal": "S08537_L0229_E001" 48 | } 49 | ] -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/e029_sheffield/clarity_CPC1_data_test/clarity_data/HA_outputs/test/S08520_L0216_E001.wav: -------------------------------------------------------------------------------- 1 | ../../../../../clarity_CPC1_data/clarity_data/HA_outputs/train/S08510_L0239_E001.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/e029_sheffield/clarity_CPC1_data_test/clarity_data/scenes/S08520_target_anechoic.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cpc1/e029_sheffield/clarity_CPC1_data_test/clarity_data/scenes/S08520_target_anechoic.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/e029_sheffield/clarity_CPC1_data_test/metadata/CPC1.test.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "prompt": "or i won't come to the party", 3 | "scene": "S08520", 4 | "n_words": 7, 5 | "hits": 1, 6 | "listener": "L0216", 7 | "system": "E001", 8 | "correctness": 14.2857142857, 9 | "response": "for i am", 10 | "volume": 99, 11 | "signal": "S08520_L0216_E001" 12 | }] -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/e029_sheffield/clarity_CPC1_data_test/metadata/listeners.CPC1_all.json: -------------------------------------------------------------------------------- 1 | ../../clarity_CPC1_data_train/metadata/listeners.CPC1_train.json -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/e029_sheffield/clarity_CPC1_data_train/clarity_data/HA_outputs/train/S08510_L0239_E001.wav: 
-------------------------------------------------------------------------------- 1 | ../../../../../clarity_CPC1_data/clarity_data/HA_outputs/train/S08510_L0239_E001.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/e029_sheffield/clarity_CPC1_data_train/clarity_data/scenes/S08510_target_anechoic.wav: -------------------------------------------------------------------------------- 1 | ../../../../clarity_CPC1_data/clarity_data/scenes/S08510_target_anechoic.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/e029_sheffield/clarity_CPC1_data_train/metadata/CPC1.train.json: -------------------------------------------------------------------------------- 1 | ../../../clarity_CPC1_data/metadata/CPC1.train.json -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/e029_sheffield/clarity_CPC1_data_train/metadata/listeners.CPC1_train.json: -------------------------------------------------------------------------------- 1 | ../../../clarity_CPC1_data/metadata/listeners.CPC1_train.json -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/e032_sheffield: -------------------------------------------------------------------------------- 1 | e029_sheffield -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/exps/test/sii.csv: -------------------------------------------------------------------------------- 1 | signal_ID,intelligibility_score 2 | S08520_L0216_E001,0.141754787420827215 3 | S08547_L0239_E001,0.241754787420827215 4 | S08564_L0206_E001,0.341754787420827215 5 | S08564_L0212_E001,0.441754787420827215 6 | -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/exps/test_indep/sii.csv: 
-------------------------------------------------------------------------------- 1 | signal_ID,intelligibility_score 2 | S08520_L0243_E018,0.141754787420827215 3 | S08523_L0225_E018,0.241754787420827215 4 | S08523_L0231_E018,0.341754787420827215 5 | S08547_L0229_E018,0.441754787420827215 6 | -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/exps/train/eval_signals/S08510_L0239_E001_HL-mixoutput.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cpc1/exps/train/eval_signals/S08510_L0239_E001_HL-mixoutput.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/exps/train/eval_signals/S08510_L0239_E001_HL-output.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cpc1/exps/train/eval_signals/S08510_L0239_E001_HL-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/exps/train/eval_signals/S08510_L0239_E001_HLddf-output.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cpc1/exps/train/eval_signals/S08510_L0239_E001_HLddf-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/exps/train/eval_signals/S08510_flat0dB_HL-output.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/recipes/cpc1/exps/train/eval_signals/S08510_flat0dB_HL-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/exps/train/sii.csv: -------------------------------------------------------------------------------- 1 | signal_ID,intelligibility_score 2 | S08510_L0239_E001,-0.041754787420827215 3 | S08529_L0216_E001,-0.051754787420827215 4 | S08532_L0242_E001,-0.061754787420827215 5 | S08537_L0229_E001,-0.071754787420827215 6 | -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc1/exps/train_indep/sii.csv: -------------------------------------------------------------------------------- 1 | signal_ID,intelligibility_score 2 | S08510_L0239_E001,-0.041754787420827215 3 | S08529_L0216_E001,-0.051754787420827215 4 | S08532_L0242_E001,-0.061754787420827215 5 | S08537_L0229_E001,-0.071754787420827215 6 | -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc2/clarity_data/HA_outputs/signals/CEC2/S08547_L0001_E001.wav: -------------------------------------------------------------------------------- 1 | ../../../../../cec2/baseline/eval_signals/S06001_L0064_HA-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc2/clarity_data/HA_outputs/signals/CEC2/S08564_L0001_E001.wav: -------------------------------------------------------------------------------- 1 | ../../../../../cec2/baseline/eval_signals/S06001_L0064_HA-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc2/clarity_data/HA_outputs/signals/CEC2/S08564_L0002_E002.wav: -------------------------------------------------------------------------------- 1 | 
../../../../../cec2/baseline/eval_signals/S06001_L0064_HA-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc2/clarity_data/HA_outputs/signals/CEC2/S08564_L0003_E003.wav: -------------------------------------------------------------------------------- 1 | ../../../../../cec2/baseline/eval_signals/S06001_L0064_HA-output.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc2/clarity_data/metadata/CEC1.train.sample.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "prompt": "i don't want us to apportion blame she said", 3 | "scene": "S08547", 4 | "n_words": 9, 5 | "hits": 4, 6 | "listener": "L0001", 7 | "system": "E001", 8 | "correctness": 44.4444444444, 9 | "response": "i don't want to have to report he said", 10 | "volume": 56, 11 | "signal": "S08547_L0001_E001" 12 | }, 13 | { 14 | "prompt": "at home indoors i didn't ask my mum", 15 | "scene": "S08564", 16 | "n_words": 8, 17 | "hits": 2, 18 | "listener": "L0001", 19 | "system": "E001", 20 | "correctness": 25.0, 21 | "response": "at home", 22 | "volume": 50, 23 | "signal": "S08564_L0001_E001" 24 | }, 25 | { 26 | "prompt": "at home indoors i didn't ask my mum", 27 | "scene": "S08564", 28 | "n_words": 8, 29 | "hits": 2, 30 | "listener": "L0002", 31 | "system": "E002", 32 | "correctness": 25.0, 33 | "response": "at home", 34 | "volume": 50, 35 | "signal": "S08564_L0002_E002" 36 | }, 37 | { 38 | "prompt": "at home indoors i didn't ask my mum", 39 | "scene": "S08564", 40 | "n_words": 8, 41 | "hits": 2, 42 | "listener": "L0003", 43 | "system": "E003", 44 | "correctness": 25.0, 45 | "response": "at home", 46 | "volume": 50, 47 | "signal": "S08564_L0003_E003" 48 | } 49 | ] -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc2/clarity_data/scenes/CEC2/S08547_target_ref.wav: 
-------------------------------------------------------------------------------- 1 | ../../../../../scenes/S06001_target_anechoic.wav -------------------------------------------------------------------------------- /tests/test_data/recipes/cpc2/clarity_data/scenes/CEC2/S08564_target_ref.wav: -------------------------------------------------------------------------------- 1 | ../../../../../scenes/S06001_target_anechoic.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/HOA_IRs/HOA_R06001_i1.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/HOA_IRs/HOA_R06001_i1.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/HOA_IRs/HOA_R06001_i2.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/HOA_IRs/HOA_R06001_i2.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/HOA_IRs/HOA_R06001_i3.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/HOA_IRs/HOA_R06001_i3.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/HOA_IRs/HOA_R06001_t.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/HOA_IRs/HOA_R06001_t.wav -------------------------------------------------------------------------------- 
/tests/test_data/rooms/brir/anech_brir_R00001_t_CH1.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/anech_brir_R00001_t_CH1.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/brir/brir_R00001_i1_CH0.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/brir_R00001_i1_CH0.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/brir/brir_R00001_i1_CH1.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/brir_R00001_i1_CH1.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/brir/brir_R00001_i1_CH2.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/brir_R00001_i1_CH2.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/brir/brir_R00001_i1_CH3.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/brir_R00001_i1_CH3.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/brir/brir_R00001_t_CH0.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/brir_R00001_t_CH0.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/brir/brir_R00001_t_CH1.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/brir_R00001_t_CH1.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/brir/brir_R00001_t_CH2.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/brir_R00001_t_CH2.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/brir/brir_R00001_t_CH3.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/brir_R00001_t_CH3.wav -------------------------------------------------------------------------------- /tests/test_data/rooms/brir/brir_R06001_t_CH1.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/rooms/brir/brir_R06001_t_CH1.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_mix_CH0.wav: -------------------------------------------------------------------------------- 1 | S06001_mixed_CH0.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_mix_CH1.wav: 
-------------------------------------------------------------------------------- 1 | S06001_mixed_CH1.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_mix_CH2.wav: -------------------------------------------------------------------------------- 1 | S06001_mixed_CH2.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_mix_CH3.wav: -------------------------------------------------------------------------------- 1 | S06001_mixed_CH3.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_mixed_CH0.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_mixed_CH0.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_mixed_CH1.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_mixed_CH1.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_mixed_CH2.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_mixed_CH2.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_mixed_CH3.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_mixed_CH3.wav 
-------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_target_CH0.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_target_CH0.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_target_CH1.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_target_CH1.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_target_CH2.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_target_CH2.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_target_CH3.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_target_CH3.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_target_anechoic.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_target_anechoic.wav -------------------------------------------------------------------------------- /tests/test_data/scenes/S06001_target_anechoic_CH1.wav: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/scenes/S06001_target_anechoic_CH1.wav -------------------------------------------------------------------------------- /tests/test_data/targets/T010_G0N_02468.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claritychallenge/clarity/77bdeaae10fc6f7b8e2fe1c7c26db4f4660f1daa/tests/test_data/targets/T010_G0N_02468.wav -------------------------------------------------------------------------------- /tests/test_data/test/interferers: -------------------------------------------------------------------------------- 1 | ../interferers -------------------------------------------------------------------------------- /tests/test_data/test/rooms: -------------------------------------------------------------------------------- 1 | ../rooms -------------------------------------------------------------------------------- /tests/test_data/test/targets: -------------------------------------------------------------------------------- 1 | ../targets -------------------------------------------------------------------------------- /tests/test_import.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=unused-import 2 | import clarity.enhancer # noqa: F401 3 | 4 | 5 | def test_null(): 6 | """Test that should always.""" 7 | assert True 8 | -------------------------------------------------------------------------------- /tests/test_scene_renderer_cec2.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import clarity.data.scene_renderer_cec2 as sr 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | 8 | def test_rotation(): 9 | """Test head rotation code.""" 10 | rotation = [ 11 | {"sample": 100, "angle": -1.50}, 12 | {"sample": 200, "angle": 1.50}, 13 | ] 14 | origin = [1.0, 0.0, 0.0] 15 | 
duration = 300 16 | 17 | angles = sr.two_point_rotation(rotation, origin, duration) 18 | logger.info(f"{angles[0]}, {angles[299]}") 19 | logger.info(angles) 20 | -------------------------------------------------------------------------------- /tests/utils/car_noise_simulator/test_carnoise_signal_generator.py: -------------------------------------------------------------------------------- 1 | """Tests for the Car noise signal generator""" 2 | 3 | # pylint: disable=import-error 4 | 5 | from pathlib import Path 6 | 7 | import numpy as np 8 | 9 | from clarity.utils.car_noise_simulator.carnoise_signal_generator import ( 10 | CarNoiseSignalGenerator, 11 | ) 12 | 13 | BASE_DIR = Path.cwd() 14 | RESOURCES = BASE_DIR / "tests" / "resources" / "utils" 15 | 16 | 17 | def test_car_noise_generation(): 18 | """Test that the car noise generator returns the expected signal""" 19 | np.random.seed(42) 20 | carnoise_params = { 21 | "bump": {"btype": "bandpass", "cutoff_hz": [30, 60], "order": 1}, 22 | "dip_high": {"btype": "highpass", "cutoff_hz": 300, "order": 2}, 23 | "dip_low": {"btype": "lowpass", "cutoff_hz": 200, "order": 2}, 24 | "engine_num_harmonics": 25, 25 | "gear": 6, 26 | "primary_filter": { 27 | "btype": "lowpass", 28 | "cutoff_hz": 16.860000000000003, 29 | "order": 1, 30 | }, 31 | "reference_level_db": 30, 32 | "rpm": 1680.0000000000002, 33 | "secondary_filter": { 34 | "btype": "lowpass", 35 | "cutoff_hz": 280.0, 36 | "order": 2, 37 | }, 38 | "speed": 100.0, 39 | } 40 | 41 | car_noise = CarNoiseSignalGenerator( 42 | sample_rate=16000, duration_secs=1, random_flag=True 43 | ) 44 | car_noise_signal = car_noise.generate_car_noise(carnoise_params, 3, 0.5) 45 | 46 | assert car_noise_signal.shape == (4, 16000) 47 | expected = np.load( 48 | RESOURCES / "test_carnoise.signal_generator.npy", allow_pickle=True 49 | ) 50 | np.testing.assert_array_almost_equal(car_noise_signal, expected) 51 | -------------------------------------------------------------------------------- 
/tests/utils/test_results_support.py: -------------------------------------------------------------------------------- 1 | """test for results support module""" 2 | 3 | # pylint: disable=import-error 4 | from pathlib import Path 5 | 6 | import pytest 7 | 8 | from clarity.utils.results_support import ResultsFile 9 | 10 | # Define some sample data for testing 11 | sample_header = ["Name", "Score"] 12 | sample_data = [{"Name": "Alice", "Score": 95}, {"Name": "Bob", "Score": 88}] 13 | 14 | 15 | @pytest.fixture(name="results_file") 16 | def fixture_results_file(tmpdir): 17 | # Create a temporary directory and a temporary CSV file for testing 18 | file_name = Path(tmpdir, "test_results.csv") 19 | return ResultsFile(file_name, sample_header) 20 | 21 | 22 | def test_create_file_str(tmpdir): 23 | # Create a temporary directory and a temporary CSV file for testing 24 | file_name = f"{tmpdir}/test_results.csv" 25 | result_file = ResultsFile(file_name, sample_header) 26 | assert result_file.file_name.as_posix() == file_name 27 | 28 | 29 | def test_add_result(results_file): 30 | # Test adding a result to the CSV file 31 | results_file.add_result({"Name": "Charlie", "Score": 75}) 32 | 33 | # Read the CSV file and check if the added data is present 34 | with open(results_file.file_name, encoding="utf-8") as csv_file: 35 | lines = csv_file.readlines() 36 | assert len(lines) == 2 # There should be 2 lines (header + 1 data row) 37 | assert "Charlie,75\n" in lines # Check if the added data is present 38 | 39 | 40 | def test_header_written(results_file): 41 | # Test if the header row is written when the ResultsFile is created 42 | with open(results_file.file_name, encoding="utf-8") as csv_file: 43 | lines = csv_file.readlines() 44 | assert len(lines) == 1 # There should be 1 line (only header) 45 | assert lines[0].strip() == "Name,Score" # Check the header content 46 | 47 | 48 | def test_missing_column(results_file): 49 | # Test adding a result with a missing column 50 | with 
pytest.raises(KeyError): 51 | results_file.add_result({"Name": "Eve"}) 52 | 53 | 54 | def test_nonexistent_file(tmp_path): 55 | # Test creating a ResultsFile with a non-existent file 56 | file_name = Path(tmp_path) / "nonexistent.csv" 57 | with pytest.raises(FileNotFoundError): 58 | ResultsFile(file_name, sample_header, append_results=True) 59 | --------------------------------------------------------------------------------