├── .gitignore
├── Code
│   ├── Local_Databases
│   │   └── AIR
│   │       └── ACE16.tar.gz
│   ├── pythonsrc
│   │   ├── ace_discriminative_nets.py
│   │   ├── fe_utils.py
│   │   ├── gan_model_worker.sh
│   │   ├── run_ace_discriminative_nets.sh
│   │   ├── run_cnnrnn_net.sh
│   │   ├── utils_base.py
│   │   ├── utils_dnntrain.py
│   │   ├── utils_reverb.py
│   │   └── utils_spaudio.py
│   └── results_dir
│       └── ace_h5_info.h5
├── LICENSE
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
1 | *.synctex.gz(busy)
2 | Code/results_dir/concWavs
3 | Code/results_dir/tensorlogs
4 | Code/results_dir/stNW_MultChan_all
5 | Code/examplestructs
6 | Code/Local_Databases
7 | Code/results_dir/feature_extractors_*.mat
8 | *.fls
9 | *.fdb_latexmk
10 | *.out
11 | *.swp
12 | *.aux
13 | *.bbl
14 | *.blg
15 | *.log
16 | *.synctex.gz
17 | *.mexmaci64
18 | *.mexa64
19 | *.mexa32
20 | *.m~
21 | *.asv
22 | *.glo
23 | *.xdy
24 | *.toc
25 | __pycache__
26 | .idea
27 | .spyproject
28 | *.pyc
29 | *.scp
30 | *.ark
31 | Code/results_dir/tensorlog*
32 | Code/results_dir/surface_model*.*
33 | Code/results_dir/airs_*
34 | Code/results_dir/boundary_ids_*
35 | Code/results_dir/names_*
36 | *.wav
37 | Code/matlabsrc/*.wav
38 | Code/pythonsrc/*.wav
39 | tmp.py
40 | Code/pythonsrc/train_file_tuples.py
41 | Code/results_dir/training_test_data.json
42 | Code/matlabsrc/ThirdParty/simonhenin-columnlegend-8883602/
43 | Code/results_dir/acenvgenmodel_cachedir/
44 | Code/results_dir/air_modeling_results/
45 | Code/results_dir/gan_h5_info.h5
46 | Code/results_dir/training_test_data_wav.h5
47 | Code/tmp.m
48 | Code/pythonsrc/requirements.txt
49 | 
--------------------------------------------------------------------------------
/Code/Local_Databases/AIR/ACE16.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/papayiannis/reverberation_learning_python/81ec8e70bea614c5d8a38a8ece7a7a39ac1f50b9/Code/Local_Databases/AIR/ACE16.tar.gz
--------------------------------------------------------------------------------
/Code/pythonsrc/ace_discriminative_nets.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Constantinos Papayiannis
2 | #
3 | # This file is part of Reverberation Learning Toolbox for Python.
4 | #
5 | # Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 | #
10 | # Reverberation Learning Toolbox for Python is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with Reverberation Learning Toolbox for Python. If not, see <http://www.gnu.org/licenses/>.
17 | 
18 | """
19 | 
20 | This file is the main worker for training and evaluating the DNNs proposed in [1].
21 | The script /Code/pythonsrc/run_ace_discriminative_nets.sh offers usage examples for the experiments
22 | presented in the paper.
23 | 
24 | This file was originally distributed in the repository at:
25 | {repo}
26 | 
27 | If you use this code in your work, then cite [1].
28 | 
29 | [1] C. Papayiannis, C. Evers and P. A. Naylor,
30 | "End-to-End Classification of Reverberant Rooms Using DNNs,"
31 | in IEEE/ACM Transactions on Audio, Speech, and Language Processing,
32 | vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
33 | 
34 | """
35 | 
36 | import argparse
37 | from os.path import exists
38 | from subprocess import call
39 | from time import time
40 | 
41 | import numpy as np
42 | from keras import backend as K
43 | from keras.layers import Dense, InputLayer, Reshape, \
44 |     Dropout, Conv2D, MaxPooling2D, GRU, Bidirectional, TimeDistributed, \
45 |     Activation, BatchNormalization
46 | from keras.layers import Layer
47 | from keras.models import Sequential
48 | from keras.models import load_model
49 | from tabulate import tabulate
50 | 
51 | from fe_utils import get_ace_xy
52 | from utils_base import float2str
53 | from utils_base import run_command
54 | from utils_dnntrain import model_trainer, get_scaler_descaler, PostEpochWorker, \
55 |     accuracy_eval, batch_gen
56 | 
57 | _TIMESTAMP = str(time()) + '_' + str(np.random.rand(1)[0])
58 | HOSTNAME = run_command('hostname')[0]
59 | FAST_TEST = False and not HOSTNAME == 'sapws'
60 | MODEL_BASENAME = 'ace_model'
61 | 
62 | MAX_EPOCHS = 50  # 50
63 | UTT_PER_ENV_DEF = 20  # 20
64 | MAX_STEPS_PER_EPOCH = 1e100  # 1e100
65 | 
66 | BATCH_SIZE_BASE_PER_CLASS_AIR = 1
67 | BATCH_SIZE_BASE_PER_CLASS_SPEECH = 1
68 | MODEL_FRAMESIZE_SPEECH = 320
69 | MAX_SPEECH_LEN = 5
70 | WAVFORM_LOGPOW = False
71 | GET_POW_SPEC = True
72 | START_AT_MAX = True
73 | 
74 | MODEL_FS = 16000.
75 | SIM_DATA_FS = 16000.
76 | 
77 | SCRATCHPAD_DEF = '/tmp/ace_models_unsup/'
78 | SCRATCHPAD = SCRATCHPAD_DEF
79 | 
80 | 
81 | class Attention(Layer):
82 |     # https://www.analyticsvidhya.com/blog/2019/11/comprehensive-guide-attention-mechanism-deep-learning/
83 |     def __init__(self, **kwargs):
84 |         super(Attention, self).__init__(**kwargs)
85 | 
86 |     def build(self, input_shape):
87 |         self.W = self.add_weight(name="att_weight", shape=(input_shape[-1], 1), initializer="normal")
88 |         self.b = self.add_weight(name="att_bias", shape=(input_shape[1], 1), initializer="zeros")
89 |         super(Attention, self).build(input_shape)
90 | 
91 |     def call(self, x):
92 |         et = K.squeeze(K.tanh(K.dot(x, self.W) + self.b), axis=-1)
93 |         at = K.softmax(et)
94 |         at = K.expand_dims(at, axis=-1)
95 |         output = x * at
96 |         return K.sum(output, axis=1)
97 | 
98 |     def compute_output_shape(self, input_shape):
99 |         return (input_shape[0], input_shape[-1])
100 | 
101 |     def get_config(self):
102 |         return super(Attention, self).get_config()
103 | 
104 | 
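# --- Editor's note -------------------------------------------------------------
# A minimal sketch of how the Attention layer above can be used on its own: it
# pools a [batch, time, features] sequence into a [batch, features] summary.
# Illustrative only; the sizes and the `_attention_demo` helper are hypothetical
# and not part of the original code.
def _attention_demo():
    from keras.layers import Input
    from keras.models import Model
    seq_in = Input(shape=(100, 64))  # 100 time steps, 64 features per step
    # The GRU keeps the full sequence; Attention then computes a softmax
    # weighting over the 100 steps and returns their weighted sum: (None, 32).
    pooled = Attention()(GRU(32, return_sequences=True)(seq_in))
    return Model(seq_in, pooled)
# -------------------------------------------------------------------------------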
105 | def get_model_speech(input_dims, n_outputs, dense_width=128, filters=(8, 16, 32), kernel_size=((3, 3),) * 3,
106 |                      strides=((1, 1),) * 3, pooling_size=((2, 3),) * 3, use_cnn=False, use_rnn=False,
107 |                      use_attention=False):
108 |     """
109 |     Constructs models for environment classification based on reverberant speech.
110 | 
111 |     Args:
112 |         input_dims: Dimensionality of the input
113 |         n_outputs: Number of output classes
114 |         dense_width: Width of FF layers
115 |         filters: Number of Conv filters
116 |         kernel_size: Kernel size for Conv filters
117 |         strides: Strides of Conv filters
118 |         pooling_size: The pooling size for the Max Pooling layers.
119 |         use_cnn: Enable the use of convolutional layers
120 |         use_rnn: Enable the use of recurrent layers
121 |         use_attention: Enable Attention
122 | 
123 |     Returns:
124 |         A Keras Sequential model
125 | 
126 |     """
127 | 
128 |     activation_layer = lambda: Activation('relu')
129 | 
130 |     print(f'Generating model with inputs: {input_dims}')
131 | 
132 |     if use_rnn:
133 |         n_recurrent = 2
134 |     else:
135 |         n_recurrent = 0
136 | 
137 |     model = Sequential()
138 |     model.add(InputLayer(input_shape=tuple(list(input_dims))))
139 |     model.add(BatchNormalization())
140 |     if not use_cnn:
141 |         for _, _ in enumerate(filters):
142 |             model.add(TimeDistributed(
143 |                 Dense(dense_width, activation='linear', )
144 |             ))
145 |             model.add(activation_layer())
146 |     else:
147 |         model.add(Reshape((model.output_shape[1], model.output_shape[2], 1)))
148 |         for i, nfilts in enumerate(filters):
149 |             for _ in range(2):
150 |                 model.add(Conv2D(nfilts, kernel_size[i],
151 |                                  activation='linear', padding='same',
152 |                                  strides=strides[i]))
153 |                 model.add(activation_layer())
154 |             model.add(MaxPooling2D(pooling_size[i]))
155 | 
156 |     if n_recurrent > 0:
157 |         model.add(Reshape((-1, np.prod(model.output_shape[2:]).astype(int))))
158 |         model.add(Bidirectional(GRU(dense_width, activation='linear',
159 |                                     return_sequences=True if n_recurrent > 1 else use_attention)))
160 |         model.add(activation_layer())
161 |         for i in range(n_recurrent - 1):
162 |             model.add(GRU(dense_width, activation='linear',
163 |                           return_sequences=True if i < n_recurrent - 2 else use_attention))
164 |         if use_attention:
165 |             model.add(Attention())
166 | 
167 |         model.add(Reshape((-1,)))
168 |     else:
169 |         model.add(Reshape((-1,)))
170 |     model.add(Dropout(0.1))
171 |     model.add(Dense(dense_width, activation='linear'))
172 |     model.add(activation_layer())
173 |     model.add(Dropout(0.1))
174 |     model.add(Dense(dense_width, activation='linear'))
175 |     model.add(activation_layer())
176 |     model.add(Dense(dense_width, activation='linear'))
177 |     model.add(activation_layer())
178 | 
179 |     model.add(Dense(n_outputs, activation='softmax'))
180 | 
181 |     return model
182 | 
183 | 
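# --- Editor's note -------------------------------------------------------------
# A sketch of how the builder above can be called. The dimensions are
# illustrative: with the defaults used in this file, 5 s of 16 kHz speech framed
# at 320 samples gives roughly 499 frames of 161 log-power bins. `_crnn_demo`
# is hypothetical and not part of the original code.
def _crnn_demo():
    # CNN + RNN layers enabled: the CRNN configuration for the 7 ACE rooms
    model = get_model_speech((499, 161), n_outputs=7, use_cnn=True, use_rnn=True)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# -------------------------------------------------------------------------------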
184 | def show_classification_results(all_preds, y, ids, class_names, fold=None, mark_wrongs=False):
185 |     """
186 |     Prints the results of the classification predictions in a way which allows for a comparison
187 |     between the predictions and the true classes.
188 | 
189 |     Args:
190 |         all_preds: A matrix of [ N_samples x N_classes ], with 1's on the predicted class
191 |         y: A matrix of [ N_samples x N_classes ], with 1's on the true class
192 |         ids: The id (a string) of each sample
193 |         class_names: The unique classes in the classification problem (as strings)
194 |         fold: The fold to which each sample belongs (cross-validation fold)
195 |         mark_wrongs: Put a marker next to misclassified samples
196 | 
197 |     Returns:
198 |         Nothing
199 | 
200 |     """
201 | 
202 |     accuracy = np.sum(
203 |         np.all(all_preds == y, axis=1)
204 |     ) / float(y.shape[0])
205 |     n_hots = np.sum(all_preds, axis=1)
206 |     if ~np.all(n_hots == 1):
207 |         too_hot = np.where(~(n_hots == 1))[-1]
208 |         raise AssertionError(
209 |             'Predictions do not make sense because the following idxs had more than one hots ' +
210 |             str(too_hot) + ' with the following hots ' + str(n_hots[too_hot]))
211 |     n_hots = np.sum(y, axis=1)
212 |     if ~np.all(n_hots == 1):
213 |         too_hot = np.where(~(n_hots == 1))[-1]
214 |         raise AssertionError(
215 |             'Ground truths do not make sense because the following idxs had more than one hots ' +
216 |             str(too_hot) + ' with the following hots ' + str(n_hots[too_hot]))
217 |     results = np.concatenate((
218 |         np.atleast_2d(ids).T,
219 |         np.atleast_2d(class_names[np.argmax(all_preds, axis=1)]).T
220 |     ), axis=1)
221 |     headers = ('AIR', 'Prediction')
222 |     if fold is not None:
223 |         results = np.concatenate((
224 |             results,
225 |             np.atleast_2d(fold).T
226 |         ), axis=1)
227 |         headers = tuple(list(headers) + ['Fold'])
228 |     if mark_wrongs:
229 |         correct = [i.replace('EE_lobby', 'EE-lobby').split('_')[1] for i in results[:, 0]
230 |                    ] == results[:, 1]
231 |         results = results[~correct, :]
232 |         print(f'Showing {np.sum(~correct)} wrongs of {correct.size}')
233 |     print(tabulate(results, headers=headers))
234 | 
235 |     print(f'Overall Accuracy: {float2str(accuracy, 3)}')
236 | 
237 | 
238 | def train_eval(h5_loc, ace_base, timestamp,
239 |                use_cnn=False, use_rnn=False,
240 |                read_cache=True, cacheloc_master='/tmp/', split_type='position',
241 |                speech_dir=None, use_attention=False):
242 |     """
243 |     Worker which trains and evaluates DNN solutions for room classification, based on the data
244 |     provided with the ACE challenge database.
245 | 
246 |     Args:
247 |         h5_loc: Location of the HDF5 dataset file for the ACE database, which is provided with this
248 |             repository at Code/results_dir/ace_h5_info.h5. Contains information about the filenames,
249 |             number of channels and also ground truth acoustic parameter values. The dataset has 2
250 |             fields: 'filenames', which contains the locations of the wav AIRs, and 'chan', which
251 |             indicates the number of channels in each audio file.
252 |         ace_base: The folder containing the ACE wav data.
253 |         timestamp: A timestamp to use for file saving
254 |         use_cnn: Use CNN layers
255 |         use_rnn: Use RNN layers
256 |         use_attention: Use Attention mechanism layers
257 |         read_cache: Enable the reading of any cached data, if any.
258 |         cacheloc_master: Location for saving and reading cached data.
259 |         split_type: Choice between 'array' and 'position', defining how the cross-validation
260 |             folds are formed.
261 |         speech_dir: Location of speech data. Given as a list of [location of train data,
262 |             location of test data].
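    Example:
        An illustrative call (the speech locations are placeholders, not shipped
        defaults; run_ace_discriminative_nets.sh shows the real entry point)::

            train_eval('../results_dir/ace_h5_info.h5',
                       '../Local_Databases/AIR/ACE16/', _TIMESTAMP,
                       use_cnn=True, use_rnn=True,
                       speech_dir=['/data/TIMIT/TRAIN', '/data/TIMIT/TEST'])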
263 | 264 | Returns: Nothing 265 | 266 | """ 267 | 268 | np.random.seed(601) 269 | 270 | experiment = 'room' 271 | cacheloc_train = cacheloc_master + '/train_set/' 272 | cacheloc_bs = cacheloc_master + '/bs_set_%d/' 273 | cacheloc_test = cacheloc_master + '/train_test/' 274 | print(f'Cache location train : {cacheloc_train}') 275 | print(f'Cache location test : {cacheloc_test}') 276 | print(f'Cache location bs : {cacheloc_bs}') 277 | 278 | call(["mkdir", "-p", SCRATCHPAD]) 279 | model_filename = f'{SCRATCHPAD}/{MODEL_BASENAME}_{timestamp}.h5' 280 | 281 | feature_ex_config = { 282 | 'max_air_len': MAX_SPEECH_LEN, 283 | 'fs': SIM_DATA_FS, 'forced_fs': MODEL_FS, 284 | 'max_speech_read': MAX_SPEECH_LEN, 'drop_speech': True, 285 | 'as_hdf5_ds': True, 286 | 'framesize': MODEL_FRAMESIZE_SPEECH, 287 | 'keep_ids': None, 'utt_per_env': UTT_PER_ENV_DEF, 288 | 'write_cached_latest': True, 'wavform_logpow': WAVFORM_LOGPOW, 289 | 'read_cached_latest': read_cache, 'get_pow_spec': GET_POW_SPEC, 290 | 'start_at_max': False, 291 | } 292 | 293 | (x_out_train, (y_train, y_position_train)), ids_train, class_names_train, \ 294 | (_, _, _), \ 295 | ((group_names_array_train, group_names_position_train, group_names_room_train), 296 | (groups_array_train, groups_position_train, group_room_train) 297 | ) = get_ace_xy(h5_file=h5_loc, ace_base=ace_base, 298 | scratchpad=SCRATCHPAD + '/train/', 299 | speech_files=speech_dir[0], 300 | group_by=('array', 'position', 'room'), 301 | cacheloc=cacheloc_train, 302 | y_type=(experiment, 'position'), 303 | **feature_ex_config) 304 | 305 | groups_array_train = [groups_array_train[ii] for ii in group_names_array_train.argsort()] 306 | group_names_array_train = group_names_array_train[group_names_array_train.argsort()] 307 | groups_position_train = [groups_position_train[ii] for ii in 308 | group_names_position_train.argsort()] 309 | group_names_position_train = group_names_position_train[group_names_position_train.argsort()] 310 | 311 | (x_out_test, y_test), ids_test, class_names_test, \ 312 | (_, _, _), \ 313 | ((group_names_array_test, group_names_position_test), 314 | (groups_array_test, groups_position_test) 315 | ) = get_ace_xy(h5_file=h5_loc, ace_base=ace_base, 316 | scratchpad=SCRATCHPAD + '/test/', 317 | speech_files=speech_dir[ 318 | 1] if speech_dir is not None else None, 319 | group_by=('array', 'position'), cacheloc=cacheloc_test, 320 | y_type=experiment, **feature_ex_config) 321 | groups_array_test = [groups_array_test[ii] for ii in group_names_array_test.argsort()] 322 | group_names_array_train = group_names_array_train[group_names_array_train.argsort()] 323 | groups_position_test = [groups_position_test[ii] for ii in 324 | group_names_position_test.argsort()] 325 | group_names_position_test = group_names_position_test[group_names_position_test.argsort()] 326 | if not group_names_array_test.size == group_names_array_train.size: 327 | raise AssertionError('Test and train sets do not match') 328 | if ~np.all(group_names_array_train == group_names_array_test): 329 | raise AssertionError('Test and train sets do not match') 330 | if not group_names_position_test.size == group_names_position_train.size: 331 | raise AssertionError('Test and train sets do not match') 332 | if ~np.all(group_names_position_test == group_names_position_train): 333 | raise AssertionError('Test and train sets do not match') 334 | 335 | scaler, _ = get_scaler_descaler(x_out_train) 336 | x_out_train = scaler(x_out_train) 337 | x_out_test = scaler(x_out_test) 338 | callback_eval_func = 
accuracy_eval
339 |     all_preds = []
340 |     all_folds = []
341 | 
342 |     if split_type == 'position':
343 |         fold_set = range(len(groups_position_test))
344 |         experiment_groups_train = groups_position_train
345 |         experiment_groups_test = groups_position_test
346 |         experiment_group_names = group_names_position_train
347 |     elif split_type == 'array':
348 |         fold_set = range(len(groups_array_test))
349 |         experiment_groups_train = groups_array_train
350 |         experiment_groups_test = groups_array_test
351 |         experiment_group_names = group_names_array_train
352 |     else:
353 |         raise AssertionError(f'Bad split type {split_type}')
354 | 
355 |     print('Train set : ')
356 |     for i in experiment_groups_train[0]:
357 |         print(f'Train : {ids_train[i]}')
358 |     print('Test set : ')
359 |     for i in experiment_groups_test[0]:
360 |         print(f'Test : {ids_test[i]}')
361 | 
362 |     for i, this_outgroup in enumerate(experiment_groups_train):
363 |         eval_idxs = experiment_groups_test[i]
364 | 
365 |         val_idxs = []
366 |         for i_g in range(len(groups_position_train)):
367 |             possible_choices = [j for j in groups_position_train[i_g].tolist() if j not in this_outgroup.tolist()]
368 |             if len(possible_choices) == 1:
369 |                 if np.random.rand() < .15:
370 |                     val_idxs += possible_choices
371 |             elif len(possible_choices) > 0:
372 |                 val_idxs += np.random.choice(
373 |                     possible_choices, int(.15 / len(groups_position_train) * x_out_train.shape[0]),
374 |                     replace=False).tolist()
375 |         val_idxs = np.sort(val_idxs).tolist()
376 | 
377 |         train_idxs = np.array([j for j in range(x_out_train.shape[0]) if j not in
378 |                                (this_outgroup.tolist() + val_idxs)]).astype(int)
379 |         print(f'For fold (zero counting absolute) {fold_set[i]} (at step: {i + 1}'
380 |               f' of {len(fold_set)}), at {experiment_group_names[i]}'
381 |               f' with {train_idxs.size} train '
382 |               f' and {eval_idxs.size} test samples')
383 | 
384 |         evaluation_eval_func = lambda cmodel: callback_eval_func(
385 |             x_out_test[eval_idxs, :, :], y_test[eval_idxs, :], cmodel, prefix='Test')
386 | 
387 |         model_filename_fold = model_filename.replace('.h5', '_fold_' + str(i) + '.h5')
388 |         if exists(model_filename_fold):
389 |             print(f'Loading pre-trained model from {model_filename_fold}')
390 |             model = load_model(model_filename_fold)
391 |             print(f'Loaded pre-trained model from {model_filename_fold}')
392 |             model.summary()
393 |         else:
394 |             print(f'File {model_filename_fold} does not exist so I will train the model')
395 |             callbacks = [PostEpochWorker(
396 |                 (x_out_train[val_idxs, :, :],
397 |                  x_out_test[eval_idxs, :, :]),
398 |                 (y_train[val_idxs, :], y_test[eval_idxs, :]),
399 |                 model_filename,
400 |                 eval_fun=(
401 |                     lambda x, y, cmodel: callback_eval_func(x, y, cmodel, prefix='Val'),
402 |                     lambda x, y, cmodel: callback_eval_func(x, y, cmodel, prefix='Test')),
403 |                 eval_every_n_epochs=100),
404 |             ]
405 | 
406 |             effective_batch_size = BATCH_SIZE_BASE_PER_CLASS_SPEECH * y_position_train.shape[1]
407 |             this_batch_gen = batch_gen(
408 |                 x_out_train, y_train, BATCH_SIZE_BASE_PER_CLASS_SPEECH,
409 |                 y_to_balance=y_position_train, sub_idxs=train_idxs)
410 |             steps_per_epoch = min(int(round(y_train.shape[0] / effective_batch_size)), MAX_STEPS_PER_EPOCH)
411 | 
412 |             print(f'For a batch size of {effective_batch_size} I will do {steps_per_epoch} steps per epoch.')
413 |             this_batch_gen_val = batch_gen(
414 |                 x_out_train, y_train, BATCH_SIZE_BASE_PER_CLASS_SPEECH,
415 |                 y_to_balance=y_position_train, sub_idxs=val_idxs)
416 |             _, filename = model_trainer(
417 |                 this_batch_gen, x_out_train.shape[1:], y_train.shape[1],
418 |                 get_model_speech,
419 |                 tensorlog=True, callbacks=callbacks, epochs=MAX_EPOCHS,
420 |                 loss_patience=10, scratchpad=SCRATCHPAD,
421 |                 model_filename=model_filename_fold, use_attention=use_attention,
422 |                 print_summary=i == 0, use_cnn=use_cnn, use_rnn=use_rnn,
423 |                 val_gen=this_batch_gen_val, val_patience=15, steps_per_epoch=steps_per_epoch,
424 |             )
425 |             model = callbacks[0].best_val_model
426 | 
427 |         new_preds = evaluation_eval_func(model)
428 |         all_preds.append(new_preds)
429 |         all_folds.append(np.zeros(new_preds.shape[0], dtype=int) + fold_set[i])
430 |         idxs_to_do = np.concatenate(experiment_groups_test[0:i + 1]).astype(int)
431 |         show_classification_results(
432 |             np.concatenate(all_preds, axis=0)[np.argsort(idxs_to_do), ...],
433 |             y_test[np.sort(idxs_to_do), ...],
434 |             ids_test[np.sort(idxs_to_do).tolist()],
435 |             class_names_test, mark_wrongs=True,
436 |             fold=np.concatenate(all_folds)[np.argsort(idxs_to_do)]
437 |         )
438 |         K.clear_session()
439 |     print('Overall Test results for all folds:')
440 |     idxs_to_do = np.concatenate(experiment_groups_test).astype(int)
441 |     show_classification_results(
442 |         np.concatenate(all_preds, axis=0)[np.argsort(idxs_to_do), ...],
443 |         y_test[np.sort(idxs_to_do), ...],
444 |         ids_test[np.sort(idxs_to_do).tolist()],
445 |         class_names_test,
446 |         fold=np.concatenate(all_folds)[np.argsort(idxs_to_do)]
447 |     )
448 | 
449 | 
450 | if __name__ == '__main__':
451 |     """
452 |     This file is the main worker for training and evaluating the DNNs proposed in:
453 |     C. Papayiannis, C. Evers and P. A. Naylor, "End-to-End Classification of Reverberant Rooms Using DNNs," in IEEE/ACM Transactions on Audio, Speech, and Language Processing, vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
454 | 
455 |     For usage help, run : python ace_discriminative_nets.py --help
456 |     """
457 | 
458 |     parser = argparse.ArgumentParser(
459 |         description='Arguments for training of ACE models and encodings')
460 |     parser.add_argument('--ace', dest='ace_base', type=str,
461 |                         default='../Local_Databases/AIR/ACE16/',
462 |                         help='Location of the ACE database')
463 |     parser.add_argument('--h5', dest='h5', type=str, default='../results_dir/ace_h5_info.h5',
464 |                         help='Location of the HDF5 dataset file for the ACE database, which is '
465 |                              'provided with this repository at Code/results_dir/ace_h5_info.h5. '
466 |                              'Contains information about the filenames, number of channels and '
467 |                              'also ground truth acoustic parameter values. If you want to create '
468 |                              'a new one, then use fe_utils.compile_ace_h5')
469 |     parser.add_argument('--speech', dest='speech', type=str, required=True, nargs='*',
470 |                         help='Locations of the speech wav files')
471 |     parser.add_argument('--readcache', dest='readcache', action="store_true",
472 |                         default=False,
473 |                         help='Do not load new data, just read the last cached data')
474 |     parser.add_argument('--attention', action="store_true",
475 |                         default=False, help='Use attention')
476 |     parser.add_argument('--cnn', dest='cnn', action="store_true",
477 |                         default=False, help='Add CNN layers to the net')
478 |     parser.add_argument('--rnn', dest='rnn', action="store_true",
479 |                         default=False, help='Add RNN layers to the net')
480 |     parser.add_argument('--cacheloc', dest='cacheloc', type=str, default='/tmp/',
481 |                         help='Location where the cache is located and/or will be stored')
482 |     parser.add_argument('--split-type', type=str, default='position', choices=('position', 'array'),
483 |                         help='The ACE split defining the cross-validation folds')
484 | 
485 |     args = parser.parse_args()
486 | 
487 |     run_command(f'mkdir -p {SCRATCHPAD}')
488 | 
489 |     train_eval(args.h5, args.ace_base, _TIMESTAMP, speech_dir=args.speech, read_cache=args.readcache,
490 |                use_cnn=args.cnn, use_rnn=args.rnn, use_attention=args.attention,
491 |                cacheloc_master=args.cacheloc, split_type=args.split_type)
--------------------------------------------------------------------------------
/Code/pythonsrc/fe_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Constantinos Papayiannis
2 | #
3 | # This file is part of Reverberation Learning Toolbox for Python.
4 | #
5 | # Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 | #
10 | # Reverberation Learning Toolbox for Python is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with Reverberation Learning Toolbox for Python. If not, see <http://www.gnu.org/licenses/>.
17 | 
18 | """
19 | 
20 | This file contains a set of routines which make feature extraction and data handling easy for
21 | deep learning tasks around reverberation. It offers some dedicated routines for the ACE
22 | challenge database (http://www.ee.ic.ac.uk/naylor/ACEweb/index.html)
23 | 
24 | This file was originally distributed in the repository at:
25 | {repo}
26 | 
27 | If you use this code in your work, then cite:
28 | C. Papayiannis, C. Evers and P. A. Naylor,
29 | "End-to-End Classification of Reverberant Rooms Using DNNs,"
30 | in IEEE/ACM Transactions on Audio, Speech, and Language Processing,
31 | vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
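Example usage (an illustrative sketch; '/data/my_airs/' is a placeholder and the
get_ace_xy call assumes the ACE16 data have been unpacked as described in
run_ace_discriminative_nets.sh)::

    from fe_utils import compile_ace_h5, get_ace_xy
    compile_ace_h5(['/data/my_airs/'], '/tmp/my_h5_info.h5')
    (x, y), ids, class_names, _, _ = get_ace_xy(
        h5_file='../results_dir/ace_h5_info.h5',
        ace_base='../Local_Databases/AIR/ACE16/',
        y_type='room', fs=16000., max_air_len=1.0)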
32 | 33 | """ 34 | 35 | from os.path import abspath 36 | from os.path import basename 37 | from os.path import isfile 38 | from random import randint 39 | from random import sample 40 | from time import sleep 41 | from time import time 42 | 43 | import numpy as np 44 | import pandas as pd 45 | from h5py import File 46 | from numpy.fft import rfft 47 | from scipy.io import wavfile 48 | from scipy.io.wavfile import read 49 | from scipy.signal import fftconvolve 50 | from tabulate import tabulate 51 | 52 | from utils_base import find_all_ft, run_command 53 | from utils_base import flatten_list 54 | from utils_spaudio import enframe 55 | from utils_spaudio import my_resample 56 | from utils_spaudio import write_wav 57 | 58 | 59 | def print_split_report(y, split_idxs=(), split_names=()): 60 | """ 61 | 62 | Prints the distribution of the labels of data across splits. If no splits exist, 63 | then a report on the global distribution is given. Splits are indicated with the set of 64 | indices in the provided tuple. Split names can be provided fro clarity. 65 | 66 | Args: 67 | y: The classes labels as a vector 68 | split_idxs: A list of lists of indices, indicating members of the vector which belong to 69 | each split. 70 | split_names: The name of each split as a list. Should have the same length as the 71 | index list. 72 | 73 | Returns: 74 | Nothing 75 | 76 | """ 77 | if len(split_idxs) == 0: 78 | split_idxs = (np.arange(0, y.shape[0]),) 79 | if not len(split_idxs) == len(split_names): 80 | if len(split_names) > 0: 81 | raise AssertionError('Invalid Inputs') 82 | else: 83 | split_names = [] 84 | for i in range(len(split_idxs)): 85 | split_names.append('Set ' + str(i)) 86 | 87 | distributions = np.zeros((y.shape[1], len(split_idxs)), dtype=int) 88 | for i in range(len(split_idxs)): 89 | idxs = split_idxs[i] 90 | for j in range(y.shape[1]): 91 | distributions[j, i] = np.sum(y[idxs, j]) 92 | print('Data Distributions:') 93 | print(tabulate(distributions, headers=split_names, showindex=True)) 94 | 95 | 96 | def __make_start_at_max(x_in, fs=None, max_air_len=None, just_crop=False, leeway=0.0): 97 | x = np.array(x_in) 98 | max_samples = None 99 | if max_air_len is not None or leeway is not None: 100 | if fs is None: 101 | raise AssertionError('Max length and leeway should be given with fs') 102 | if max_air_len is not None: 103 | max_samples = int(np.ceil(max_air_len * fs)) 104 | 105 | if leeway is None: 106 | leeway_samples = 0 107 | else: 108 | leeway_samples = int(np.ceil(leeway * fs)) 109 | min_max = np.inf 110 | max_max = 0 111 | if not just_crop: 112 | for i in range(x.shape[0]): 113 | maxp = max(0, np.argmax(np.abs(x[i, :])) - leeway_samples) 114 | min_max = min(min_max, maxp) 115 | max_max = max(max_max, maxp) 116 | if maxp > 0: 117 | x[i, 0:-maxp] = np.array(x[i, maxp:]) 118 | x[i, -maxp:] = 0 119 | # x = x[:, np.any(x > 0, axis=0)] 120 | print(f'Max shift : {max_max} min max : {min_max}') 121 | if max_air_len is not None: 122 | x = x[:, 0:max_samples] 123 | if x.shape[1] < max_samples: 124 | padding = np.zeros((x.shape[0], max_samples - x.shape[1])) 125 | x = np.concatenate((x, padding), axis=1) 126 | return x 127 | 128 | 129 | def __to_enframed(x, framesize=None, window=True): 130 | return np.stack( 131 | [enframe(x[i, :], framesize, int(np.ceil(framesize / 2)), hamming_window=window) 132 | for i in range(x.shape[0])]) 133 | 134 | 135 | def __to_pow_spec(x, framesize=None, match_training_spectrum=False): 136 | print('Getting FFTs') 137 | oshape = x.shape 138 | if framesize is not None: 139 
|         if x.ndim < 3:
140 |             x = __to_enframed(x, framesize=framesize)
141 |         oshape = x.shape
142 |         x = np.concatenate([x[i, :, :] for i in range(x.shape[0])], axis=0)
143 |     x = (abs(rfft(x, axis=1)))
144 |     if match_training_spectrum:
145 |         av_spec = np.genfromtxt('../results_dir/surface_models/average_response_abs_fft.csv',
146 |                                 delimiter=',')
147 |         x *= np.atleast_2d(av_spec)
148 |     x[x == 0] = np.min(x[x > 0]) * 0.01
149 |     x = np.log(x)
150 |     if framesize is not None:
151 |         x = x.reshape(tuple(list(oshape)[0:2] + [-1]))
152 |     return x
153 | 
154 | 
155 | def data_post_proc(x, fs, start_at_max, framesize, get_pow_spec, max_len, wavform_logpow):
156 |     """
157 | 
158 |     Processes audio files before they are passed to DNNs for training.
159 | 
160 |     Args:
161 |         x: Audio signal samples as a numpy array [N_signals x N_samples]
162 |         fs: Sampling frequency
163 |         start_at_max: Shift each signal so that it starts at its maximum-energy sample
164 |         framesize: Set a framesize (in samples) for the signals to be enframed at. Turns the
165 |             array of signals into a 3D array.
166 |         get_pow_spec: Transform the signals into their log-power spectrum
167 |         max_len: Maximum signal length in seconds (truncate or pad to this)
168 |         wavform_logpow: Get the signals in the log-power time domain
169 | 
170 |     Returns:
171 |         The processed signals
172 | 
173 |     """
174 | 
175 |     match_training_spectrum = False
176 |     if get_pow_spec and wavform_logpow:
177 |         raise AssertionError('Unexpected scenario')
178 | 
179 |     print('For this feature extraction I will:')
180 |     if start_at_max:
181 |         print('Make all inputs start at the maximum energy sample')
182 |     if framesize is not None:
183 |         print('Enframe the inputs')
184 |     if get_pow_spec:
185 |         print('Get the logpow spectrum')
186 |     if max_len is not None:
187 |         print('Truncate the maximum length of the input')
188 |     if wavform_logpow:
189 |         print('Convert the time domain samples to the logpow')
190 |     print(f'And return the results for {x.shape[0]} inputs')
191 | 
192 |     if start_at_max or (max_len is not None):
193 |         x = __make_start_at_max(x, fs, max_len, just_crop=(not start_at_max), leeway=0.0)
194 | 
195 |     if framesize is not None:
196 |         x = __to_enframed(x, framesize=framesize, window=get_pow_spec)
197 | 
198 |     if get_pow_spec:
199 |         x = __to_pow_spec(x, framesize=framesize, match_training_spectrum=match_training_spectrum)
200 | 
201 |     if wavform_logpow:
202 |         x = x ** 2
203 |         xmax = np.max(x)
204 |         x[x < (xmax / 200)] = xmax / 200
205 |         x = np.nan_to_num(np.log(x))
206 | 
207 |     return x
208 | 
209 | 
210 | def read_airs_from_wavs(wav_files, framesize=None, get_pow_spec=True,
211 |                         max_air_len=None, fs=None, forced_fs=None,
212 |                         keep_ids=None, cacheloc='/tmp/',
213 |                         start_at_max=True, read_cached_latest=False,
214 |                         wavform_logpow=False,
215 |                         write_cached_latest=True, max_speech_read=None,
216 |                         max_air_read=None, utt_per_env=1,
217 |                         parse_as_dirctories=True,
218 |                         speech_files=None, save_speech_associations=True,
219 |                         save_speech_examples=10, drop_speech=False, as_hdf5_ds=True,
220 |                         choose_channel=None, no_fex=False, scratchpad='/tmp/',
221 |                         copy_associations_to=None, given_associations=None):
222 |     """
223 | 
224 |     Given a set of AIR files and additional information, data for the training of DNNs for
225 |     environment classification are prepared.
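    Example:
        A minimal call that reads a directory of AIRs without any speech
        ('/data/my_airs/' is a placeholder)::

            (x_feat, _), ids, _, (airs, _, _), utts = read_airs_from_wavs(
                ['/data/my_airs/'], fs=16000, framesize=320, max_air_len=1.0,
                as_hdf5_ds=False, read_cached_latest=False)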
226 | 
227 |     Args:
228 |         wav_files: Locations of the AIR wav files
229 |         framesize: The framesize to use
230 |         get_pow_spec: Convert audio to the log-power spectrum domain
231 |         max_air_len: The maximum length of the signals (truncate to or pad to)
232 |         fs: The sampling frequency of the wav files to expect
233 |         forced_fs: The sampling frequency to convert the data to
234 |         keep_ids: None (not used)
235 |         cacheloc: Location to use for cache reading and saving
236 |         start_at_max: Modify the signals so that the maximum energy sample is at the beginning
237 |             (can be used to align AIRs)
238 |         read_cached_latest: Read the data from the last saved cache (if any)
239 |         wavform_logpow: Get the signals in the log-power time domain
240 |         write_cached_latest: Write the collected data in a cache for fast reuse
241 |         max_speech_read: Maximum length of speech signal to read
242 |         max_air_read: Maximum AIR length to read up to
243 |         utt_per_env: Number of utterances to convolve with each AIR
244 |         parse_as_dirctories: Parse the inputs as directories and not as individual files
245 |         speech_files: Locations of the speech files
246 |         save_speech_associations: Save the speech associations with the corresponding AIRs
247 |         save_speech_examples: Enable the saving of examples of the reverberant speech created
248 |         drop_speech: Do not include the speech samples in the saving of the cache or in the RAM.
249 |             Keep only the training data arrays
250 |         as_hdf5_ds: Keep the data as HDF5 datasets on disk. (Reduces RAM usage a lot)
251 |         choose_channel: Channels to use for each AIR
252 |         no_fex: Skip the data processing phase and just return the raw signals
253 |         scratchpad: Location to use for temporary saving
254 |         copy_associations_to: Save a copy of the speech-AIR associations here
255 |         given_associations: Provided associations between speech files and AIRs.
This can be used 256 | in the case where you want to use specific speech samples for specific AIRs 257 | 258 | Returns: 259 | (X, None), Sample_names, None, 260 | (AIRs, Speech, Reverberant_speech), 261 | (Group_name, Groups), Number_of_utternaces_convolved_with_each_AIR 262 | 263 | """ 264 | run_command('mkdir -p ' + cacheloc) 265 | latest_file = cacheloc + '/training_test_data_wav.h5' 266 | timestamp = str(time()) 267 | filename_associations = scratchpad + '/air_speech_associations_' + timestamp + '.csv' 268 | base_examples_dir = scratchpad + '/feature_extraction_examples/' 269 | if keep_ids is not None: 270 | raise AssertionError('No ids exist in this context') 271 | if speech_files is None: 272 | utt_per_env = 1 273 | if save_speech_associations: 274 | print('There is no speech to save in associations, setting to false') 275 | save_speech_associations = False 276 | if save_speech_examples: 277 | print('There is no speech to save audio for, setting to 0 examples') 278 | save_speech_examples = 0 279 | 280 | hf = None 281 | try: 282 | if isfile(latest_file) and read_cached_latest: 283 | print(f'Reading : {latest_file}') 284 | hf = File(latest_file, 'r') 285 | if as_hdf5_ds: 286 | x = hf['x'] 287 | airs = hf['airs'] 288 | utt_per_env = np.array(hf['utts']) 289 | rev_speech = hf['rev_names'] 290 | clean_speech = hf['clean_speech'] 291 | print(f'Done creating handles to : {latest_file}') 292 | else: 293 | utt_per_env = np.array(hf['utts']) 294 | x = np.array(hf.get('x')) 295 | ids = np.array(hf.get('ids')) 296 | airs = np.array(hf.get('airs')) 297 | rev_speech = np.array(hf.get('rev_names')) 298 | clean_speech = np.array(hf.get('clean_speech')) 299 | print(f'Done reading : {latest_file}') 300 | ids = np.array([x.decode() for x in hf['ids']]) 301 | if given_associations is not None: 302 | print('! I read the cache so the given associations were not used') 303 | if copy_associations_to is not None: 304 | print(f'! 
I read the cache so the associations could not be saved at {copy_associations_to}')
305 |             return (x, None), ids, None, (airs, clean_speech, rev_speech), utt_per_env
306 |     except (ValueError, KeyError) as ME:
307 |         print(f'Tried to read {latest_file} but failed with {ME}')
308 |         if hf is not None:
309 |             hf.close()
310 | 
311 |     if given_associations is not None:
312 |         print(f'You gave me speech associations, Speech: {len(given_associations["speech"])}'
313 |               f' entries and Offsets: {len(given_associations["offsets"])} entries')
314 | 
315 |     ids = None
316 |     x = None
317 |     x_speech = None
318 |     x_rev_speech = None
319 | 
320 |     if forced_fs is None:
321 |         forced_fs = fs
322 |     resample_op = lambda x: x
323 |     if not forced_fs == fs:
324 |         resample_op = lambda x: np.array(
325 |             my_resample(np.array(x.T, dtype=float), fs, forced_fs)
326 |         ).T
327 | 
328 |     max_air_read_samples = None
329 |     if max_air_read is not None:
330 |         if fs is None:
331 |             raise AssertionError('Cannot work with max_air_read without fs')
332 |         max_air_read_samples = int(np.ceil(fs * max_air_read))
333 |     if max_speech_read is not None:
334 |         if fs is None:
335 |             raise AssertionError('Cannot work with max_speech_read without fs')
336 |         max_speech_read_samples = int(np.ceil(fs * max_speech_read))
337 |     else:
338 |         max_speech_read_samples = None
339 | 
340 |     if parse_as_dirctories:
341 |         if not type(wav_files) is list:
342 |             wav_files = [wav_files]
343 |         wav_files = find_all_ft(wav_files, ft='.wav', find_iname=True)
344 |         if speech_files is not None:
345 |             if not type(speech_files) is list:
346 |                 speech_files = [speech_files]
347 |             speech_files = find_all_ft(speech_files, ft='.wav', find_iname=True)
348 | 
349 |     if save_speech_examples:
350 |         run_command('rm -r ' + base_examples_dir)
351 |         run_command('mkdir -p ' + base_examples_dir)
352 | 
353 |     associations = []
354 |     save_counter = 0
355 |     all_names = [basename(i).replace('.wav', '') + '_' + str(j) for i in wav_files for j in
356 |                  range(utt_per_env)]
357 |     if type(choose_channel) is list:
358 |         choose_channel = [i for i in choose_channel for _ in range(utt_per_env)]
359 |     wav_files = [i for i in wav_files for _ in range(utt_per_env)]
360 |     offsets = []
361 |     for i, this_wav_file in enumerate(wav_files):
362 |         # if speech_files is not None:
363 |         #     print("Reading: " + this_wav_file + " @ " + str(i + 1) + " of " + str(len(wav_files)), end='')
364 |         names = [all_names[i]]
365 |         this_fs, airs = wavfile.read(this_wav_file)
366 |         airs = airs.astype(float)
367 |         if airs.ndim > 1:
368 |             if choose_channel is not None:
369 |                 if type(choose_channel) is list:
370 |                     airs = airs[:, choose_channel[i]]
371 |                     names[0] += '_ch' + str(choose_channel[i])
372 |                 else:
373 |                     airs = airs[:, choose_channel]
374 |                     names[0] += '_ch' + str(choose_channel)
375 |             else:
376 |                 names = [names[0] + '_' + str(ch_id) for ch_id in range(airs.shape[1])]
377 |                 airs = airs.T
378 |         airs = np.atleast_2d(airs)
379 |         airs /= np.repeat(np.atleast_2d(abs(airs).max()).T, airs.shape[1], 1).astype(float)
380 |         if airs.shape[0] > 1 and given_associations is not None:
381 |             raise AssertionError('Cannot work out given associations for multichannel airs')
382 |         this_speech_all = []
383 |         this_rev_speech_all = []
384 |         if speech_files is not None:
385 |             for ch_id in range(airs.shape[0]):
386 |                 if given_associations is None:
387 |                     chosen_file = sample(range(len(speech_files)), 1)[0]
388 |                     this_speech_file = speech_files[chosen_file]
389 |                 else:
390 |                     chosen_file = given_associations['speech'][i]
391 |                     this_speech_file =
chosen_file 392 | associations.append(chosen_file) 393 | this_speech_fs, this_speech = wavfile.read(this_speech_file) 394 | if this_speech.ndim > 1: 395 | raise AssertionError('Can\'t deal with multichannel speech in this context') 396 | if not this_speech_fs == this_fs: 397 | this_speech = my_resample(this_speech, this_speech_fs, this_fs) 398 | max_offset_for_check = None 399 | if max_speech_read_samples is not None: 400 | max_offset_for_check = this_speech.size - max_speech_read_samples 401 | offset = randint(0, this_speech.size - max_speech_read_samples) 402 | this_speech = this_speech[offset:offset + max_speech_read_samples] 403 | else: 404 | offset = 0 405 | if given_associations is not None: 406 | offset = given_associations['offsets'][i] 407 | if max_speech_read_samples is not None: 408 | if offset >= max_offset_for_check: 409 | raise AssertionError( 410 | 'Invalid offset from given associations, got ' + str( 411 | offset) + ' expected max is ' + str( 412 | this_speech.size - max_speech_read_samples)) 413 | 414 | conv_air = np.array(airs[ch_id, :]) 415 | conv_air = conv_air[ 416 | np.where(~(conv_air == 0))[-1][0]:np.where(~(conv_air == 0))[-1][-1]] 417 | 418 | # Making convolution 419 | this_rev_speech = fftconvolve(this_speech, conv_air, 'same') 420 | # 421 | 422 | dp_arival = np.argmax(abs(conv_air)) 423 | this_rev_speech = this_rev_speech[dp_arival:] 424 | if dp_arival > 0: 425 | this_rev_speech = np.concatenate( 426 | (this_rev_speech, np.zeros(dp_arival, dtype=this_rev_speech.dtype))) 427 | 428 | this_speech = np.atleast_2d(this_speech) 429 | this_rev_speech = np.atleast_2d(this_rev_speech) 430 | this_speech_all.append(this_speech) 431 | this_rev_speech_all.append(this_rev_speech) 432 | 433 | offsets.append(offset) 434 | if save_speech_examples >= save_counter: 435 | save_names = [ 436 | basename(this_wav_file).replace('.wav', '') + '_air_' + str( 437 | offset) + '.wav', 438 | basename(this_wav_file).replace('.wav', '') + '_clean_speech_' + str( 439 | offset) + '.wav', 440 | basename(this_wav_file).replace('.wav', '') + '_rev_speech_' + str( 441 | offset) + '.wav' 442 | ] 443 | for examples in range(len(save_names)): 444 | save_names[examples] = base_examples_dir + save_names[examples] 445 | write_wav(save_names[0], this_fs, airs[ch_id, :]) 446 | write_wav(save_names[1], this_fs, this_speech.flatten()) 447 | write_wav(save_names[2], this_fs, this_rev_speech.flatten()) 448 | save_counter += 1 449 | this_speech = np.concatenate(this_speech_all, axis=0) 450 | this_rev_speech = np.concatenate(this_rev_speech_all, axis=0) 451 | 452 | if not this_fs == fs: 453 | raise AssertionError('Your sampling rates are not consistent') 454 | if i > 0: 455 | ids = np.concatenate((ids, names)) 456 | else: 457 | ids = names 458 | 459 | if max_air_read is not None: 460 | airs = airs[:, 0:max_air_read_samples] 461 | if False and speech_files is not None: 462 | print(f"Got {airs.shape}") 463 | airs = resample_op(airs) 464 | if airs.ndim < 2: 465 | airs = np.atleast_2d(airs) 466 | # print('Done resampling') 467 | if i > 0: 468 | if x.shape[1] < airs.shape[1]: 469 | npads = -x.shape[1] + airs.shape[1] 470 | x = np.concatenate((x, np.zeros((x.shape[0], npads)).astype(x.dtype)), axis=1) 471 | x = np.concatenate((x, airs), axis=0) 472 | else: 473 | if x.shape[1] > airs.shape[1]: 474 | npads = x.shape[1] - airs.shape[1] 475 | airs = np.concatenate((airs, np.zeros((airs.shape[0], npads)).astype( 476 | airs.dtype)), axis=1) 477 | x.resize((x.shape[0] + airs.shape[0], x.shape[1]), refcheck=False) 478 | 
x[-airs.shape[0]:, :] = np.array(airs) 479 | 480 | if speech_files is not None: 481 | if x_speech.shape[1] < this_speech.shape[1]: 482 | npads = -x_speech.shape[1] + this_speech.shape[1] 483 | x_speech = np.concatenate( 484 | (x_speech, np.zeros((x_speech.shape[0], npads)).astype(x_speech.dtype)), 485 | axis=1) 486 | x_speech = np.concatenate((x_speech, this_speech), axis=0) 487 | else: 488 | if x_speech.shape[1] > this_speech.shape[1]: 489 | npads = x_speech.shape[1] - this_speech.shape[1] 490 | this_speech = np.concatenate( 491 | (this_speech, np.zeros((this_speech.shape[0], 492 | npads)).astype( 493 | this_speech.dtype)), axis=1) 494 | x_speech.resize( 495 | (x_speech.shape[0] + this_speech.shape[0], x_speech.shape[1]), 496 | refcheck=False) 497 | x_speech[-this_speech.shape[0]:, :] = this_speech 498 | 499 | if x_rev_speech.shape[1] < this_rev_speech.shape[1]: 500 | npads = -x_rev_speech.shape[1] + this_rev_speech.shape[1] 501 | x_rev_speech = np.concatenate( 502 | (x_rev_speech, np.zeros((x_rev_speech.shape[0], npads) 503 | ).astype(x_rev_speech.dtype)), 504 | axis=1) 505 | x_rev_speech = np.concatenate((x_rev_speech, this_rev_speech), axis=0) 506 | else: 507 | if x_rev_speech.shape[1] > this_rev_speech.shape[1]: 508 | npads = x_rev_speech.shape[1] - this_rev_speech.shape[1] 509 | this_rev_speech = np.concatenate( 510 | (this_rev_speech, np.zeros((this_rev_speech.shape[0], npads) 511 | ).astype( 512 | this_rev_speech.dtype)), axis=1) 513 | x_rev_speech.resize( 514 | (x_rev_speech.shape[0] + this_rev_speech.shape[0], 515 | x_rev_speech.shape[1]), refcheck=False) 516 | x_rev_speech[-this_rev_speech.shape[0]:, :] = this_rev_speech 517 | else: 518 | x = np.array(airs) 519 | if speech_files is not None: 520 | x_speech = np.array(this_speech) 521 | x_rev_speech = np.array(this_rev_speech) 522 | 523 | if save_speech_associations: 524 | df = pd.DataFrame( 525 | {'air': wav_files, 526 | 'speech': np.array(speech_files)[associations] 527 | if given_associations is None else 528 | given_associations['speech'], 529 | 'offsets': offsets 530 | if given_associations is None else 531 | given_associations['offsets']}) 532 | 533 | df.to_csv(filename_associations, index=False) 534 | print(f'Saved: {filename_associations} ') 535 | if copy_associations_to is not None: 536 | run_command('cp ' + filename_associations + ' ' + copy_associations_to) 537 | print(f'Saved: {copy_associations_to}') 538 | 539 | if fs is not None: 540 | print(f'Got {x.shape[0]} AIRs of duration {x.shape[1] / float(fs)}') 541 | else: 542 | print(f'Got {x.shape[0]} AIRs of length {x.shape[1]}') 543 | 544 | if speech_files is not None: 545 | proc_data = x_rev_speech 546 | else: 547 | proc_data = x 548 | 549 | if drop_speech: 550 | x_rev_speech = [] 551 | x_speech = [] 552 | x = [] 553 | 554 | if no_fex: 555 | x_out = None 556 | print('Skipping feature extraction') 557 | else: 558 | x_out = data_post_proc(np.array(proc_data), forced_fs, start_at_max, framesize, 559 | get_pow_spec, max_air_len, wavform_logpow) 560 | 561 | print(f'Left with {x_out.shape} AIR features data ') 562 | 563 | ids = ids.astype(str) 564 | 565 | wrote_h5 = False 566 | if write_cached_latest: 567 | try: 568 | hf = File(latest_file, 'w') 569 | if no_fex: 570 | hf.create_dataset('x', data=[]) 571 | else: 572 | hf.create_dataset('x', data=x_out) 573 | hf.create_dataset('y', data=[]) 574 | hf.create_dataset('ids', data=[x.encode() for x in ids]) 575 | hf.create_dataset('class_names', data=[]) 576 | hf.create_dataset('airs', data=x) 577 | hf.create_dataset('utts', 
data=utt_per_env)
578 |             if speech_files is not None:
579 |                 hf.create_dataset('clean_speech', data=x_speech)
580 |                 hf.create_dataset('rev_names', data=x_rev_speech)
581 |             else:
582 |                 hf.create_dataset('clean_speech', data=[])
583 |                 hf.create_dataset('rev_names', data=[])
584 |             hf.close()
585 |             wrote_h5 = True
586 |             print(f'Wrote: {latest_file}')
587 |         except IOError as ME:
588 |             print(f'Cache writing failed with {ME}')
589 | 
590 |     if (not wrote_h5) and as_hdf5_ds:
591 |         raise AssertionError('Could not provide data in correct format')
592 |     if as_hdf5_ds:
593 |         hf = File(latest_file, 'r')
594 |         x_out = hf['x']
595 |         ids = hf['ids']
596 |         x = hf['airs']
597 |         x_speech = hf['clean_speech']
598 |         x_rev_speech = hf['rev_names']
599 |         # hf.close()
600 |         ids = np.array([x.decode() for x in ids])
601 | 
602 |     return (x_out, None), ids, None, (x, x_speech, x_rev_speech), utt_per_env
603 | 
604 | 
605 | def compile_ace_h5(wav_loc, saveloc, ft='.wav', all_single_channel=False):
606 |     """
607 | 
608 |     Create an HDF5 dataset which contains information about a set of files which describe AIRs of
609 |     acoustic environments. This file can be used to train DNNs using ace_discriminative_nets.py
610 | 
611 |     Args:
612 |         wav_loc: The location of the wav files as a list
613 |         saveloc: The location to save the HDF5 file to
614 |         ft: The file type to look for
615 |         all_single_channel: Assume that all responses are single channel (faster and does not
616 |             require soxi)
617 | 
618 |     Returns:
619 |         Nothing
620 | 
621 |     """
622 | 
623 |     all_wavs = find_all_ft(wav_loc, ft=ft, use_find=True)
624 |     channels = []
625 |     for i in range(len(all_wavs)):
626 |         print(f'Reading : {all_wavs[i]}')
627 |         all_wavs[i] = abspath(all_wavs[i])
628 |         if all_single_channel:
629 |             channels.append('1')
630 |         else:
631 |             try:
632 |                 channels.append(run_command('soxi -c ' + all_wavs[i])[0])
633 |             except OSError as ME:
634 |                 print('I think that soxi is not installed because when I tried to use it to get '
635 |                       'the number of channels, I got this ' + str(ME))
636 |                 raise
637 | 
638 |     hf = File(saveloc, 'w')
639 |     hf.create_dataset('filenames', data=all_wavs)
640 |     hf.create_dataset('chan', data=channels)
641 |     hf.close()
642 |     print(f'Done with : {saveloc}')
643 | 
644 | 
645 | def get_ace_xy(h5_file='../results_dir/ace_h5_info.h5', ace_base='../Local_Databases/AIR/ACE/',
646 |                y_type='room', group_by=None, utt_per_env=1, speech_files=None,
647 |                print_distributions=False,
648 |                parse_as_dirctories=False,
649 |                choose_channel=None,
650 |                **kwargs):
651 |     """
652 | 
653 |     Collects training data and labels for the training of DNNs using ace_discriminative_nets,
654 |     based on the ACE Challenge data [1].
655 | 
656 |     Args:
657 |         h5_file: Location of the HDF5 dataset file for the ACE database, which is provided with this
658 |             repository at Code/results_dir/ace_h5_info.h5. Contains information about the filenames,
659 |             number of channels and also ground truth acoustic parameter values. If you want to create a
660 |             new one, then use fe_utils.compile_ace_h5
661 |         ace_base: The location of the ACE database data
662 |         y_type: Creating labels from the data using specific information. This
663 |             can be either of:
664 |             'room', 'type', 'array', 'position', 'channel'
665 |         group_by: Creating grouping information from the data using specific information.
This
666 |             can be either of:
667 |             'room', 'recording', 'array', 'position', 'air', 'channel'
668 |         utt_per_env: Number of speech utterances to convolve with each AIR
669 |         speech_files: Speech directory to pick up speech from and convolve it with the AIRs
670 |         print_distributions: Print data information with regards to class distributions
671 |         parse_as_dirctories: (ignored)
672 |         choose_channel: (ignored)
673 |         **kwargs: Additional arguments to be passed to read_airs_from_wavs
674 | 
675 |     Returns:
676 |         (X, Y), Sample_names, Class_names,
677 |         (AIRs, Speech, Reverberant_speech),
678 |         (Group_name, Groups)
679 | 
680 |     """
681 |     parse_as_dirctories = False
682 | 
683 |     hf = File(h5_file, 'r')
684 |     wav_files = list((np.array(hf.get('filenames')).astype(str)).tolist())
685 |     chan = list((np.array(hf.get('chan')).astype(int) - 1).tolist())
686 | 
687 |     type_dict = {'502': 'Office', '803': 'Office', '503': 'Meeting_Room', '611': 'Meeting_Room',
688 |                  '403a': 'Lecture_Room', '508': 'Lecture_Room', 'EE-lobby': 'Building_Lobby'}
689 |     basenames = [thename.split('/')[-1].replace('EE_lobby', 'EE-lobby') for thename in wav_files]
690 |     room = [thename.split('_')[1] for thename in basenames]
691 |     array = [thename.split('_')[0] for thename in basenames]
692 |     room_type = [type_dict[thename.split('_')[1]] for thename in basenames]
693 |     recording = basenames
694 | 
695 |     if ace_base is None:
696 |         x_out = None
697 |         x = None
698 |         x_speech = None
699 |         x_rev_speech = None
700 |         ids = flatten_list([[basename(this_file).replace('.wav', '') + '_' + str(j) + '_ch' + str(k)
701 |                              for k in range(chan[i])]
702 |                             for i, this_file in enumerate(wav_files)
703 |                             for j in range(utt_per_env)])
704 |     else:
705 |         for i in range(len(wav_files)):
706 |             wav_files[i] = ace_base + '/' + wav_files[i]
707 |         (x_out, _), \
708 |         ids, _, \
709 |         (x, x_speech, x_rev_speech), \
710 |         utt_per_env = read_airs_from_wavs(
711 |             wav_files, utt_per_env=utt_per_env, speech_files=speech_files,
712 |             parse_as_dirctories=parse_as_dirctories,
713 |             choose_channel=chan,
714 |             **kwargs)
715 |     if 'ch' not in ids[0]:
716 |         if np.sum(['ch' in ids[i] for i in range(len(ids))]) > 0:
717 |             raise AssertionError('Unexpected condition')
718 |         ch = [0 for _ in range(len(ids))]
719 |     else:
720 |         ch = [int(i.split('ch')[1]) for i in ids]
721 | 
722 |     y = []
723 |     class_names = []
724 | 
725 |     flat_back_y = False
726 |     if not (isinstance(y_type, list) or isinstance(y_type, tuple)):
727 |         flat_back_y = True
728 |         y_type = (y_type,)
729 | 
730 |     for this_y_type in y_type:
731 |         if this_y_type == 'room':
732 |             new_y, new_class_names, _ = categorical_to_mat(room)
733 |             def_group_by = 'recording'
734 |         elif this_y_type == 'type':
735 |             new_y, new_class_names, _ = categorical_to_mat(room_type)
736 |             def_group_by = 'room'
737 |         elif this_y_type == 'array':
738 |             new_y, new_class_names, _ = categorical_to_mat(array)
739 |             def_group_by = 'recording'
740 |         elif this_y_type == 'position':
741 |             new_y, new_class_names, _ = categorical_to_mat(recording)
742 |             def_group_by = 'air'
743 |         elif this_y_type == 'channel':
744 |             new_y, new_class_names, _ = categorical_to_mat(ch)
745 |             def_group_by = 'position'
746 |         else:
747 |             raise AssertionError('Invalid y_type')
748 |         y.append(new_y)
749 |         class_names.append(new_class_names)
750 | 
751 |     flat_back_groups = False
752 |     if group_by is None:
753 |         group_by = (def_group_by,)
754 |     elif not (isinstance(group_by, list) or isinstance(group_by, tuple)):
755 |         flat_back_groups = True
756 |         group_by =
(group_by,) 757 | 758 | group_name, groups = ([], []) 759 | for this_group_by in group_by: 760 | if this_group_by == 'recording' or this_group_by == 'position': 761 | _, new_group_name, new_groups = categorical_to_mat(recording) 762 | elif this_group_by == 'room': 763 | _, new_group_name, new_groups = categorical_to_mat(room) 764 | elif this_group_by == 'array': 765 | _, new_group_name, new_groups = categorical_to_mat(array) 766 | elif this_group_by == 'air': 767 | new_groups = np.atleast_2d(np.arange(len(y))).T 768 | new_group_name = np.array(ids) 769 | elif this_group_by == 'channel': 770 | max_ch = max(ch) + 1 771 | ch_array = np.zeros((len(ch), max_ch), dtype=bool) 772 | for i in range(len(ch)): 773 | ch_array[i, ch[i]] = True 774 | new_groups = np.array(ch_array) 775 | new_group_name = np.array(['ch_' + str(i) for i in range(max_ch)]) 776 | else: 777 | raise AssertionError('Invalid group_by ' + this_group_by) 778 | group_name.append(new_group_name) 779 | groups.append(new_groups) 780 | 781 | for i in range(len(y)): 782 | if print_distributions: 783 | print_split_report(y[i]) 784 | if np.any(~(np.sum(y[i], axis=1) == 1)): 785 | raise AssertionError('Invalid y outputs') 786 | y[i] = np.concatenate([y[i][ii:ii + 1, :] for ii in range(y[i].shape[0]) 787 | for _ in range(utt_per_env)], 788 | axis=0) 789 | for ii in range(len(groups)): 790 | groups[ii] = [np.concatenate([list(range(i * utt_per_env, (i + 1) * utt_per_env)) 791 | for i in groups[ii][j]]).astype(int) 792 | for j in range(len(groups[ii]))] 793 | 794 | y = tuple(y) 795 | class_names = tuple(class_names) 796 | groups = tuple(groups) 797 | group_name = tuple(group_name) 798 | if flat_back_groups: 799 | groups = groups[0] 800 | group_name = group_name[0] 801 | if flat_back_y: 802 | y = y[0] 803 | class_names = class_names[0] 804 | 805 | return (x_out, y), ids, class_names, (x, x_speech, x_rev_speech), (group_name, groups) 806 | 807 | 808 | def categorical_to_mat(categorical): 809 | """ 810 | 811 | Converts a categorical variable vector into a one-hot class matrix 812 | 813 | Args: 814 | categorical: The categorical variable vector 815 | 816 | Returns: 817 | The class matrix as an array 818 | The name of each class corresponding to each column of the array 819 | The list of indices of 'categorical' which belong to the labels in 'unique_vals' 820 | 821 | """ 822 | categorical = np.array(categorical) 823 | unique_vals = np.unique(categorical) 824 | y = np.zeros((categorical.size, unique_vals.size), dtype=bool) 825 | groups = [] 826 | for i, val in enumerate(unique_vals): 827 | y[categorical == val, i] = True 828 | groups.append(np.where(categorical == val)[-1]) 829 | # groups=np.array(groups) 830 | return y, unique_vals, groups 831 | 832 | 833 | def collect_wavs(filenames, dest_fs=None): 834 | """ 835 | 836 | Collects and packages a set of wav files to an array of samples 837 | 838 | Args: 839 | filenames: File locations as a list 840 | dest_fs: Sampling frequency to use 841 | 842 | Returns: 843 | An array of the samples of the files as [N_files x N_samples] 844 | 845 | """ 846 | if not (isinstance(filenames, list) or isinstance(filenames, tuple)): 847 | filenames = [filenames] 848 | samples = [] 849 | max_len = 0 850 | for the_filename in filenames: 851 | fs, new_samples = read(the_filename) 852 | if dest_fs: 853 | new_samples = my_resample(new_samples, fs, dest_fs) 854 | if new_samples.ndim == 1: 855 | new_samples = np.atleast_2d(new_samples).T 856 | max_len = max(max_len, new_samples.shape[0]) 857 | samples.append(new_samples) 858 
| for i in range(len(samples)): 859 | this_len = samples[i].shape[0] 860 | missing = max_len - this_len 861 | if missing > 0: 862 | samples[i] = np.concatenate( 863 | (samples[i], np.zeros((missing, samples[i].shape[1]), dtype=samples[i].dtype))) 864 | 865 | out = np.concatenate([samples[i].T for i in range(len(samples))], axis=0) 866 | 867 | return out 868 | -------------------------------------------------------------------------------- /Code/pythonsrc/gan_model_worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 Constantinos Papayiannis 4 | # 5 | # This file is part of Reverberation Learning Toolbox for Python. 6 | # 7 | # Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify 8 | # it under the terms of the GNU General Public License as published by 9 | # the Free Software Foundation, either version 3 of the License, or 10 | # (at your option) any later version. 11 | # 12 | # Reverberation Learning Toolbox for Python is distributed in the hope that it will be useful, 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | # GNU General Public License for more details. 16 | # 17 | # You should have received a copy of the GNU General Public License 18 | # along with Reverberation Learning Toolbox for Python. If not, see . 19 | 20 | # Give the following arguments 21 | # Location of saved modeling results for ace which contains the result in the form the-location/the-name-of-the-environment-containing-the-room-name/log.txt 22 | # Scratchpad location 23 | 24 | ################################################################################################################## 25 | ################################################################################################################## 26 | # 27 | # Description: 28 | # 29 | # This script collects the results of modeling ACE AIRs [2] using the proposed model in [1]. Then these results are 30 | # used to train GANs for the rooms in ACE. This will result in 1 GAN for each of the 7 rooms, part of the ACE 31 | # challenge measurements. Each GAN is trained using data for each room and then produces 100 acoustic environment 32 | # instances from each model. These instances are to be used for training DNNs, providing a method for data 33 | # augmentation. 34 | # 35 | # To collect the results prior to running this step, run: 36 | # bash ace_acenvgenmodeling.sh /tmp/modeling_results 37 | # 38 | # Usage: 39 | # bash gan_model_worker.sh 40 | # 41 | # Example: 42 | # bash gan_model_worker.sh /tmp/modeling_results/ /tmp/gan_results/ 43 | # 44 | # 45 | # This file was original distributed in the repository at: 46 | # {repo} 47 | # If you use this code in your work, then cite [1]. 48 | # 49 | # [1] C. Papayiannis, C. Evers and P. A. Naylor, 50 | # "End-to-End Classification of Reverberant Rooms Using DNNs," 51 | # in IEEE/ACM Transactions on Audio, Speech, and Language Processing, 52 | # vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628. 
53 | # [2] http://www.ee.ic.ac.uk/naylor/ACEweb/index.html 54 | # 55 | ################################################################################################################# 56 | ################################################################################################################# 57 | 58 | 59 | set -e 60 | 61 | if [ "$#" -lt 2 ]; then 62 | echo 'Illegal number of parameters, expected >= 2. 63 | 1) The location where the logs of ace_acenvgenmodeling.sh results were saved 64 | 2) The location where the results and the data augmentation h5 dataset will be saved 65 | 3+) Arguments passed to gan_model.py 66 | Example: 67 | bash gan_model_worker.sh /tmp/modeling_results/ /tmp/gan_results/ 68 | 69 | ' 70 | exit 1 71 | fi 72 | 73 | ace_results=$1 74 | saveloc=$2 75 | test_array='Mobile' 76 | 77 | shift 78 | shift 79 | 80 | mkdir -p $saveloc 81 | 82 | for i in 611 403a 803 503 502 508 EE_lobby; do 83 | $HOME/anaconda2/bin/python -u acenvgenmodel_collect_results.py ` find $ace_results/*${i}* -name log.txt | grep -v "$test_array"` --saveloc $saveloc/ref_rep_$i/ 84 | log_dest=$saveloc/log_${i}.txt 85 | echo Working for room $i and saving log at $log_dest 86 | $HOME/anaconda2/bin/python -u gan_model.py --h5 $saveloc/ref_rep_$i/reflection_y_data.h5 --saveloc $saveloc/gan_$i/ --airname GAN_${i}_%d_RIR.wav --nodisplay $* | tee $log_dest 87 | done 88 | 89 | aug_h5=$saveloc/gan_aug_data.h5 90 | python -c "from fe_utils import compile_ace_h5; compile_ace_h5('$saveloc','$aug_h5',ft='RIR.wav');" 91 | echo Saved data augmentation AIR dataset at $aug_h5 92 | 93 | echo All done 94 | -------------------------------------------------------------------------------- /Code/pythonsrc/run_ace_discriminative_nets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 Constantinos Papayiannis 4 | # 5 | # This file is part of Reverberation Learning Toolbox for Python. 6 | # 7 | # Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify 8 | # it under the terms of the GNU General Public License as published by 9 | # the Free Software Foundation, either version 3 of the License, or 10 | # (at your option) any later version. 11 | # 12 | # Reverberation Learning Toolbox for Python is distributed in the hope that it will be useful, 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | # GNU General Public License for more details. 16 | # 17 | # You should have received a copy of the GNU General Public License 18 | # along with Reverberation Learning Toolbox for Python. If not, see . 19 | 20 | ################################################################################################################## 21 | ################################################################################################################## 22 | # 23 | # Description: 24 | # This script uses the code provided with this repository to run the experiments described in [1]. 25 | # The code is able to train and evaluate DNNs for the task of 'Room Classification' using the data provided with 26 | # the ACE challenge database [2]. The following DNNs and configurations can be trained and evaluated: 27 | # Using AIRs only 28 | # 1) RNN 29 | # 2) CNN 30 | # 3) CRNN 31 | # 4) Att RNN 32 | # 5) Att-CRNN 33 | # 34 | # A copy of the necessary files from the ACE database is provided with this repo. 
To unpack it, run:
35 | # Code/Local_Databases/AIR$ tar zxf ACE16.tar.gz
36 | # The corpus was published under the 'Creative Commons Attribution-NoDerivatives 4.0 International Public License' and in the package you can find a copy of the license
37 | #
38 | # Usage:
39 | # bash run_ace_discriminative_nets.sh <ace-dir> <speech-dir> <h5-file> <read-cache> <modes...>
40 | #
41 | # <ace-dir>: Location of ACE challenge data
42 | # <speech-dir>: Must include a TRAIN and a TEST subdirectory, with the corresponding speech files included. The provided script wav_concatenator.sh can be used to create longer speech utterances, which are used in this experiment. The experiment will use 5s of speech per AIR and will assume that speech utterances are longer. It uses offsetting of longer utterances as a primitive data augmentation method. The script has been successfully trialled with the TIMIT database. It creates a concatenation of all the wav files in a directory, for each directory. Since TIMIT has one directory per speaker, this creates a long utterance per speaker, ideal for the task.
43 | # <h5-file>: Location of the HDF5 dataset file for the ACE database, which is provided with this repository at Code/results_dir/ace_h5_info.h5. Contains information about the filenames, number of channels and also ground truth acoustic parameter values. If you want to create a new one, then use fe_utils.compile_ace_h5
44 | # <read-cache>: 0 or 1 to read any available caches
45 | # <modes...>: List of indices between 1 and 5, see the list above
46 | #
47 | # Example:
48 | # bash run_ace_discriminative_nets.sh ../Local_Databases/AIR/ACE16 ../results_dir/concWavs/concWavs/Local_Databases/speech/TIMIT/TIMIT/ ../results_dir/ace_h5_info.h5 0 4 5
49 | #
50 | #
51 | # This file was originally distributed in the repository at:
52 | # {repo}
53 | # If you use this code in your work, then cite [1].
54 | #
55 | # [1] C. Papayiannis, C. Evers and P. A. Naylor,
56 | # "End-to-End Classification of Reverberant Rooms Using DNNs,"
57 | # in IEEE/ACM Transactions on Audio, Speech, and Language Processing,
58 | # vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
59 | # [2] http://www.ee.ic.ac.uk/naylor/ACEweb/index.html
60 | #
61 | #################################################################################################
62 | #################################################################################################
63 |
64 | set -e
65 |
66 | if [ "$#" -lt 5 ]; then
67 | echo 'Illegal number of parameters, expected >= 5.
68 | 1) ACE dir (/media/cp510/ExtraGiaWindows/db_exp_data/Local_Databases/AIR/ACE16)
69 | 2) speech dir (/home/cp510/GitHub/base_git_repo/Code/results_dir/concWavs)
70 | 3) h5 loc (/home/cp510/GitHub/base_git_repo/Code/results_dir/ace_h5_info.h5)
71 | 4) 0/1 whether cache should be read or not.
72 | 5) Modes to run 1-5
73 | Example:
74 | run_ace_discriminative_nets.sh ../Local_Databases/AIR/ACE16 ../results_dir/concWavs/ ../results_dir/ace_h5_info.h5 1 4 5
75 | '
76 | exit 1
77 | fi
78 |
79 | ace_dir=$1
80 | speech_dir=$2
81 | h5_loc=$3
82 | force_readcache=$4
83 | shift
84 | shift
85 | shift
86 | shift
87 | mode=$@
88 | base_scrap=/tmp/
89 | mkdir -p $base_scrap/ace_discriminative_nets_eval/
90 | logloc=$base_scrap/ace_discriminative_nets_eval/log.txt
91 |
92 | args_speech=(--ace "$ace_dir" --h5 "$h5_loc" --speech "$speech_dir/TRAIN" "$speech_dir/TEST" --cacheloc "$base_scrap" --split array)
93 |
94 | echo "Speech Args : ${args_speech[*]}"
95 |
96 | if [ $force_readcache == 1 ]; then
97 | readcache_arg='--readcache'
98 | else
99 | readcache_arg=''
100 | fi
101 |
102 | for this_mode in ${mode[@]}; do
103 |
104 | case $this_mode in
105 | 1) # RNN
106 | mode_args="--rnn"
107 | ;;
108 | 2) # CNN
109 | mode_args="--cnn"
110 | ;;
111 | 3) # CRNN
112 | mode_args="--rnn --cnn"
113 | ;;
114 | 4) # Att-RNN
115 | mode_args="--rnn --att"
116 | ;;
117 | 5) # Att-CRNN
118 | mode_args="--rnn --cnn --att"
119 | ;;
120 | *)
121 | echo Invalid mode $this_mode
122 | continue
123 | ;;
124 | esac
125 |
126 | cmd=(python -u ace_discriminative_nets.py ${mode_args[@]} ${args_speech[*]} $readcache_arg)
127 | echo Running ${cmd[@]}
128 | ${cmd[@]} | tee $logloc
129 |
130 | readcache_arg='--readcache'
131 | done
132 |
133 | echo Logs at $logloc
134 |
--------------------------------------------------------------------------------
/Code/pythonsrc/run_cnnrnn_net.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Copyright 2018 Constantinos Papayiannis
4 | #
5 | # This file is part of Reverberation Learning Toolbox for Python.
6 | #
7 | # Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify
8 | # it under the terms of the GNU General Public License as published by
9 | # the Free Software Foundation, either version 3 of the License, or
10 | # (at your option) any later version.
11 | #
12 | # Reverberation Learning Toolbox for Python is distributed in the hope that it will be useful,
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | # GNU General Public License for more details.
16 | #
17 | # You should have received a copy of the GNU General Public License
18 | # along with Reverberation Learning Toolbox for Python. If not, see http://www.gnu.org/licenses/.
19 |
20 | ##################################################################################################################
21 | ##################################################################################################################
22 | #
23 | # Description:
24 | # This script uses the code provided with this repository to run the experiments described in [1].
25 | # The code is able to train and evaluate DNNs for the task of 'Room Classification' using the data provided with
26 | # the ACE challenge database [2]. The trained models are CNN-RNNs
27 | #
28 | # A copy of the necessary files from the ACE database is provided with this repo. To unpack it, run:
29 | # Code/Local_Databases/AIR$ tar zxf ACE16.tar.gz
30 | # The corpus was published under the 'Creative Commons Attribution-NoDerivatives 4.0 International Public License' and in the package you can find a copy of the license
31 | #
32 | # Usage:
33 | # bash run_cnnrnn_net.sh <ace-dir> <speech-dir> <h5-file> <read-cache>
34 | #
35 | # <ace-dir>: Location of ACE challenge data
36 | # <speech-dir>: Must include a TRAIN and a TEST subdirectory, with the corresponding speech files included. The provided script wav_concatenator.sh can be used to create longer speech utterances, which are used in this experiment. The experiment will use 5s of speech per AIR and will assume that speech utterances are longer. It uses offsetting of longer utterances as a primitive data augmentation method. The script has been successfully trialled with the TIMIT database. It creates a concatenation of all the wav files in a directory, for each directory. Since TIMIT has one directory per speaker, this creates a long utterance per speaker, ideal for the task.
37 | # <h5-file>: Location of the HDF5 dataset file for the ACE database, which is provided with this repository at Code/results_dir/ace_h5_info.h5. Contains information about the filenames, number of channels and also ground truth acoustic parameter values. If you want to create a new one, then use fe_utils.compile_ace_h5
38 | # <read-cache>: 0 or 1 to read any available caches
39 | #
40 | # Example:
41 | # bash run_cnnrnn_net.sh ../Local_Databases/AIR/ACE16 ../results_dir/concWavs/concWavs/Local_Databases/speech/TIMIT/TIMIT/ ../results_dir/ace_h5_info.h5 0
42 | #
43 | #
44 | # This file was originally distributed in the repository at:
45 | # {repo}
46 | # If you use this code in your work, then cite [1].
47 | #
48 | # [1] C. Papayiannis, C. Evers and P. A. Naylor,
49 | # "End-to-End Classification of Reverberant Rooms Using DNNs,"
50 | # in IEEE/ACM Transactions on Audio, Speech, and Language Processing,
51 | # vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
52 | # [2] http://www.ee.ic.ac.uk/naylor/ACEweb/index.html
53 | #
54 | #################################################################################################
55 | #################################################################################################
56 |
57 | set -e
58 |
59 | if [ "$#" -lt 4 ]; then
60 | echo 'Illegal number of parameters, expected >= 4.
61 | 1) ACE dir (/media/cp510/ExtraGiaWindows/db_exp_data/Local_Databases/AIR/ACE16)
62 | 2) speech dir (/home/cp510/GitHub/base_git_repo/Code/results_dir/concWavs)
63 | 3) h5 loc (/home/cp510/GitHub/base_git_repo/Code/results_dir/ace_h5_info.h5)
64 | 4) 0/1 whether cache should be read or not.
65 | 5+) Passed to ace_discriminative_nets.py
66 | Example:
67 | run_cnnrnn_net.sh ../Local_Databases/AIR/ACE16 ../results_dir/concWavs/ ../results_dir/ace_h5_info.h5 1
68 | '
69 | exit 1
70 | fi
71 |
72 | python_loc=$HOME/anaconda2/bin/python
73 | $python_loc utils_base.py
74 |
75 | ace_dir=$1
76 | speech_dir=$2
77 | h5_loc=$3
78 | force_readcache=$4
79 | shift
80 | shift
81 | shift
82 | shift
83 | extras=$@
84 |
85 | base_scrap=/tmp/
86 | if [ `hostname | cut -d'-' -f1` == 'login' ]; then
87 | base_scrap=$WORK
88 | fi
89 |
90 | save_loc_base=$base_scrap/ace_discriminative_nets_eval/
91 |
92 | tmp_args=( ${extras[@]} )
93 | args_speech=( --utts 20 --experiment room --ace $ace_dir --h5 $h5_loc --speech $speech_dir/TRAIN $speech_dir/TEST --cacheloc $base_scrap ${tmp_args[@]})
94 | args_air=( --experiment room --ace $ace_dir --h5 $h5_loc --cacheloc $base_scrap ${tmp_args[@]})
95 | # Change this to speech if you want to do speech
96 | final_args=${args_air[@]}
97 |
98 | echo Args : ${final_args[*]}
99 |
100 | # Mirror run_ace_discriminative_nets.sh: translate the 0/1 flag into the --readcache switch
101 | if [ $force_readcache == 1 ]; then
102 | readcache_arg='--readcache'
103 | else
104 | readcache_arg=''
105 | fi
106 |
107 | echo Running Speech CNN RNN
108 | this_saveloc=$save_loc_base/speech/speech_cnn_rnn
109 | mkdir -p $this_saveloc
110 | $python_loc -u ace_discriminative_nets.py --saveloc $this_saveloc ${final_args[*]} --cnn --rnn $readcache_arg | tee $this_saveloc/log.txt
111 |
--------------------------------------------------------------------------------
/Code/pythonsrc/utils_base.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Constantinos Papayiannis
2 | #
3 | # This file is part of Reverberation Learning Toolbox for Python.
4 | #
5 | # Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 | #
10 | # Reverberation Learning Toolbox for Python is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with Reverberation Learning Toolbox for Python. If not, see http://www.gnu.org/licenses/.
17 |
18 | """
19 | This file defines a set of basic functions to be used across a variety of applications
20 |
21 | This file was originally distributed in the repository at:
22 | {repo}
23 |
24 | If you use this code in your work, then cite:
25 | C. Papayiannis, C. Evers and P. A. Naylor,
26 | "End-to-End Classification of Reverberant Rooms Using DNNs,"
27 | in IEEE/ACM Transactions on Audio, Speech, and Language Processing,
28 | vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
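Example (an illustrative sketch of typical use; the calls below follow the
function definitions in this file and the values shown are made up):

    from utils_base import float2str, matmax
    float2str(3.14159, num_decimals=3)  # -> '3.142'
    matmax([1, 5, 2])                   # -> [5, 1] (maximum value and its index)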
29 |
30 | """
31 |
32 | import inspect
33 | import sys
34 | from os import walk
35 | from os.path import join
36 | from subprocess import Popen, PIPE
37 | from time import time
38 |
39 | import numpy as np
40 |
41 |
42 | def matrix_stats(x, flat=False):
43 | """
44 | Prints statistics for the data in a given matrix
45 |
46 | Args:
47 | x: The input data (np.array)
48 | flat: Flatten the data and provide global statistics instead of per column
49 |
50 | Returns:
51 | Nothing
52 |
53 | """
54 | args = {'axis': 0} if not flat else {}
55 | print_func = lambda xx: float2str(xx, num_decimals=5)
56 | print('For data with shape ' + str(x.shape))
57 | print('Mean : ' + print_func(np.mean(x, **args)))
58 | print('Median : ' + print_func(np.median(x, **args)))
59 | print('Min : ' + print_func(np.min(x, **args)))
60 | print('Max : ' + print_func(np.max(x, **args)))
61 | print('aMin : ' + print_func(np.min(np.abs(x), **args)))
62 | print('aMax : ' + print_func(np.max(np.abs(x), **args)))
63 | print('StD : ' + print_func(np.std(x, **args)))
64 |
65 |
66 | def isiter(x):
67 | """
68 | Checks if x is iterable
69 |
70 | Args:
71 | x: Check this
72 |
73 | Returns:
74 | Iterable or not
75 |
76 | """
77 | try:
78 | if x[0] > 0:
79 | pass
80 | except TypeError:
81 | return False
82 | except IndexError:
83 | pass
84 | return True
85 |
86 |
87 | def flatten_list(x):
88 | """
89 | Flattens lists of lists (of lists, of lists...)
90 |
91 | Args:
92 | x: The list of lists
93 |
94 | Returns:
95 | The flat list
96 |
97 | """
98 | if not isiter(x):
99 | raise TypeError('You did not give me something which makes sense')
100 | for i in range(len(x)):
101 | if isiter(x[i]):
102 | x[i] = flatten_list(x[i])
103 | else:
104 | x[i] = [x[i]]
105 |
106 | xx = np.concatenate([i for i in x]).tolist()
107 | return xx
108 |
109 |
110 | def add_axis_back(x, times=1, make_copy=False):
111 | """
112 | Add an axis as the last dimension of a numpy array, with dimensionality 1. Useful for
113 | adding channels to DNN training data when they natively are not present in the data.
114 |
115 | Args:
116 | x: The original data
117 | times: Number of axes to add
118 | make_copy: Make a copy of the array before changing it
119 |
120 | Returns:
121 | The array
122 |
123 | """
124 | if make_copy:
125 | x = np.array(x)
126 | for i in range(times):
127 | x.shape = tuple(list(x.shape) + [1])
128 | return x
129 |
130 |
131 | def add_axis_front(x, times=1, make_copy=False):
132 | """
133 | Add an axis as the first dimension of a numpy array, with dimensionality 1. Useful for
134 | adding channels to DNN training data when they natively are not present in the data.
135 |
136 | Args:
137 | x: The original data
138 | times: Number of axes to add
139 | make_copy: Make a copy of the array before changing it
140 |
141 | Returns:
142 | The new array
143 |
144 | """
145 | if make_copy:
146 | x = np.array(x)
147 | for i in range(times):
148 | x.shape = tuple([1] + list(x.shape))
149 | return x
150 |
151 |
152 | def repack_array_list(array_in, shapes=None, orientation='portrait'):
153 | """
154 | Pack an array into a list of vectors. The vectors can be either the rows or the columns.
155 | This operation is the opposite of flatten_array_list.
156 |
157 | Args:
158 | array_in: The array
159 | shapes: The original shapes of the vectors. This assumes that you had a list of vectors
160 | and each one had its own size. You had to put them into a 2D array so you made some of
161 | them longer or shorter. This list will contain the shapes of the original vectors.
162 | orientation: Setting this to 'portrait' means that you stacked the original arrays so that
163 | they are the columns of the array. Anything else means that they are the rows.
164 |
165 | Returns:
166 | The list of vectors
167 |
168 | """
169 | doing_landscape = not orientation == 'portrait'
170 | outlist = []
171 | array_in = np.atleast_2d(array_in)
172 | if shapes is None:
173 | if doing_landscape:
174 | shapes = (array_in.shape[1:], 1) * array_in.shape[0]
175 | else:
176 | shapes = (0, array_in.shape[1:]) * array_in.shape[0]
177 | row_counter = 0
178 | for i in shapes:
179 | if doing_landscape:
180 | if len(i) == 1:
181 | i = [i[0], 1]
182 | new_row_counter = row_counter + i[0]
183 | outlist.append(array_in[row_counter:new_row_counter, :][:, 0:i[1]])
184 | else:
185 | if len(i) == 1:
186 | i = [1, i[0]]
187 | new_row_counter = row_counter + i[1]
188 | outlist.append(array_in[:, row_counter:new_row_counter][0:i[0], :])
189 | return outlist
190 |
191 |
192 | def flatten_array_list(list_in, orientation='portrait'):
193 | """
194 | Packs a list of vectors into an array. The vectors can be either the rows or the columns of
195 | the new array. This operation is the opposite of repack_array_list.
196 |
197 | Args:
198 | list_in: The list of vectors
199 | orientation: Setting this to 'portrait' means that you will stack the original arrays so
200 | that they are the columns of the array. Anything else means that they are the rows.
201 |
202 | Returns:
203 | The new array
204 | The shapes of the vectors in the given list
205 | """
206 |
207 | doing_landscape = not orientation == 'portrait'
208 | if type(list_in) is np.ndarray:
209 | out_mat = np.atleast_2d(list_in)
210 | return out_mat, (out_mat.shape,)
211 | if len(list_in) < 2:
212 | out_mat = np.atleast_2d(list_in)
213 | return out_mat, (out_mat.shape,)
214 | max_y = 0
215 | for i in list_in:
216 | max_y = max(max_y, np.atleast_2d(i).shape[1 - doing_landscape])
217 | if doing_landscape:
218 | out_array = np.zeros((max_y, np.sum([np.atleast_2d(i).shape[1] for i in list_in])))
219 | else:
220 | out_array = np.zeros((np.sum([np.atleast_2d(i).shape[0] for i in list_in]), max_y))
221 | counter = 0
222 | or_shapes = []
223 | for i in range(len(list_in)):
224 | or_shapes.append(np.array(list_in[i]).shape)
225 | twoddlist = np.atleast_2d(list_in[i])
226 | n_padding = max_y - twoddlist.shape[1 - doing_landscape]
227 | next_counter = counter + twoddlist.shape[0 + doing_landscape]
228 | if not doing_landscape:
229 | newis = np.concatenate(
230 | (twoddlist, np.zeros((twoddlist.shape[0], n_padding),
231 | dtype=list_in[i].dtype)), axis=1)
232 | else:
233 | newis = np.concatenate(
234 | (np.zeros((twoddlist.shape[0], n_padding),
235 | dtype=list_in[i].dtype), twoddlist), axis=1)
236 | if not doing_landscape:
237 | out_array[counter:next_counter, :] = newis
238 | else:
239 | out_array[:, counter:next_counter] = newis
240 | counter = next_counter
241 | return out_array, tuple(or_shapes)
242 |
243 |
244 | def get_git_hash():
245 | """
246 |
247 | Get the Git hash of the current commit of the repo in this directory
248 |
249 | Returns:
250 | The hash
251 |
252 | """
253 | return run_command('git rev-parse HEAD')[0]
254 |
255 |
256 | def eprint(*args, **kwargs):
257 | """
258 | Prints to stderr
259 |
260 | Args:
261 | *args: Passed to print
262 | **kwargs: Passed to print
263 |
264 | Returns:
265 |
266 | """
267 | print(*args, file=sys.stderr, **kwargs)
268 |
269 |
270 | def run_command_list_stdout(command):
271 | """
272 | Runs a command in bash and returns its stdout split into lines
273 |
274 | Args:
275 | command: The command.
276 |
277 | Returns:
278 | The stdout as a list of strings. Each string element is a returned line
279 |
280 | """
281 | std_out = run_command(command)[0]  # run_command already returns decoded text
282 | return std_out.rstrip().split("\n")
283 |
284 |
285 | def run_command(command):
286 | """
287 | Runs a command in bash
288 |
289 | Args:
290 | command: The command.
291 |
292 | Returns:
293 | The stdout and the stderr
294 |
295 | """
296 | proc = Popen(command.split(' '), stdout=PIPE, stderr=PIPE)
297 | std_out, std_err = [x.decode() for x in proc.communicate()]
298 | if len(std_err) > 0:
299 | print('stderr: ' + std_err)
300 | return std_out.rstrip(), std_err.rstrip()
301 |
302 |
303 | def join_strings(str_iter, delim=', '):
304 | """
305 | Joins elements of an iterable as a string delimited by delim
306 |
307 | Args:
308 | str_iter: An iterable
309 | delim: The delimiter
310 |
311 | Returns:
312 | The concatenated string
313 |
314 | """
315 | out = ''
316 | for i in str_iter:
317 | out += delim + str(i)
318 | return out[len(delim):]
319 |
320 |
321 | def find_all_ft(directory_location, ft=".Wav", use_find=True, find_iname=False):
322 | """
323 | Finds all files of a specific filetype in a given set of directories (and subdirectories of
324 | them)
325 |
326 | Args:
327 | directory_location: The list of directories to look into
328 | ft: The extension of the file to look for
329 | use_find: Use the unix `find` command to do this
330 | find_iname: Use case insensitive search
331 |
332 | Returns:
333 | The list of files found
334 |
335 | """
336 | if isinstance(directory_location, str):
337 | directory_location = [directory_location]
338 | if use_find:
339 | name = 'name'
340 | if find_iname:
341 | name = 'iname'
342 | directories = ''
343 | for i in directory_location:
344 | directories += i + ' '
345 | directories = directories[0:-1]
346 | all_files = run_command(f'find -L {directories} -type f -{name} *{ft}')[0].rstrip().split("\n")
347 | else:
348 | if find_iname:
349 | raise AssertionError(
350 | 'You are expecting case insensitive searching but you are not using \'use_find\' '
351 | 'which allows this')
352 | print(f'Finding all {ft} in {directory_location}')
353 | all_files = []
354 | if not isinstance(directory_location, list):
355 | directory_location = [directory_location]
356 | for this_dir in directory_location:
357 | for root, dirs, files in walk(this_dir):
358 | for file in files:
359 | if file.endswith(ft):
360 | all_files.append(join(root, file))
361 | print(f'Found {len(all_files)} of {ft} in {directory_location}')
362 | return all_files
363 |
364 |
365 | def float2str(floatval, num_decimals=2):
366 | """
367 | Convert a float (or a numpy array) to a string with a given precision
368 |
369 | Args:
370 | floatval: The value of the float
371 | num_decimals: Decimal points to use
372 |
373 | Returns:
374 | The string
375 |
376 | """
377 | floatval = np.atleast_1d(np.array(floatval, dtype=float)).flatten()
378 | conv_rule = lambda x: (
379 | "{0:.
+ str(num_decimals if x >= 0 else num_decimals - 1) + "f}").format(x) 380 | if floatval.size == 0: 381 | return "" 382 | elif floatval.size == 1: 383 | return conv_rule(floatval[0]) 384 | else: 385 | stris = '' 386 | for i in floatval: 387 | stris += ', ' + conv_rule(i) 388 | return stris[1:] 389 | 390 | 391 | def getfname(): 392 | """ 393 | Get the name of the calling function 394 | 395 | Returns: 396 | The name 397 | 398 | """ 399 | curframe = inspect.currentframe() 400 | calframe = inspect.getouterframes(curframe, 2) 401 | return calframe[1][3] 402 | 403 | 404 | def matmax(alike): 405 | """ 406 | Finds the maximum value and the index of it 407 | 408 | Args: 409 | alike: An iterable 410 | 411 | Returns: 412 | The maximum value 413 | The index of the maximum value 414 | 415 | """ 416 | maxi = np.argmax(alike) 417 | maxv = alike[maxi] 418 | return [maxv, maxi] 419 | 420 | 421 | def matmin(alike): 422 | """ 423 | Finds the minimum value and the index of it 424 | 425 | Args: 426 | alike: An iterable 427 | 428 | Returns: 429 | The minimum value 430 | The index of the minimum value 431 | 432 | """ 433 | mini = np.argmin(alike) 434 | minv = alike[mini] 435 | return [minv, mini] 436 | 437 | 438 | def column_vector(alike): 439 | """ 440 | Converts the input to a column vector (numpy array) 441 | 442 | Args: 443 | alike: Input 444 | 445 | Returns: 446 | The column vector 447 | 448 | """ 449 | alike = np.atleast_1d(np.array(alike)).flatten() 450 | nelements = alike.size 451 | outmat = np.array(alike) 452 | outmat.shape = (nelements, 1) 453 | return outmat 454 | 455 | 456 | def row_vector(alike): 457 | """ 458 | Converts the input to a row vector (numpy array) 459 | 460 | Args: 461 | alike: Input 462 | 463 | Returns: 464 | The row vector 465 | 466 | """ 467 | npa = np.atleast_1d(np.array(alike)).flatten() 468 | nelements = npa.size 469 | npa.shape = (1, nelements) 470 | return npa 471 | 472 | 473 | def get_timestamp(): 474 | """ 475 | Generate a timestamp from the current time 476 | 477 | Returns: 478 | The timestamp as a string 479 | 480 | """ 481 | timestamp = str(time()) 482 | return timestamp 483 | 484 | 485 | def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): 486 | """ 487 | Check if floats are equal to a given precision 488 | 489 | Args: 490 | a: Float to compare 491 | b: Float to compare 492 | rel_tol: Relative tolerance 493 | abs_tol: Absolute tolerance 494 | 495 | Returns: 496 | Yes/no 497 | 498 | """ 499 | res = np.abs(a - b) <= np.maximum(rel_tol * np.maximum(np.abs(a), np.abs(b)), abs_tol) 500 | return res 501 | 502 | 503 | if __name__ == '__main__': 504 | print(f'Your repo git hash: {get_git_hash()}') 505 | -------------------------------------------------------------------------------- /Code/pythonsrc/utils_dnntrain.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Constantinos Papayiannis 2 | # 3 | # This file is part of Reverberation Learning Toolbox for Python. 4 | # 5 | # Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # Reverberation Learning Toolbox for Python is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with Reverberation Learning Toolbox for Python. If not, see http://www.gnu.org/licenses/.
17 |
18 | """
19 |
20 | This file contains a number of routines useful in the training and evaluation of DNNs using Keras.
21 |
22 | This file was originally distributed in the repository at:
23 | {repo}
24 |
25 | If you use this code in your work, then cite:
26 | C. Papayiannis, C. Evers and P. A. Naylor,
27 | "End-to-End Classification of Reverberant Rooms Using DNNs,"
28 | in IEEE/ACM Transactions on Audio, Speech, and Language Processing,
29 | vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
30 |
31 | """
32 |
33 | from random import shuffle
34 | from subprocess import call
35 | from time import time
36 |
37 | import numpy as np
38 | from keras.callbacks import Callback
39 | from keras.callbacks import EarlyStopping, TensorBoard
40 | from keras.utils import plot_model
41 |
42 | from fe_utils import print_split_report
43 | from utils_base import float2str
44 |
45 |
46 | def multi_batch_gen(x_data_list, y_out_list, samples_per_class, y_to_balance=None, sub_idxs=None,
47 | **kwargs):
48 | """
49 | Batch generator for keras model training. Based on the function in this file 'batch_gen', this
50 | function allows for batch generators to be created which fuse together a number of datasets.
51 |
52 | In addition to the documentation of 'batch_gen', this function allows for a batch generator
53 | to be created which accepts a list of arrays X, which contain training data and their
54 | corresponding labels Y. Then the batches will be constructed as if the data were part of only
55 | one dataset, using samples from all of them during the batch construction. It is useful if
56 | the arrays are too big to be loaded into RAM and they are stored in HDF5 datasets for instance.
57 |
58 | Args:
59 | x_data_list: List of X data as
60 | [[N_samples_1 X data_dimensionality],
61 | [N_samples_2 X data_dimensionality],...]
62 | y_out_list: List of labels or Y data as:
63 | [[N_samples_1 X out_data_dim],...
64 | [N_samples_2 X out_data_dim],...]
65 | samples_per_class: List of number of samples per class to take, per set
66 | y_to_balance: List of labels or Y data to be used to balance selections from each batch
67 | as:
68 | [[N_samples_1 X 1],...
69 | [N_samples_2 X 1],...]
70 | sub_idxs: List of lists of indices of samples to consider, indicating their
71 | locations in the X and Y arrays.
72 | **kwargs: Passed to batch_gen.
73 |
74 | Returns: The generator.
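Example (an illustrative sketch; the arrays are made-up placeholders with
100 and 50 samples respectively and one-hot labels over 3 classes):

    gen = multi_batch_gen([x_big, x_small], [y_big, y_small],
                          samples_per_class=[2, 1])
    x_batch, y_batch = next(gen)  # 2*3 + 1*3 = 9 samples per batch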
75 |
76 | """
77 | nsets = len(x_data_list)
78 | if y_to_balance is None:
79 | y_to_balance = [None] * nsets
80 | if sub_idxs is None:
81 | sub_idxs = [None] * nsets
82 |
83 | if not nsets == len(y_out_list):
84 | raise AssertionError('Invalid inputs')
85 | if not nsets == len(samples_per_class):
86 | raise AssertionError('Invalid inputs')
87 | if y_to_balance is None:
88 | y_to_balance = [None for _ in range(nsets)]
89 | if sub_idxs is None:
90 | sub_idxs = [None for _ in range(nsets)]
91 | if not nsets == len(samples_per_class):
92 | raise AssertionError('Invalid inputs')
93 |
94 | gens = []
95 | for i in range(nsets):
96 | gens.append(
97 | batch_gen(x_data_list[i], y_out_list[i], samples_per_class[i], sub_idxs=sub_idxs[i],
98 | y_to_balance=y_to_balance[i], **kwargs))
99 |
100 | while True:
101 | xs = [None for _ in range(nsets)]
102 | ys = [None for _ in range(nsets)]
103 | for i in range(nsets):
104 | xs[i], ys[i] = next(gens[i])
105 | yield np.concatenate(xs, axis=0), np.concatenate(ys, axis=0)
106 |
107 |
108 | def batch_gen(x_data, y_out, samples_per_class, y_to_balance=None, no_check=True, verbose=False,
109 | sub_idxs=None, augmentation_func=None, aug_prob=1.):
110 | """
111 | Batch generator for keras model training. Features:
112 | * It is capable of handling simultaneously multiple Inputs, useful for Functional API models.
113 | * Performs batch balancing in terms of the classes in Y. It accounts for class imbalance
114 | in the data. It can do that based on the given Y labels or balancing data can be given.
115 | * A smaller subset of the data can be made visible to the batch generator and the rest will be
116 | ignored and not used for the training.
117 | * A data augmentation function can be given, which will modify a percentage of the batch
118 | samples. The percentage is configurable with an augmentation probability.
119 | * Able to directly work on HDF5 datasets.
120 |
121 | (Array refers to numpy array or HDF5 datasets)
122 | Args:
123 | x_data: Array of training input data as [N_samples X data_dimensionality], or a list of
124 | such arrays, which would correspond to the set of inputs to the network for Functional
125 | API nets with multiple inputs.
126 | y_out: Labels of the data, or y data for regression as an array
127 | [N_samples X out_data_dimensionality].
128 | samples_per_class: The number of samples to include in each batch from each class
129 | y_to_balance: An array of labels for the data as [N_samples X 1], used to balance the data.
130 | no_check: Skip any checks
131 | verbose: Verbose output
132 | sub_idxs: Indices of samples to consider, indicating their locations in the X and Y
133 | array. Passed as an iterable.
134 | augmentation_func: Function used to modify samples as a data augmentation strategy.
135 | aug_prob: Probability of a sample in the batch to be modified, using the augmentation_func.
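Example (an illustrative sketch; x is a made-up array of inputs and y the
matching one-hot label matrix):

    gen = batch_gen(x, y, samples_per_class=4)
    x_batch, y_batch = next(gen)  # balanced: 4 samples drawn per class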
136 |
137 | Note: If you do not want the balancing operation then pass y_to_balance=np.ones((N_samples, 1))
138 | and samples_per_class=desired_batch_size
139 |
140 | Returns: The generator
141 |
142 | """
143 | turn_to_list = False
144 | if not (isinstance(x_data, list) or isinstance(x_data, tuple)):
145 | if isinstance(augmentation_func, list) or isinstance(augmentation_func, tuple):
146 | raise AssertionError('Unexpected condition')
147 | x_data = [x_data]
148 | turn_to_list = True
149 | nsets = len(x_data)
150 | if len(x_data) > 1:
151 | for i in range(1, nsets):
152 | if not x_data[i].shape[0] == x_data[0].shape[0]:
153 | raise AssertionError('Input error')
154 |
155 | if sub_idxs is None:
156 | sub_idxs = np.arange(0, x_data[0].shape[0]).astype(int)
157 | else:
158 | sub_idxs = np.sort(sub_idxs)
159 |
160 | if augmentation_func is None:
161 | augmentation_func = [lambda x: x for _ in range(nsets)]
162 | elif turn_to_list:
163 | augmentation_func = [augmentation_func]
164 | if not (isinstance(augmentation_func, list) or isinstance(augmentation_func, tuple)):
165 | raise AssertionError('Invalid Input')
166 | for i in range(nsets):
167 | if augmentation_func[i] is None:
168 | augmentation_func[i] = lambda x: x
169 |
170 | if not len(augmentation_func) == nsets:
171 | raise AssertionError('Input error')
172 |
173 | if y_to_balance is None:
174 | if y_out is None:
175 | raise AssertionError('Cannot work without any y\'s')
176 | y_to_balance = y_out
177 |
178 | index_pools = []
179 | for i in range(y_to_balance.shape[1]):
180 | index_pools.append(np.where(y_to_balance[sub_idxs, i])[-1].tolist())
181 | shuffle(index_pools[-1])
182 | else:
183 | samples_per_pool = samples_per_class
184 | counter = [0 for _ in range(len(index_pools))]
185 | while True:
186 | these_idxs = []
187 | for i in range(len(index_pools)):
188 | if len(index_pools[i]) > 0:
189 | subsamples = min(samples_per_pool, len(index_pools[i]))
190 | repeats = int(np.ceil(samples_per_pool / float(subsamples)))
191 | for _ in range(repeats):
192 | these_idxs += (index_pools[i][counter[i]:counter[i] + samples_per_pool])
193 | remove = subsamples * repeats - samples_per_pool
194 | if remove > 0:
195 | these_idxs = these_idxs[0:-remove]
196 | counter[i] += len(these_idxs)
197 | for i in range(len(index_pools)):
198 | if counter[i] >= len(index_pools[i]):
199 | if verbose:
200 | print('Reshuffling batch gen pool ' + str(i) + ' because i gave you ' +
201 | str(counter[i]) + ' samples already')
202 | shuffle(index_pools[i])
203 | counter[i] = 0
204 | if not no_check and (not len(these_idxs) == samples_per_pool * y_out.shape[1]):
205 | raise AssertionError('Generator failure')
206 | if verbose:
207 | print('New batch ready: ')
208 | print_split_report(y_to_balance[sub_idxs[these_idxs], ...])
209 | these_idxs = np.sort(these_idxs).tolist()
210 |
211 | if aug_prob == 0:
212 | if isinstance(x_data, list) or isinstance(x_data, tuple):
213 | out_x_aug = [x_data[k][sub_idxs[these_idxs], ...] for k in range(len(x_data))]
214 | else:
215 | out_x_aug = x_data[sub_idxs[these_idxs], ...]
216 | else:
217 | out_x_aug = [np.concatenate([
218 | augmentation_func[k](x_data[k][i:i + 1, ...])
219 | if np.random.rand() < aug_prob
220 | else
221 | x_data[k][i:i + 1, ...]
222 | for i in sub_idxs[these_idxs] 223 | ], 224 | axis=0) for k in range(nsets)] 225 | returner = lambda x: x 226 | if turn_to_list: 227 | returner = lambda x: x[0] 228 | if y_out is None: 229 | out_y = None 230 | elif isinstance(y_out, list) or isinstance(y_out, tuple): 231 | out_y = [y_out[k][sub_idxs[these_idxs], ...] for k in range(len(y_out))] 232 | else: 233 | out_y = y_out[sub_idxs[these_idxs], ...] 234 | yield returner(out_x_aug), out_y 235 | 236 | 237 | def model_trainer(the_batch_gen, in_shape, out_shape, get_model, val_patience=15, 238 | loss_patience=10, val_gen=None, 239 | tensorlog=False, callbacks=[], model_filename=None, epochs=1000, 240 | save_model_image=True, print_summary=True, steps_per_epoch=10, 241 | scratchpad='/tmp/', **kwargs): 242 | """ 243 | A function which trains a Keras model for classification. It handles callbacks 244 | and puts together the training strategy, given a batch generator. 245 | 246 | Args: 247 | the_batch_gen: Batch generator, able to be used with Keras fit_generator 248 | in_shape: Input dimensionality for the model 249 | out_shape: Number of classes 250 | get_model: A function which constructs the model as 251 | get_model(input_shape, out_shape, **kwargs) 252 | and returns only a Keras Sequential model 253 | val_patience: Validation loss patience for Early Stopping. 254 | loss_patience: Training loss patience for Early Stopping. 255 | val_gen: Batch generator, able to be used with Keras fit_generator. Used for generating 256 | validation data. 257 | tensorlog: Location to save Tensorboard logs. 258 | callbacks: A list of callbacks to append to the network. 259 | model_filename: The filename for the model to be saved in. 260 | epochs: Number of training epochs 261 | save_model_image: Ask for the model diagram to be saved. 262 | print_summary: Print a summary of the model structure. 263 | steps_per_epoch: Number of generator batches to be used per epoch. 
264 | scratchpad: Location for any data saving
265 | **kwargs: Passed to get_model
266 |
267 | Returns:
268 | The trained model and the filename it was saved as
269 | """
270 |
271 | timestamp = str(time())
272 | if model_filename is None:
273 | call(["mkdir", "-p", scratchpad])
274 | model_filename = scratchpad + '/ace_model' + timestamp + '.h5'
275 | input_shape = in_shape
276 | model = get_model(input_shape, out_shape, **kwargs)
277 | if print_summary:
278 | model.summary()
279 |
280 | model.compile(loss='categorical_crossentropy', optimizer='adam')
281 |
282 | if loss_patience > 0:
283 | callbacks.append(
284 | EarlyStopping(
285 | monitor='loss', min_delta=0, patience=loss_patience, verbose=1,
286 | mode='auto'))
287 | if val_patience is not None:
288 | if val_patience > 0:
289 | callbacks.append(
290 | EarlyStopping(
291 | monitor='val_loss', min_delta=0, patience=val_patience, verbose=1,
292 | mode='auto'))
293 | if tensorlog:
294 | tensordir = model_filename.replace('.h5', '_tensorlog')
295 | callbacks.append(
296 | TensorBoard(log_dir=tensordir, histogram_freq=0,
297 | batch_size=16,
298 | write_graph=False, write_grads=False, write_images=True,
299 | embeddings_freq=0, embeddings_layer_names=None,
300 | embeddings_metadata=None))
301 | print('Will save Tensorboard Logs at : ' + tensordir)
302 | if save_model_image:
303 | imgdir = model_filename.replace('.h5', '.pdf')
304 | try:
305 | plot_model(model, to_file=imgdir, show_shapes=True)
306 | except (ImportError, ValueError):
307 | print('Could not save model image')
308 | print('Saved model image at: ' + imgdir)
309 |
310 | print('Training...')
311 | model.fit_generator(the_batch_gen, epochs=epochs,
312 | validation_data=val_gen, validation_steps=int(np.ceil(.15 * steps_per_epoch)),
313 | verbose=0, callbacks=callbacks, steps_per_epoch=steps_per_epoch, )
314 | for i in callbacks:
315 | if type(i) is PostEpochWorker:
316 | best_val_model = i.best_val_model
317 | if best_val_model is not None:
318 | model = best_val_model
319 | try:
320 | model.save(model_filename)
321 | print('Saved : ' + model_filename)
322 | except IOError as ME:
323 | print('Could not save model ' + model_filename + ' because ' + str(ME))
324 | return model, model_filename
325 |
326 |
327 | def accuracy_eval(x, y, cmodel, prefix=None):
328 | """
329 | Accepts input data, labels and a model to predict the labels, which are then evaluated in
330 | terms of their accuracy.
331 |
332 | Can be combined with PostEpochWorker, to provide an evaluation of the accuracy in a custom
333 | way during the training of DNNs as:
334 |
335 | PostEpochWorker(
336 | (x_out_train[val_idxs, :, :],
337 | x_out_test[test_idxs, :, :]),
338 | (y_train[val_idxs, :],
339 | y_test[test_idxs, :]),
340 | model_filename,
341 | eval_fun=(
342 | lambda x, y, cmodel: accuracy_eval(x, y, cmodel, prefix='Val'),
343 | lambda x, y, cmodel: accuracy_eval(x, y, cmodel, prefix='Test')),
344 | eval_every_n_epochs=100)
345 |
346 | Args:
347 | x: Input data
348 | y: Labels
349 | cmodel: Trained model for inference
350 | prefix: Prefix for the reporting
351 |
352 | Returns: The predictions
353 |
354 | """
355 | y_pred = np.argmax(cmodel.predict(x), axis=1).flatten()
356 | acc = np.sum(y_pred == np.argmax(y, axis=1)).flatten() / float(x.shape[0])
357 | print(((prefix + ' ') if prefix is not None else '') + 'Accuracy: ' + float2str(acc, 4))
358 | y_pred_out = np.zeros_like(y)
359 | for i in range(y_pred_out.shape[0]):
360 | y_pred_out[i, y_pred[i]] = True
361 | n_hots = np.sum(y_pred_out, axis=1)
362 | if ~np.all(n_hots == 1):
363 | too_hot = np.where(~(n_hots == 1))[-1]
364 | raise AssertionError(
365 | 'Predictions do not make sense because the following idxs had more than one hots ' +
366 | str(too_hot) + ' with the following hots ' + str(n_hots[too_hot]))
367 | return y_pred_out
368 |
369 |
370 | def get_scaler_descaler(x, verbose=False):
371 | """
372 | Creates scaling and descaling functions for preparation of training data and reconstruction
373 | from DNNs
374 |
375 | Args:
376 | x: Input data
377 | verbose: Verbose reporting
378 |
379 | Returns:
380 | Scaler function object
381 | Descaler function object
382 |
383 | """
384 | if x.ndim > 2:
385 | conced_x = np.concatenate(x, axis=0)
386 | else:
387 | conced_x = np.array(x)
388 | subval = np.min(conced_x, axis=0)
389 | scale_val = np.max(conced_x, axis=0) - subval
390 | scale_val[scale_val == 0] = 1
391 |
392 | subval.shape = tuple([1, 1] + list(subval.shape))
393 | scale_val.shape = tuple([1, 1] + list(scale_val.shape))
394 |
395 | if verbose:
396 | print('Will construct scaler with subs: ' + float2str(
397 | subval) + "\n" + '... and scalers ' + float2str(
398 | scale_val))
399 |
400 | def scaler(y):
401 | return (y - subval) / scale_val
402 |
403 | def descaler(y):
404 | return y * scale_val + subval
405 |
406 | return scaler, descaler
407 |
408 |
409 | class PostEpochWorker(Callback):
410 | def __init__(self, x_data, y_data, model_filename, eval_fun=None, eval_every_n_epochs=1,
411 | save_best=True):
412 | """
413 | Produces an instance of a keras Callback. It allows for running a set of routines when a
414 | number of training epochs has elapsed.
415 |
416 | Each routine accepts the X and Y data and the best performing model up to the current
417 | epoch. The best performing model is evaluated by the validation accuracy (or the train
418 | accuracy if no validation is done). It allows you to evaluate your model during
419 | training and print performance reports. It also allows you to keep snapshots
420 | of your model during training.
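Example (an illustrative sketch; the validation data and filename are
placeholders, and accuracy_eval is the helper defined above):

    worker = PostEpochWorker(
        x_val, y_val, '/tmp/model_snapshot.h5',
        eval_fun=lambda x, y, cmodel: accuracy_eval(x, y, cmodel, prefix='Val'),
        eval_every_n_epochs=10)
    # pass it to model_trainer via callbacks=[worker]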
421 |
422 | Args:
423 | x_data: The list of X data
424 | y_data: The list of Y data
425 | model_filename: The filename to use for saving the model snapshots
426 | eval_fun: The list of functions to run
427 | eval_every_n_epochs: The number of epochs after which to run the routines
428 | save_best: Set True to save the model snapshots
429 | """
430 | Callback.__init__(self)
431 | self.save_best = save_best
432 | self.eval_every_n_epochs = eval_every_n_epochs
433 | self.eval_fun = eval_fun
434 | if eval_fun is None:
435 | self.eval_fun = []
436 |
437 | elif not isinstance(eval_fun, list) and not isinstance(eval_fun, tuple):
438 | self.eval_fun = [eval_fun]
439 | else:
440 | self.eval_fun = eval_fun
441 | if not isinstance(x_data, list) and not isinstance(x_data, tuple):
442 | self.x_test = [x_data]
443 | else:
444 | self.x_test = x_data
445 | if not isinstance(y_data, list) and not isinstance(y_data, tuple):
446 | self.y_test = [y_data]
447 | else:
448 | self.y_test = y_data
449 | if not isinstance(self.y_test, type(self.x_test)) or not isinstance(self.eval_fun,
450 | type(self.x_test)):
451 | raise AssertionError('Given types for x, y or eval_fun are inconsistent')
452 | self.best_val_loss = None
453 | self.model_filename = model_filename
454 | self.best_val_model = None
455 | self.update_since_last = False
456 |
457 | def on_train_begin(self, logs={}):
458 | self.losses = []
459 | if self.save_best:
460 | print('Will save best model as ' + self.model_filename)
461 |
462 | def run_eval(self, epoch, logs={}):
463 | if epoch is None:
464 | eval_go = True
465 | else:
466 | eval_go = ((epoch + 1) % self.eval_every_n_epochs == 0) or self.eval_every_n_epochs == 1
467 | if eval_go:
468 | if len(self.eval_fun) > 0:
469 | if self.update_since_last:
470 | print('Running eval at epoch ' + (
471 | str(epoch) if epoch is not None else '*last*') +
472 | ' ')
473 | for i in range(len(self.eval_fun)):
474 | if self.eval_every_n_epochs > 1:
475 | self.eval_fun[i](np.array(self.x_test[i]), np.array(self.y_test[i]),
476 | self.best_val_model)
477 | else:
478 | self.eval_fun[i](np.array(self.x_test[i]), np.array(self.y_test[i]),
479 | self.model)
480 | else:
481 | print(f'Skipping eval of epoch {epoch} since this is a dead season {" " * 25}', end='\r')
482 | self.update_since_last = False
483 |
484 | def on_epoch_end(self, epoch, logs={}):
485 |
486 | cval_loss = logs.get('val_loss')
487 | loss_name = 'val_loss'
488 | if cval_loss is None:
489 | loss_name = 'loss'
490 | cval_loss = logs.get('loss')
491 | if self.best_val_loss is None or cval_loss <= self.best_val_loss:
492 | self.update_since_last = True
493 | self.best_val_loss = cval_loss
494 | self.best_val_model = self.model
495 | if self.save_best:
496 | try:
497 | self.model.save(self.model_filename)
498 | except TypeError:
499 | print(f'Could not save model {self.model_filename}')
500 | print(f'At epoch : {epoch} found new best {loss_name} model with {loss_name} '
501 | f'{float2str(self.best_val_loss, 12)}{" " * 25}', end='\r')
502 |
503 | self.run_eval(epoch)
504 |
505 | def on_train_end(self, logs=None):
506 | print(' ' * 74, end='\r')
507 | self.run_eval(None)
508 |
--------------------------------------------------------------------------------
/Code/pythonsrc/utils_reverb.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Constantinos Papayiannis
2 | #
3 | # This file is part of Reverberation Learning Toolbox for Python.
4 | #
5 | # Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 | #
10 | # Reverberation Learning Toolbox for Python is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with Reverberation Learning Toolbox for Python. If not, see http://www.gnu.org/licenses/.
17 |
18 | """
19 |
20 | This is a collection of functions relevant to room acoustics and reverberation
21 |
22 | This file was originally distributed in the repository at:
23 | {repo}
24 |
25 | If you use this code in your work, then cite:
26 | C. Papayiannis, C. Evers and P. A. Naylor,
27 | "End-to-End Classification of Reverberant Rooms Using DNNs,"
28 | in IEEE/ACM Transactions on Audio, Speech, and Language Processing,
29 | vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
30 |
31 | """
32 | import numpy as np
33 | from scipy.optimize import curve_fit
34 |
35 | from utils_base import matmax, getfname, column_vector
36 | from utils_spaudio import enframe, overlapadd
37 | from utils_spaudio import get_array_energy
38 |
39 |
40 | def npm(h, hhat):
41 | """
42 |
43 | Estimates the Normalized Projection Misalignment (NPM) from [1]
44 |
45 | Args:
46 | h: AIR to compare
47 | hhat: AIR to compare
48 |
49 | Returns: The NPM
50 |
51 | [1] Morgan, D.R., Benesty, J. and Sondhi, M.M., 1998. On the evaluation of estimated impulse
52 | responses. IEEE Signal processing letters, 5(7), pp.174-176.
53 |
54 | """
55 |
56 | hhat = np.array(hhat)
57 | h = np.array(h)
58 |
59 | if hhat.ndim > 1 or h.ndim > 1:
60 | raise AssertionError('Expecting single channel responses')
61 |
62 | h = h.flatten()
63 | hhat = hhat.flatten()
64 |
65 | epsilon = np.sum(h * hhat) / (
66 | np.sqrt(np.sum(hhat * hhat) * np.sum(h * h))
67 | )
68 | npm_val = 1 - epsilon ** 2
69 |
70 | return npm_val
71 |
72 |
73 | def scale_with_absorption_coefs(x, fs, freqs, abs_coef, framesize=0.020, times=(1,)):
74 | """
75 |
76 | Filters a given input, based on the sound energy absorption coefficients provided,
77 | in a frame-based mode
78 |
79 | Args:
80 | x: Audio signal
81 | fs: Sampling frequency
82 | freqs: Frequency points
83 | abs_coef: Sound energy absorption coefficient at the given frequency points
84 | framesize: Frame length in seconds
85 | times: Number of times to pass the signal through the filtering
86 |
87 | Returns:
88 | The filtered signal
89 |
90 | """
91 |
92 | times = np.array(times).round().astype(int)
93 | abs_coef = np.atleast_2d(abs_coef)
94 |
95 | if np.sum(times) == 0:
96 | return x
97 |
98 | framelength = int(np.ceil(framesize * fs))
99 | window = np.hanning(framelength)
100 | original_length = x.size
101 | if x.size < framelength:
102 | missing = framelength - x.size
103 | x = np.concatenate((x.flatten(), np.zeros((missing,)).astype(x.dtype)))
104 | x = np.atleast_2d(x)
105 | else:
106 | x = enframe(x, framelength, int(np.ceil(framelength / 2)))
107 | if x.shape[0] > 1:
108 | for i in range(x.shape[0]):
109 | x[i, :] = x[i, :] * window
110 | xft = np.fft.rfft(x, axis=1)
111 | dreqs = np.arange(0, xft.shape[1], 1) / float(xft.shape[1]) * fs / 2.
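# Each row of xft now holds the one-sided spectrum of one windowed frame, and
# dreqs gives the frequency in Hz of each rfft bin. get_scale below linearly
# interpolates the given absorption coefficients onto these bins.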
112 |
113 | if not times.size == abs_coef.shape[0]:
114 | raise AssertionError('times for application should match filters')
115 |
116 | def get_scale(f, freqs_local, abs_coef_local):
117 | previous = np.where(f > freqs_local)[-1]
118 | if previous.size == 0:
119 | previous = 0
120 | else:
121 | previous = previous[-1]
122 | scale = 1
123 | for i, this_time in enumerate(times):
124 | if this_time > 0:
125 | if previous == freqs_local.size - 1:
126 | this_absorption = abs_coef_local[i, -1]
127 | else:
128 | this_absorption = abs_coef_local[i, previous] * (f - freqs_local[previous]) / (
129 | freqs_local[previous + 1] - freqs_local[previous]) + \
130 | abs_coef_local[i, previous + 1] * (
131 | -f + freqs_local[previous + 1]) / (
132 | freqs_local[previous + 1] - freqs_local[previous])
133 |
134 | scale *= np.sqrt(1 - this_absorption) ** this_time
135 | return scale
136 |
137 | freqs = np.array(freqs)
138 | scale = []
139 | for i in range(dreqs.size):
140 | scale.append(get_scale(dreqs[i], freqs, abs_coef))
141 | xft[:, i] = xft[:, i] * scale[-1]
142 |
143 | y = np.fft.irfft(xft, axis=1)
144 | if y.shape[0] > 1:
145 | y = overlapadd(y, inc=int(np.ceil(framelength / 2)))[0]
146 | else:
147 | y = y.flatten()
148 |
149 | y = y[0:original_length]
150 | return y
151 |
152 |
153 | def get_drr_linscale(air_fir_taps, sampling_freq, direct_window_length_secs=0.0008,
154 | ignore_reflections_up_to=None):
155 | """
156 | Estimates the Direct to Reverberant ratio given an Acoustic Impulse Response (AIR)
157 |
158 | Args:
159 | air_fir_taps: The taps of the AIR
160 | sampling_freq: The sampling frequency
161 | direct_window_length_secs: The length of the window that is estimated to contain the
162 | direct sound
163 | ignore_reflections_up_to: The reflections up to this point (in seconds) are ignored and
164 | not considered to be part of either the early or the late part.
165 |
166 | Returns: The DRR in linear scale
167 |
168 | """
169 | air_fir_taps = np.array(air_fir_taps)
170 | if air_fir_taps.ndim > 1:
171 | raise NameError(getfname() + "AIR_Not1D")
172 | nsidesamples = int(np.ceil(direct_window_length_secs / 2. * sampling_freq))
173 | dpathcenter = abs(air_fir_taps).argmax()
174 | dstartsample = max(0, dpathcenter - nsidesamples)  # clamp the window start at the first tap
175 | dendsample = min(dpathcenter + nsidesamples, air_fir_taps.size)
176 | if ignore_reflections_up_to is not None:
177 | ignore_until_sample = int(np.ceil(ignore_reflections_up_to * sampling_freq))
178 | if ignore_until_sample > dendsample:
179 | air_fir_taps[dendsample:ignore_until_sample] = 0
180 | else:
181 | print('You gave an ignore range for DRR calculation but it was invalid')
182 | return get_array_energy(air_fir_taps[dstartsample:dendsample]) \
183 | / get_array_energy(air_fir_taps[dendsample:])
184 |
185 |
186 | def get_t60_decaymodel(air_fir_taps, sampling_freq):
187 | """ Estimates the Reverberation Time, given an Acoustic Impulse Response.
188 |
189 | The mode of operation is defined by the reference below.
190 | This is a wrapper of a python translation of the code made available by
191 | the authors of: Karjalainen, Antsalo, and Peltonen,
192 | Estimation of Modal Decay Parameters from Noisy Response Measurements.
193 | at : http://www.acoustics.hut.fi/software/decay
194 |
195 | Examples:
196 | For an AIR sampled at 48 kHz:
197 |
198 | reverb_time = get_t60_decaymodel(air, 48000)
199 |
200 | Args:
201 | air_fir_taps : Acoustic Impulse Response to process
202 | sampling_freq : The sampling frequency at which the AIR was recorded
203 |
204 | Returns:
205 | An estimate of the reverberation time in seconds
206 |
207 | """
208 |
209 | def decay_model(x_points, param0, param1, param2):
210 | """ The function used by the non-linear least squares fitting method to
211 | estimate the decay parameters"""
212 | expf = 0.4
213 | y1_dm = np.multiply(param0, np.exp(np.multiply(param1, x_points)))
214 | y2_dm = param2
215 | fit_res = np.multiply(weights, np.power(
216 | np.add(np.power(y1_dm, 2), np.power(y2_dm, 2)), 0.5 * expf))
217 | return fit_res
218 |
219 | # air is a 1D list
220 | # Set up things. Move to dB domain and scale
221 | leny = len(air_fir_taps)
222 | air = np.multiply(20, np.log10(abs(air_fir_taps) + np.finfo(float).eps))
223 | _, ymaxi = matmax(air)
224 | air = air - air[ymaxi]
225 | weights = [1] * leny
226 | weights[0:max(1, ymaxi)] = [0] * max(1, ymaxi)
227 | time_points = np.linspace(0, leny / float(sampling_freq), leny)
228 | # Lin fit
229 | leny2 = leny // 2
230 | leny10 = leny // 10
231 | ydata = np.power(np.power(10, air / 20.), 0.4)
232 | start_of_range = np.nonzero(weights)[0][0]
233 | meanval1 = np.mean(ydata[start_of_range:leny10 + start_of_range + 1])
234 | meanvaln = np.mean(ydata[leny - leny10:leny])
235 | tmat = np.concatenate((np.ones((leny2, 1)),
236 | column_vector(time_points[start_of_range:leny2 + start_of_range])),
237 | axis=1)
238 | tau0 = np.linalg.lstsq(tmat, air[start_of_range:leny2 + start_of_range], rcond=None)
239 | tau0 = tau0[0][1] / 8.7
240 | ydata = np.multiply(weights, np.array(ydata))
241 | fit_bounds = ([0, -2000, 0], [200., -0.1, 200.])
242 | if tau0 > -0.1:  # to satisfy the bounds
243 | tau0 = -0.1
244 |
245 | sol_final = curve_fit(decay_model, time_points, ydata, p0=(meanval1, tau0, meanvaln),
246 | bounds=fit_bounds)
247 |
248 | reverb_time = np.log(1 / 1000.) / float(sol_final[0][1])
249 | if reverb_time <= 0:
250 | raise NameError(getfname() + ':NegativeRT')
251 |
252 | return reverb_time
253 |
254 |
255 | def get_edc(air_fir_taps):
256 | """
257 | Calculates and returns the Energy Decay Curve (EDC) (Schroeder Integral) of the supplied
258 | AIR.
259 |
260 | Args:
261 | air_fir_taps: Taps of FIR filter representation of AIR
262 |
263 | Returns:
264 | The EDC of the supplied AIR as a numpy.array
265 |
266 | Examples:
267 | edcurve = get_edc(air_fir_taps)
268 |
269 | """
270 | edcurve = np.flip(np.cumsum(np.array(air_fir_taps[::-1]) ** 2).flatten(), 0)  # reversed cumulative sum of the squared taps (Schroeder integral)
271 | return edcurve
272 |
273 |
274 | def air_up_to_db(air_fir_taps, up_to_db):
275 | """
276 |
277 | Args:
278 | air_fir_taps: Input AIR
279 | up_to_db: The energy cutoff point in dB
280 |
281 | Returns: The AIR truncated from tap 0 to tap N, the point at which the remaining
282 | energy falls more than 'up_to_db' dB below the energy of the
283 | entire AIR
284 |
285 | """
286 | up_to = 10 ** (up_to_db / 10.)
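# The dB cutoff is converted to a linear energy ratio, e.g. up_to_db=-60
# gives up_to=1e-6. The EDC is compared against this value directly below,
# which assumes the AIR energy is normalised (an EDC starting at 1).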
287 |     air_edc = get_edc(air_fir_taps)
288 |     cutoff_sample = np.where(air_edc < up_to * air_edc[0])[0]
289 |     if cutoff_sample.size > 0:
290 |         cutoff_sample = min(air_fir_taps.size, cutoff_sample[0])
291 |     else:
292 |         cutoff_sample = air_fir_taps.size
293 | 
294 |     return air_fir_taps[0:cutoff_sample]
295 | 
--------------------------------------------------------------------------------
/Code/pythonsrc/utils_spaudio.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Constantinos Papayiannis
2 | #
3 | # This file is part of Reverberation Learning Toolbox for Python.
4 | #
5 | # Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 | #
10 | # Reverberation Learning Toolbox for Python is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with Reverberation Learning Toolbox for Python. If not, see <http://www.gnu.org/licenses/>.
17 | 
18 | """
19 | 
20 | This is a collection of functions relevant to speech and audio processing.
21 | 
22 | This file was originally distributed in the repository at:
23 | {repo}
24 | 
25 | If you use this code in your work, then cite:
26 | C. Papayiannis, C. Evers and P. A. Naylor,
27 | "End-to-End Classification of Reverberant Rooms Using DNNs,"
28 | in IEEE/ACM Transactions on Audio, Speech, and Language Processing,
29 | vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
30 | 
31 | """
32 | import numpy as np
33 | from scipy.signal import lfilter
34 | 
35 | from utils_base import getfname, column_vector, row_vector
36 | 
37 | resample_eng = None
38 | 
39 | from utils_base import repack_array_list
40 | 
41 | import matplotlib.pyplot as mplot
42 | 
43 | from utils_base import flatten_array_list
44 | from scipy.io import wavfile
45 | from scipy.signal import welch
46 | 
47 | try:
48 |     import matlab
49 |     import matlab.engine
50 | except ImportError:
51 |     print('Could not import matlab libraries')
52 | 
53 | 
54 | def my_resample(x, fs_old, fs_new, matlab_eng=None, verbose=False,
55 |                 close_after=False):
56 |     """
57 | 
58 |     Uses the Matlab engine to resample audio files. Produces much better results than other
59 |     alternatives.
60 | 
61 |     If you do not have Matlab, then you can:
62 |         conda install -c conda-forge resampy
63 |     and then replace this function with:
64 |         def my_resample(x, fs_old, fs_new, matlab_eng=None, verbose=False,
65 |                         close_after=False):
66 |             from resampy import resample
67 |             return resample(x, fs_old, fs_new)
68 |     You can use any other library than resampy of course.
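    As a further alternative (a sketch only, not part of the original toolbox),
    SciPy's polyphase resampler can be used when the two rates form a simple
    integer ratio:
        from scipy.signal import resample_poly
        y = resample_poly(x, 1, 3)  # e.g. a 48 kHz signal down to 16 kHz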
69 | 
70 |     Args:
71 |         x: Signal to resample
72 |         fs_old: Old sampling rate
73 |         fs_new: New sampling rate
74 |         matlab_eng: Matlab engine object if pre-initialized
75 |         verbose: Verbose reporting
76 |         close_after: Close the matlab engine when done
77 | 
78 |     Returns: The resampled audio signal
79 | 
80 |     """
81 |     global resample_eng
82 |     was_int16 = False
83 | 
84 |     if fs_old == fs_new:
85 |         return x
86 |     if x.dtype == 'int16':
87 |         was_int16 = True
88 |         x = (x / np.iinfo('int16').max).astype('float')
89 |         if verbose:
90 |             print('Your input for resampling is int16, will go to float for '
91 |                   'calculations and then put it back to int16')
92 | 
93 |     if resample_eng is not None:
94 |         eng = resample_eng
95 |         print('Using static engine')
96 |     elif matlab_eng is not None:
97 |         print('Using provided Matlab engine')
98 |         eng = matlab_eng
99 |         my_resample.ext_eng = eng
100 |     else:
101 |         print('Creating Matlab engine')
102 |         eng = matlab.engine.start_matlab()
103 |         resample_eng = eng
104 | 
105 |     x, shapes = flatten_array_list(x, orientation='landscape')
106 | 
107 |     print('Resampling ' + str(x.shape) + ' from ' + str(fs_old) + ' to ' + str(fs_new))
108 | 
109 |     g = np.gcd(int(fs_new), int(fs_old)); up, down = int(fs_new) // g, int(fs_old) // g
110 |     x_tmp = matlab.double(x.tolist())
111 |     x_out = eng.resample(x_tmp, matlab.double([up]), matlab.double([down]), nargout=1)
112 | 
113 |     if close_after:
114 |         eng.exit()
115 |         resample_eng = None
116 | 
117 |     x_out = np.array(x_out)
118 |     print('Got ' + str(x_out.shape) + ' output samples')
119 |     x_out = repack_array_list(x_out, shapes=shapes, orientation='landscape')
120 |     if len(x_out) == 1:
121 |         x_out = x_out[0]
122 | 
123 |     if was_int16:
124 |         if type(x_out) is list:
125 |             for i in range(len(x_out)):
126 |                 x_out[i] = (x_out[i].astype('float64') * np.iinfo('int16').max).astype('int16')
127 |         else:
128 |             x_out = (x_out.astype('float64') * np.iinfo('int16').max).astype('int16')
129 | 
130 |     return x_out
131 | 
132 | 
133 | def get_psd(x, fs, window_seconds):
134 |     """
135 | 
136 |     Estimates the Power Spectral Density and Power Spectrum of a given signal, using:
137 |     P. Welch, "The use of the fast Fourier transform for the
138 |     estimation of power spectra: A method based on time averaging
139 |     over short, modified periodograms", IEEE Trans. Audio
140 |     Electroacoust. vol. 15, pp. 70-73, 1967.
141 | 
142 |     Args:
143 |         x: Signal
144 |         fs: Sampling frequency
145 |         window_seconds: Window length in seconds
146 | 
147 |     Returns: frequency_points, PSD, Power Spectrum
148 | 
149 |     """
150 | 
151 |     nperseg = int(np.ceil(fs * window_seconds))
152 |     _, psd = welch(x, fs=fs, window='hamming', nperseg=nperseg, noverlap=None, nfft=None,
153 |                    detrend='constant', return_onesided=True, scaling='density', axis=-1)
154 |     f, pspec = welch(x, fs=fs, window='hamming', nperseg=nperseg, noverlap=None, nfft=None,
155 |                      detrend='constant', return_onesided=True, scaling='spectrum', axis=-1)
156 | 
157 |     return f, psd, pspec
158 | 
159 | 
160 | def distitpf(pf1, pf2, mode='0'):
161 |     """
162 | 
163 |     Adaptation of the Itakura distance estimation method from Voicebox [1]
164 | 
165 |     Args:
166 |         pf1: Power spectrum to compare
167 |         pf2: Power spectrum to compare
168 |         mode: Character string selecting one of the following options:
169 |             'x' Calculate the full distance matrix from every row of PF1 to every row of PF2
170 |             'd' Calculate only the distance between corresponding rows of PF1 and PF2
171 |             The default is 'd' if PF1 and PF2 have the same number of rows, otherwise 'x'.
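    Example (a minimal sketch; p1 and p2 are assumed to be power spectra of the
        same shape, e.g. as returned by get_psd):
            d = distitpf(p1, p2, mode='d')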
172 | 
173 |     Returns: Itakura distance
174 | 
175 |     [1] http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/doc/voicebox/distitpf.html
176 | 
177 |     """
178 | 
179 |     pf1 = np.atleast_2d(pf1)
180 |     pf2 = np.atleast_2d(pf2)
181 |     (nf1, p2) = pf1.shape
182 |     p1 = p2 - 1
183 |     nf2 = pf2.shape[0]
184 | 
185 |     if mode == 'd' or (not mode == 'x' and nf1 == nf2):
186 |         nx = min(nf1, nf2)
187 |         r = pf1[0:nx, :] / pf2[0:nx, :]
188 |         q = np.log(r)
189 |         d = (np.log((np.sum(r[:, 1:p1], 1) + 0.5 * (r[:, 0] + r[:, p2 - 1])) / p1) -
190 |              (np.sum(q[:, 1:p1], 1) + 0.5 * (q[:, 0] + q[:, p2 - 1])) / p1)[0]
191 |     else:
192 |         r = np.transpose(np.repeat(pf1[:, :, np.newaxis], nf2, axis=2), axes=[0, 2, 1]) / \
193 |             np.transpose(np.repeat(pf2[:, :, np.newaxis], nf1, axis=2), axes=[2, 0, 1])
194 |         q = np.log(r)
195 |         d = np.log((np.sum(r[:, :, 1:p1], 2) + 0.5 * (r[:, :, 0] + r[:, :, p2 - 1])) / p1) - \
196 |             (np.sum(q[:, :, 1:p1], 2) + 0.5 * (q[:, :, 0] + q[:, :, p2 - 1])) / p1
197 |     return d
198 | 
199 | 
200 | def write_wav(filename, fs, ss):
201 |     """
202 | 
203 |     Writes audio samples as wav files to disk.
204 | 
205 |     Args:
206 |         filename: Name to save the file as
207 |         fs: Sampling frequency
208 |         ss: Audio samples as a numpy array
209 | 
210 |     Returns: Nothing
211 | 
212 |     """
213 |     wavfile.write(filename, fs, (ss.astype('float64') / abs(ss).max() * np.iinfo('int16').max
214 |                                  ).astype('int16'))
215 |     print('Wrote : ' + filename)
216 | 
217 | 
218 | def ar_to_cepstrum(ar_coef, cep_order=None):
219 |     """
220 | 
221 |     Converts Autoregressive (AR) coefficients to cepstral coefficients using the method discussed
222 |     in [1]
223 | 
224 |     Args:
225 |         ar_coef: AR coefficients
226 |         cep_order: Order up to which to estimate cepstral coefficients
227 | 
228 |     Returns:
229 |         The cepstral coefficients
230 | 
231 |     [1] K. Kalpakis, D. Gada, and V. Puttagunta, 'Distance measures for effective clustering of
232 |         ARIMA time-series,' in ICDM, San Jose, California, USA, 2001, pp. 273-280.
233 | 
234 |     """
235 |     back_to_flat = False
236 |     if ar_coef.ndim == 1:
237 |         back_to_flat = True
238 |     ar_coef = np.atleast_2d(ar_coef)
239 |     cep_order = ar_coef.shape[1] if cep_order is None else cep_order
240 |     cep_coef = np.zeros((ar_coef.shape[0], cep_order), dtype=ar_coef.dtype)
241 |     if not np.all(ar_coef[:, 0] == 1):
242 |         raise AssertionError('Expected first AR coef to be 1')
243 |     for i in range(cep_coef.shape[0]):
244 |         for k in range(cep_order):
245 |             if k == 1:
246 |                 cep_coef[i, k] = -ar_coef[i, k]
247 |             elif k < ar_coef.shape[1]:
248 |                 cep_coef[i, k] = -ar_coef[i, k]
249 |                 for m in range(k):
250 |                     cep_coef[i, k] = cep_coef[i, k] - (1 - (m + 1) / k) * ar_coef[i, m] * cep_coef[
251 |                         i, k - m]
252 |             else:
253 |                 cep_coef[i, k] = 0
254 |                 for m in range(min(k, ar_coef.shape[1])):
255 |                     cep_coef[i, k] = cep_coef[i, k] - (1 - (m + 1) / k) * ar_coef[i, m] * cep_coef[
256 |                         i, k - m]
257 |     if back_to_flat:
258 |         cep_coef = cep_coef.flatten()
259 | 
260 |     return cep_coef
261 | 
262 | 
263 | def align_max_samples(yin, scan_range=None):
264 |     """
265 | 
266 |     Aligns input signals so that the maximum energy samples are at the same index.
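    Example (a minimal sketch, assuming air_a and air_b are equal-length 1D AIRs):
        aligned, delays = align_max_samples(np.array([air_a, air_b]))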
267 | 
268 |     Args:
269 |         yin: List of input signals
270 |         scan_range: Range of indices to scan in order to find the maximum energy sample
271 | 
272 |     Returns:
273 |         List of aligned signals
274 |         List of the delays introduced in each signal during alignment
275 | 
276 |     """
277 | 
278 |     yin = flatten_array_list(yin)[0]
279 |     if yin.ndim == 1:
280 |         raise ValueError('Expected 2D array as input')
281 | 
282 |     if yin.shape[0] == 1:
283 |         yout = yin
284 |         delays = np.array([0]).astype(float)
285 |         return yout, delays
286 | 
287 |     if scan_range is None:
288 |         scan_range = range(yin.shape[1])
289 |     delays = abs(yin[:, scan_range]).argmax(axis=1)
290 | 
291 |     delays = abs(delays - delays.max())
292 |     padding = delays.max()
293 | 
294 |     yout = np.concatenate(
295 |         (np.zeros_like(yin),
296 |          np.zeros((yin.shape[0], padding), dtype=yin.dtype)),
297 |         axis=1).astype(yin.dtype)
298 | 
299 |     for idx, this_delay in enumerate(delays):
300 |         tmp = np.zeros_like(yout[idx, :])
301 |         tmp[this_delay:this_delay + yin.shape[1]] = yin[idx, :]
302 |         yout[idx, :] = tmp
303 |     if padding > 0:
304 |         yout = yout[:, 0:-padding]
305 | 
306 |     return yout, delays
307 | 
308 | 
309 | def scale_x_to_y(x, y):
310 |     """
311 | 
312 |     Estimates a scaling for signal X, in order to least-squares match the amplitudes of the
313 |     samples in X and Y
314 | 
315 |     Args:
316 |         x: X
317 |         y: Y
318 | 
319 |     Returns:
320 |         Scale (scalar)
321 | 
322 |     """
323 |     scale = np.linalg.lstsq(np.atleast_2d(x).T, np.atleast_2d(y).T, rcond=None)[0][0][0]
324 |     return scale
325 | 
326 | 
327 | def fractional_alignment(yin, resolution=0.01, ls_scale=False, take_base_as=0):
328 |     """
329 | 
330 |     Fractionally aligns signals (between (-1,+1) sample shifts) based on a least squares
331 |     method over the mismatch of the samples, operating on a fixed resolution grid.
332 | 
333 |     Args:
334 |         yin: List of input signals
335 |         resolution: Resolution of the search grid for fractional alignment
336 |         ls_scale: Enable the scaling of the signals to a least squares match of their samples,
337 |             in addition to the delaying
338 |         take_base_as: Signal to take as the reference signal in the matching process. The
339 |             default is to take the first signal in the list. This signal will remain unchanged
340 |             and the rest will be matched to it.
341 | 
342 |     Returns:
343 |         List of aligned signals
344 |         List of the delays introduced in each signal during alignment
345 |         List of the scales applied to each signal
346 | 
347 |     """
348 | 
349 |     def get_mse(frac_kernels, idx, x, valid_range, y):
350 |         x = np.convolve(x, frac_kernels[:, idx])
351 |         x = x[valid_range]
352 |         if ls_scale:
353 |             scale = scale_x_to_y(x, y)
354 |         else:
355 |             scale = 1.
356 |         mse = np.sum((y - x * scale) ** 2)
357 |         return mse, scale
358 | 
359 |     yin = np.array(yin)
360 |     if yin.ndim == 1:
361 |         raise ValueError('Expected 2D array as input')
362 | 
363 |     resolution = float(resolution)
364 |     npoints = int(np.ceil(1. / resolution))
365 |     if yin.shape[0] == 1:
366 |         yout = yin
367 |         delays = np.array([0]).astype(float)
368 |         return yout, delays, np.array([1.])
369 | 
370 |     context = 4
371 |     valid_range = np.arange(context, context + yin.shape[1], 1).astype(int)
372 |     _, frac_kernels = gm_frac_delayed_copies(np.ones((npoints,)),
373 |                                              np.arange(context, context + 1, resolution) - .5,
374 |                                              2 * context + 1)
375 |     yout = np.zeros_like(yin)
376 |     yout[take_base_as, :] = yin[take_base_as, :]
377 |     delays = np.zeros((yin.shape[0],))
378 |     scales = np.zeros((yin.shape[0],))
379 |     scales[take_base_as] = 1.
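    # Grid search over the candidate fractional delays: for each delay kernel,
    # measure the (optionally least-squares-scaled) mismatch to the reference
    # signal and keep the delay and scale that minimise it.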
380 |     all_delays = np.arange(0, 1, resolution) - .5
381 |     for i in range(yin.shape[0]):
382 |         if take_base_as == i:
383 |             continue
384 |         mse_scales = np.array(
385 |             [get_mse(frac_kernels, idx, yin[i, :], valid_range, yin[take_base_as, :])
386 |              for idx in range(0, npoints)])
387 |         min_point = mse_scales[:, 0].argmin()
388 |         delays[i] = all_delays[min_point]
389 |         scales[i] = mse_scales[min_point, 1]
390 |         thisyout = scales[i] * np.convolve(yin[i, :], frac_kernels[:, min_point])
391 |         yout[i, :] = thisyout[valid_range]
392 | 
393 |     return yout, delays, scales
394 | 
395 | 
396 | def gm_frac_delayed_copies(amplitudes, delays, tot_length, excitation_signal=np.array([]),
397 |                            center_filter_peaks=True):
398 |     """
399 | 
400 |     The function when called implements the model defined as:
401 |     (1) h(n) = \sum_{i=1}^{D}\left[{\beta_i}h_e(n){\ast}\frac{\sin~\pi(n-k_i)}{\pi(n-k_i)}\right]
402 |     This is the model proposed in [1].
403 | 
404 |     This is effectively the summation of D copies of the signal 'excitation_signal'.
405 |     The copies are placed at sample locations 'delays' (which are not bound to integers) and
406 |     their scaling is defined by 'amplitudes'. The length of the output signal is given by
407 |     'tot_length'.
408 | 
409 |     Args:
410 |         amplitudes: Vector containing the scaling of each copy
411 |         delays: Sample indices of the occurrence of each copy
412 |         tot_length: Total length of the output vector
413 |         excitation_signal: The signal to be copied at each location
414 |         center_filter_peaks: Center the signal in 'excitation_signal', so that the samples of
415 |             maximum energy occur at samples 'delays'
416 | 
417 |     Returns:
418 |         y : h(n) from equation (1)
419 |         Y : A matrix containing as vectors the components of the summation in equation (1)
420 | 
421 |     [1] Papayiannis, C., Evers, C. and Naylor, P.A., 2017, August. Sparse parametric modeling of
422 |         the early part of acoustic impulse responses. In Signal Processing Conference (EUSIPCO),
        2017 25th European (pp. 678-682). IEEE.
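    Examples:
        A minimal sketch: two scaled sinc pulses, one at an integer and one at a
        fractional sample delay, summed into a 64-sample output.

        y, y_parts = gm_frac_delayed_copies([1., 0.5], [10., 20.25], 64)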
423 | 
424 |     """
425 |     excitation_signal = np.atleast_2d(excitation_signal)
426 |     if excitation_signal.shape[1] == 1:
427 |         excitation_signal = excitation_signal.T
428 | 
429 |     if excitation_signal.size > 0:
430 |         nfilters = excitation_signal.shape[0]
431 |     else:
432 |         nfilters = 0
433 |     sincspan = 9
434 | 
435 |     amplitudes = np.atleast_2d(amplitudes)
436 |     if amplitudes.shape[1] == 1:
437 |         amplitudes = amplitudes.T
438 |     delays = np.atleast_2d(delays)
439 |     if delays.shape[1] == 1:
440 |         delays = delays.T
441 | 
442 |     tot_components = amplitudes.size
443 |     if delays.size != tot_components:
444 |         raise NameError(getfname() + ':InputsMismatch tot components are ' + str(
445 |             tot_components) + ' and got delays ' + str(delays.size))
446 |     if nfilters > 1:
447 |         raise NameError(getfname() + ':InputsMismatch')
448 |     if tot_components < 1:
449 |         yindiv = np.array([])
450 |         ysignal = np.zeros((tot_length,))
451 |         return [ysignal, yindiv]
452 |     sample_indices = np.repeat(column_vector(np.arange(0, tot_length, 1, dtype=np.float64)),
453 |                                tot_components, axis=1)
454 |     sample_indices_offsetting = np.repeat(row_vector(delays), tot_length, axis=0)
455 |     sample_indices -= sample_indices_offsetting
456 |     yindiv = np.sinc(sample_indices) * np.repeat(row_vector(amplitudes), tot_length, axis=0)
457 |     if not np.isinf(sincspan):
458 |         spanscale_numer = sincspan * np.sin(np.pi * sample_indices / float(sincspan))
459 |         spanscale_denom = (np.pi * sample_indices)  # Lanczos kernel
460 |         limiting_case_idx = np.where(spanscale_denom == 0)
461 |         spanscale_denom[limiting_case_idx] = 1
462 |         spanscale = spanscale_numer / spanscale_denom
463 |         spanscale[limiting_case_idx] = 1
464 |         yindiv *= spanscale
465 |         yindiv[np.where(abs(sample_indices) > sincspan)] = 0
466 | 
467 |     excitation_signal = excitation_signal.flatten()
468 |     if nfilters > 0:
469 |         if not center_filter_peaks:
470 |             yindiv = lfilter(excitation_signal, [1], yindiv, axis=0)
471 |         else:
472 |             fcenter = np.argmax(abs(excitation_signal), axis=0)
473 |             if fcenter == 0:
474 |                 yindiv = lfilter(excitation_signal, [1], yindiv, axis=0)
475 |             else:
476 |                 futuresamples = fcenter
477 |                 tmpconc = np.concatenate((yindiv, np.zeros((futuresamples, yindiv.shape[1]))),
478 |                                          axis=0)
479 |                 tmpconc = lfilter(excitation_signal, [1], tmpconc, axis=0)
480 |                 yindiv = tmpconc[futuresamples:, :]
481 | 
482 |     ysignal = np.sum(yindiv, axis=1)
483 |     return [ysignal, yindiv]
484 | 
485 | 
486 | def enframe(alike, flength, fincr, hamming_window=False):
487 |     """
488 |     Breaks the input into frames of length flength, with an increment of fincr samples per frame
489 |     Args:
490 |         alike: Input array-like description of a vector
491 |         flength: Frame Length in samples
492 |         fincr: Frame Increment in Samples
493 |         hamming_window: Apply a Hamming window to the frames
494 | 
495 |     Returns: The signal broken into frames
496 | 
497 |     """
498 |     npa = np.array(alike)
499 |     flength = int(flength)
500 |     fincr = int(fincr)
501 |     if npa.ndim > 1:
502 |         raise NameError(getfname() + ':Non1DInput')
503 |     if not (flength % fincr) == 0:
504 |         raise NameError(getfname() + ':SizeOfFrameNotMultipleOfIncrement')
505 |     nshifts = int(flength / float(fincr))
506 |     totnframes = int(np.ceil(npa.size / float(fincr)))
507 |     noframes_per_shift = int(np.ceil(totnframes / float(nshifts)))
508 |     discardlastframes = (totnframes % nshifts) > 0
509 |     xout = np.zeros((noframes_per_shift * nshifts, flength))
510 |     tnsamples = int(noframes_per_shift * flength)
511 |     fidxs = np.arange(0, noframes_per_shift, dtype=int) * nshifts
512 |     npadding = (flength - npa.size % flength) % flength + flength
513 |     npa = np.append(npa, np.zeros(npadding))
514 |     for i in range(nshifts):
515 |         rows = fidxs + i
516 |         xout[rows, :] = np.array(npa[i * fincr:i * fincr + tnsamples]).reshape(rows.size, flength)
517 |         if hamming_window:
518 |             xout[rows, :] = xout[rows, :] * np.hamming(flength)
519 |     if discardlastframes:
520 |         xout = xout[0:-1, :]
521 |     return xout
522 | 
523 | 
524 | def overlapadd(input_frames, window_samples=None, inc=None,
525 |                previous_partial_output=None):
526 |     """
527 |     Performs overlap-add using the frames in input_frames. This is a Python implementation of
528 |     the MATLAB code available in the VOICEBOX toolbox.
529 |     (http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/doc/voicebox/overlapadd.html)
530 | 
531 |     Args:
532 |         input_frames: The array of input frames of size M X window_samples
533 |         window_samples: The window to be used for the frames
534 |         inc: The increment in samples between frames
535 |         previous_partial_output: Provide the partial output returned from a previous call to
536 |             this function
537 | 
538 |     Returns:
539 |         The overlap-add result and the partial output at the end
540 | 
541 |     """
542 | 
543 |     if window_samples is None:
544 |         window_samples = np.ones((input_frames.shape[1],))
545 |     elif window_samples.size != input_frames.shape[1]:
546 |         raise NameError(getfname() + ':WindowSizeDoesNotMatchFrameSize')
547 |     if inc is None:
548 |         inc = input_frames.shape[1]
549 |     elif inc > input_frames.shape[1]:
550 |         raise NameError(getfname() + ':SampleIncrementTooLarge')
551 |     nr = input_frames.shape[0]
552 |     nf = input_frames.shape[1]
553 | 
554 |     nb = int(np.ceil(nf / float(inc)))
555 |     no = int(nf + (nr - 1) * inc)
556 |     overlapped_output_shape = (no, nb)
557 | 
558 |     z = np.zeros((int(no * nb),))
559 |     # input_frames = np.asfortranarray(input_frames)
560 | 
561 |     zidx = (
562 |         np.repeat(row_vector(np.arange(0, nf, dtype=int)), nr, axis=0) +
563 |         np.repeat(column_vector(np.arange(0, nr, dtype=int) * inc +
564 |                                 (np.arange(0, nr, dtype=int) % nb) * no), nf, axis=1))
565 |     # input_frames_windowed = input_frames * np.repeat(row_vector(window_samples), n_frames, axis=0)
566 |     input_frames *= np.repeat(row_vector(window_samples), nr, axis=0)
567 |     z[zidx.flatten(order='F').astype(np.int32)] = input_frames.flatten(order='F')
568 |     z = z.reshape(overlapped_output_shape, order='F')
569 |     if z.ndim > 1:
570 |         z = np.sum(z, axis=1)
571 |     if previous_partial_output is not None:
572 |         if previous_partial_output.ndim > 1:
573 |             raise NameError(getfname() + ':PrevPartialOutDimError')
574 |         else:
575 |             z[0:previous_partial_output.size] += previous_partial_output
576 |     out_samples = int(inc * nr)
577 |     if no < out_samples:
578 |         z[out_samples] = 0
579 |         current_partial_output = np.array([])
580 |     else:
581 |         current_partial_output = z[out_samples:]
582 |         z = z[0:out_samples]
583 | 
584 |     return z, current_partial_output
585 | 
586 | 
587 | def get_array_energy(alike):
588 |     """
589 |     Get the total energy of the elements in an array
590 | 
591 |     Args:
592 |         alike: The input array
593 | 
594 |     Returns:
595 |         The total energy
596 | 
597 |     """
598 |     return np.sum(np.array(alike, dtype='float128') ** 2)
599 | 
600 | 
601 | def plotnorm(x=None, y=None, title=None, interactive=False, clf=False, savelocation=None,
602 |              no_rescaling=False, **mplotargs):
603 |     """
604 | 
605 |     A useful and flexible plotting tool for signal processing.
606 |     It allows you to plot a number of signals on the same normalised scale. It can plot signals
607 |     in the time domain when provided with a sampling frequency.
608 | 
609 |     Args:
610 |         x: The x axis points or the sampling frequency as a scalar
611 |         y: The list of vectors or the array to plot. The vectors can have a different number of
612 |             elements each
613 |         title: The string to use as the plot title
614 |         interactive: Wait for the user to close the plot before continuing
615 |         clf: Clear the plot before plotting
616 |         savelocation: Save the plot as this file
617 |         no_rescaling: Do not normalize the scale of the signals
618 |         **mplotargs: Arguments to be passed to matplotlib.pyplot.plot
619 | 
620 |     Returns:
621 |         The plot
622 | 
623 |     """
624 | 
625 |     hasfs = False
626 |     if (x is None) and (y is not None):
627 |         x = np.arange(y.size)
628 |     elif (not isinstance(x, np.ndarray)) and (y is not None):
629 |         sampling_freq = float(x)
630 |         x = np.arange(y.size) / sampling_freq
631 |         hasfs = True
632 |     elif x.size != y.size:
633 |         raise NameError(getfname() + ':XYSizeMismatch')
634 |     elif x.size == 0:
635 |         return
636 |     if not no_rescaling:
637 |         y = y / abs(y).max()
638 | 
639 |     if clf:
640 |         mplot.clf()
641 |     res = mplot.plot(x, y, linewidth=0.5, **mplotargs)
642 |     gen_title = 'Normalised Amplitude'
643 |     if hasfs:
644 |         gen_title += ' at Fs=' + repr(sampling_freq) + 'Hz'
645 |         mplot.xlabel('Time (s)')
646 |     else:
647 |         mplot.xlabel('Sample')
648 |     if title is None:
649 |         mplot.title(gen_title)
650 |     elif not title == '':
651 |         mplot.title(title)
652 |     mplot.ylabel('Normalised Amplitude')
653 |     mplot.grid(True)
654 |     if savelocation is not None:
655 |         mplot.savefig(savelocation)
656 |         print('Saved: ' + savelocation)
657 |     if interactive:
658 |         mplot.show()
659 | 
660 |     return res
661 | 
--------------------------------------------------------------------------------
/Code/results_dir/ace_h5_info.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/papayiannis/reverberation_learning_python/81ec8e70bea614c5d8a38a8ece7a7a39ac1f50b9/Code/results_dir/ace_h5_info.h5
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 | 
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 | 
8 | Preamble
9 | 
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 | 
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 | 
22 | When we speak of free software, we are referring to freedom, not
23 | price.
Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 
88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 
146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 
209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. 
This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. 
But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 
387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. 
You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. 
"Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 
564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 
628 | 
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 | 
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year>  <name of author>
636 | 
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 | 
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 | 
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 | 
650 | Also add information on how to contact you by electronic and paper mail.
651 | 
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 | 
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 | 
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 | 
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 | 
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | # Reverberation Learning Toolbox for Python
3 | 
4 | Copyright 2019 [Constantinos Papayiannis](https://www.linkedin.com/in/papayiannis/)
5 | 
6 | # Introduction
7 | 
8 | The work in [1] was one of the first steps in using deep learning to classify reverberant environments based on their acoustics. DNNs were given reverberant speech signals and classified them according to the room in which the recording was made. This repository includes the code used in [1]. It is intended for researchers who aim to build on the work and follow the many promising research paths that stem from it. It also contains useful code for DSP, speech processing and deep learning using Keras.
9 | 
10 | 
11 | # Setup
12 | 
13 | To use the repository, start by setting up your environment. I assume you have Anaconda installed and that you are working with Python 3.
14 | 
15 | ```bash
16 | # Get the repository
17 | git clone https://github.com/papayiannis/reverberation_learning_python
18 | cd reverberation_learning_python
19 | # Get dependencies
20 | conda install numpy keras scipy tabulate matplotlib pandas seaborn h5py scikit-learn
21 | # Unpack the AIR data
22 | cd Code/Local_Databases/AIR
23 | tar zxf ACE16.tar.gz
24 | ```
25 | 
26 | # Room classification
27 | 
28 | 
29 | To train a DNN for room classification from reverberant speech, do the following:
30 | 
31 | ```bash
32 | # Unpack the AIR data
33 | cd Code/Local_Databases/AIR
34 | tar zxf ACE16.tar.gz
35 | cd ../../pythonsrc
36 | mkdir -p /tmp/train_test_speech
37 | ln -s $TRAIN_SPEECH_LOC /tmp/train_test_speech/TRAIN
38 | ln -s $TEST_SPEECH_LOC /tmp/train_test_speech/TEST
39 | # Run the training example for a CNN-RNN room classifier using ACE AIRs and your speech files
40 | bash run_ace_discriminative_nets.sh ../Local_Databases/AIR/ACE16 \
41 |     /tmp/train_test_speech/ ../results_dir/ace_h5_info.h5 0 5
42 | ```
43 | 
44 | The final index argument selects the network architecture: the value 5 used here trains the CNN-RNN classifier, while 8 trains an Attention-CRNN (an example of the latter is given below). The locations `$TRAIN_SPEECH_LOC` and `$TEST_SPEECH_LOC` point to directories containing speech WAV files, used respectively for training and for testing the DNNs. The experiments in [1] used TIMIT, but any other speech dataset can be used in practice.
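45 | 
46 | For example, to train the Attention-CRNN variant, the same pipeline can be rerun with the final index set to 8. The sketch below assumes the index mapping described above; the TIMIT locations `/data/TIMIT/TRAIN` and `/data/TIMIT/TEST` are placeholders to be replaced with your own speech corpus paths.
47 | 
48 | ```bash
49 | # Run from Code/pythonsrc, as in the example above.
50 | # The speech corpus locations below are placeholders: substitute your own.
51 | export TRAIN_SPEECH_LOC=/data/TIMIT/TRAIN
52 | export TEST_SPEECH_LOC=/data/TIMIT/TEST
53 | mkdir -p /tmp/train_test_speech
54 | # -sfn replaces the links if they already exist from a previous run
55 | ln -sfn "$TRAIN_SPEECH_LOC" /tmp/train_test_speech/TRAIN
56 | ln -sfn "$TEST_SPEECH_LOC" /tmp/train_test_speech/TEST
57 | # Same invocation as above, with the final index switched from 5 to 8
58 | bash run_ace_discriminative_nets.sh ../Local_Databases/AIR/ACE16 \
59 |     /tmp/train_test_speech/ ../results_dir/ace_h5_info.h5 0 8
60 | ```
61 | 
62 | # Bibliography
63 | 
64 | [1]: C. Papayiannis, C. Evers and P. A. Naylor, "End-to-End Classification of Reverberant Rooms Using DNNs," in IEEE/ACM Transactions on Audio, Speech, and Language Processing, vol. 28, pp. 3010-3017, 2020, doi: 10.1109/TASLP.2020.3033628.
65 | 
66 | 
67 | _Reverberation Learning Toolbox for Python is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version._
68 | 
--------------------------------------------------------------------------------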