├── main.md ├── py_filter ├── docs ├── doc.png ├── bc_s.png ├── bdwn.png ├── nav_f.png ├── nav_g.png ├── nav_h.png ├── open.png ├── tab_a.png ├── tab_b.png ├── tab_h.png ├── tab_s.png ├── closed.png ├── doxygen.png ├── splitbar.png ├── sync_off.png ├── sync_on.png ├── folderopen.png ├── folderclosed.png ├── search │ ├── close.png │ ├── mag_sel.png │ ├── search_l.png │ ├── search_m.png │ ├── search_r.png │ ├── files_c.js │ ├── pages_4.js │ ├── namespaces_4.js │ ├── pages_8.js │ ├── pages_5.js │ ├── pages_1.js │ ├── files_7.js │ ├── files_8.js │ ├── pages_7.js │ ├── files_b.js │ ├── namespaces_3.js │ ├── pages_6.js │ ├── pages_3.js │ ├── namespaces_7.js │ ├── namespaces_0.js │ ├── namespaces_5.js │ ├── namespaces_1.js │ ├── namespaces_6.js │ ├── functions_b.js │ ├── functions_c.js │ ├── classes_2.js │ ├── classes_d.js │ ├── classes_e.js │ ├── pages_0.js │ ├── all_a.js │ ├── files_a.js │ ├── functions_a.js │ ├── namespaces_2.js │ ├── files_d.js │ ├── all_b.js │ ├── files_6.js │ ├── namespaces_8.js │ ├── files_9.js │ ├── classes_9.js │ ├── files_4.js │ ├── pages_2.js │ ├── files_1.js │ ├── files_5.js │ ├── pages_9.js │ ├── files_2.js │ ├── functions_8.js │ ├── files_e.js │ ├── classes_6.js │ ├── functions_4.js │ ├── nomatches.html │ ├── classes_a.js │ ├── functions_13.js │ ├── files_f.js │ ├── classes_4.js │ ├── functions_e.js │ ├── searchdata.js │ ├── classes_5.js │ ├── files_3.js │ ├── functions_15.js │ ├── classes_8.js │ ├── functions_11.js │ ├── classes_7.js │ ├── functions_9.js │ ├── all_15.js │ ├── files_0.js │ ├── functions_10.js │ ├── all_9.js │ ├── all_e.js │ ├── functions_d.js │ ├── files_10.js │ ├── classes_c.js │ ├── all_7.js │ ├── classes_3.js │ ├── functions_7.js │ ├── all_8.js │ ├── functions_2.js │ ├── all_0.html │ ├── all_1.html │ ├── all_10.html │ ├── all_11.html │ ├── all_12.html │ ├── all_13.html │ ├── all_14.html │ ├── all_15.html │ ├── all_16.html │ ├── all_17.html │ ├── all_2.html │ ├── all_3.html │ ├── all_4.html │ ├── all_5.html │ ├── 
all_6.html │ ├── all_7.html │ ├── all_8.html │ ├── all_9.html │ ├── all_a.html │ ├── all_b.html │ ├── all_c.html │ ├── all_d.html │ ├── all_e.html │ ├── all_f.html │ ├── files_0.html │ ├── files_1.html │ ├── files_10.html │ ├── files_2.html │ ├── files_3.html │ ├── files_4.html │ ├── files_5.html │ ├── files_6.html │ ├── files_7.html │ ├── files_8.html │ ├── files_9.html │ ├── files_a.html │ ├── files_b.html │ ├── files_c.html │ ├── files_d.html │ ├── files_e.html │ ├── files_f.html │ ├── pages_0.html │ ├── pages_1.html │ ├── pages_2.html │ ├── pages_3.html │ ├── pages_4.html │ ├── pages_5.html │ ├── pages_6.html │ ├── pages_7.html │ ├── pages_8.html │ ├── pages_9.html │ ├── classes_0.html │ ├── classes_1.html │ ├── classes_2.html │ ├── classes_3.html │ ├── classes_4.html │ ├── classes_5.html │ ├── classes_6.html │ ├── classes_7.html │ ├── classes_8.html │ ├── classes_9.html │ ├── classes_a.html │ ├── classes_b.html │ ├── classes_c.html │ ├── classes_d.html │ └── classes_e.html ├── classnabu_1_1neuralnetworks_1_1models_1_1model_1_1Model.png ├── classnabu_1_1neuralnetworks_1_1recognizer_1_1Recognizer.png ├── classnabu_1_1processing_1_1tfreaders_1_1tfreader_1_1TfReader.png ├── classnabu_1_1processing_1_1tfwriters_1_1tfwriter_1_1TfWriter.png ├── classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1SaveAtEnd.png ├── classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1StopHook.png ├── classnabu_1_1neuralnetworks_1_1decoders_1_1decoder_1_1Decoder.png ├── classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1Trainer.png ├── classnabu_1_1processing_1_1processors_1_1processor_1_1Processor.png ├── classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1LoadAtBegin.png ├── classnabu_1_1neuralnetworks_1_1components_1_1constraints_1_1MaxNorm.png ├── classnabu_1_1neuralnetworks_1_1evaluators_1_1evaluator_1_1Evaluator.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dnn_1_1DNN.png ├── classnabu_1_1processing_1_1tfwriters_1_1array__writer_1_1ArrayWriter.png ├── 
classnabu_1_1neuralnetworks_1_1decoders_1_1ctc__decoder_1_1CTCDecoder.png ├── classnabu_1_1neuralnetworks_1_1decoders_1_1max__decoder_1_1MaxDecoder.png ├── classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1ParameterServer.png ├── classnabu_1_1processing_1_1tfreaders_1_1binary__reader_1_1BinaryReader.png ├── classnabu_1_1processing_1_1tfreaders_1_1string__reader_1_1StringReader.png ├── classnabu_1_1processing_1_1tfwriters_1_1binary__writer_1_1BinaryWriter.png ├── classnabu_1_1processing_1_1tfwriters_1_1string__writer_1_1StringWriter.png ├── classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1ValidationSaveHook.png ├── classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1BypassWrapper.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dblstm_1_1DBLSTM.png ├── classnabu_1_1processing_1_1processors_1_1text__processor_1_1TextProcessor.png ├── classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1BahdanauAttention.png ├── classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1WindowedAttention.png ├── classnabu_1_1neuralnetworks_1_1decoders_1_1random__decoder_1_1RandomDecoder.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1speller_1_1Speller.png ├── classnabu_1_1processing_1_1processors_1_1audio__processor_1_1AudioProcessor.png ├── classnabu_1_1processing_1_1processors_1_1feature__computers_1_1mfcc_1_1Mfcc.png ├── classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1ScopeRNNCellWrapper.png ├── classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1StateOutputWrapper.png ├── classnabu_1_1neuralnetworks_1_1decoders_1_1feature__decoder_1_1FeatureDecoder.png ├── classnabu_1_1neuralnetworks_1_1evaluators_1_1loss__evaluator_1_1LossEvaluator.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1listener_1_1Listener.png ├── classnabu_1_1processing_1_1processors_1_1binary__processor_1_1BinaryProcessor.png ├── classnabu_1_1processing_1_1processors_1_1feature__computers_1_1fbank_1_1Fbank.png ├── 
classnabu_1_1processing_1_1tfreaders_1_1alignment__reader_1_1AlignmentReader.png ├── classnabu_1_1processing_1_1tfreaders_1_1string__reader__eos_1_1StringReaderEOS.png ├── classnabu_1_1processing_1_1tfwriters_1_1alignment__writer_1_1AlignmentWriter.png ├── classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1LocationAwareAttention.png ├── classnabu_1_1neuralnetworks_1_1trainers_1_1standard__trainer_1_1StandardTrainer.png ├── classnabu_1_1neuralnetworks_1_1decoders_1_1alignment__decoder_1_1AlignmentDecoder.png ├── classnabu_1_1neuralnetworks_1_1decoders_1_1threshold__decoder_1_1ThresholdDecoder.png ├── classnabu_1_1neuralnetworks_1_1evaluators_1_1decoder__evaluator_1_1DecoderEvaluator.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1dnn__decoder_1_1DNNDecoder.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1ed__decoder_1_1EDDecoder.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1rnn__decoder_1_1RNNDecoder.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1ed__encoder_1_1EDEncoder.png ├── classnabu_1_1processing_1_1processors_1_1alignment__processor_1_1AlignmentProcessor.png ├── classnabu_1_1processing_1_1processors_1_1textfile__processor_1_1TextFileProcessor.png ├── classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchState.png ├── classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1AttentionProjectionWrapper.png ├── classnabu_1_1neuralnetworks_1_1decoders_1_1beam__search__decoder_1_1BeamSearchDecoder.png ├── classnabu_1_1processing_1_1tfreaders_1_1audio__feature__reader_1_1AudioFeatureReader.png ├── classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoder.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dummy__encoder_1_1DummyEncoder.png ├── classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoderOutput.png ├── 
classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1hotstart__decoder_1_1HotstartDecoder.png ├── classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1hotstart__encoder_1_1HotstartEncoder.png ├── classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoderFinalOutput.png └── classnabu_1_1processing_1_1processors_1_1feature__computers_1_1feature__computer_1_1FeatureComputer.png ├── images ├── valid.png └── trainloss.png ├── config ├── recipes │ ├── DNN │ │ └── WSJ │ │ │ ├── alignment_processor.cfg │ │ │ ├── feature_processor.cfg │ │ │ ├── recognizer.cfg │ │ │ ├── test_evaluator.cfg │ │ │ ├── validation_evaluator.cfg │ │ │ ├── model.cfg │ │ │ ├── trainer.cfg │ │ │ └── database.cfg │ ├── LAS │ │ ├── GP │ │ │ ├── feature_processor.cfg │ │ │ ├── TU_phone_processor.cfg │ │ │ ├── SP_phone_processor.cfg │ │ │ ├── FR_phone_processor.cfg │ │ │ ├── AR_phone_processor.cfg │ │ │ ├── GE_phone_processor.cfg │ │ │ ├── PO_phone_processor.cfg │ │ │ ├── SW_phone_processor.cfg │ │ │ ├── recognizer.cfg │ │ │ ├── test_evaluator.cfg │ │ │ ├── validation_evaluator.cfg │ │ │ └── model.cfg │ │ └── TIMIT │ │ │ ├── feature_processor.cfg │ │ │ ├── text_processor.cfg │ │ │ ├── recognizer.cfg │ │ │ ├── test_evaluator.cfg │ │ │ ├── model.cfg │ │ │ ├── validation_evaluator.cfg │ │ │ └── trainer.cfg │ └── DBLSTM │ │ └── TIMIT │ │ ├── feature_processor.cfg │ │ ├── text_processor.cfg │ │ ├── recognizer.cfg │ │ ├── validation_evaluator.cfg │ │ ├── model.cfg │ │ ├── test_evaluator.cfg │ │ └── trainer.cfg └── computing │ ├── condor │ ├── non_distributed.cfg │ ├── single_machine.cfg │ └── multi_machine.cfg │ ├── README.md │ └── standard │ ├── single_machine.cfg │ └── multi_machine.cfg ├── nabu ├── computing │ ├── condor │ │ ├── __init__.py │ │ └── dataprep.job │ ├── static │ │ ├── __init__.py │ │ ├── kill_processes.py │ │ └── run_remote.py │ └── __init__.py ├── tools │ └── __init__.py ├── scripts │ ├── __init__.py │ ├── duplicate_model.sh │ └── kaldi │ │ └── 
compute_prior.py ├── neuralnetworks │ ├── decoders │ │ ├── __init__.py │ │ ├── defaults │ │ │ ├── thresholddecoder.cfg │ │ │ ├── alignmentdecoder.cfg │ │ │ ├── randomdecoder.cfg │ │ │ └── beamsearchdecoder.cfg │ │ └── README.md │ ├── evaluators │ │ ├── __init__.py │ │ ├── defaults │ │ │ ├── decoderevaluator.cfg │ │ │ └── lossevaluator.cfg │ │ ├── evaluator_factory.py │ │ └── README.md │ ├── trainers │ │ ├── __init__.py │ │ ├── trainer_factory.py │ │ ├── README.md │ │ └── standard_trainer.py │ ├── __init__.py │ ├── models │ │ ├── __init__.py │ │ ├── ed_decoders │ │ │ ├── __init__.py │ │ │ ├── defaults │ │ │ │ ├── hotstartdecoder.cfg │ │ │ │ ├── dnndecoder.cfg │ │ │ │ └── speller.cfg │ │ │ ├── README.md │ │ │ └── ed_decoder_factory.py │ │ └── ed_encoders │ │ │ ├── __init__.py │ │ │ ├── defaults │ │ │ ├── dblstm.cfg │ │ │ ├── hotstartencoder.cfg │ │ │ ├── listener.cfg │ │ │ └── dnn.cfg │ │ │ ├── README.md │ │ │ ├── ed_encoder_factory.py │ │ │ └── dummy_encoder.py │ └── components │ │ ├── __init__.py │ │ └── constraints.py ├── processing │ ├── processors │ │ ├── __init__.py │ │ ├── defaults │ │ │ ├── alignmentprocessor.cfg │ │ │ ├── binaryprocessor.cfg │ │ │ ├── audioprocessor.cfg │ │ │ ├── textprocessor.cfg │ │ │ └── textfileprocessor.cfg │ │ ├── feature_computers │ │ │ ├── __init__.py │ │ │ ├── feature_computer_factory.py │ │ │ ├── README.md │ │ │ └── defaults │ │ │ │ ├── fbank.cfg │ │ │ │ └── mfcc.cfg │ │ ├── README.md │ │ └── processor_factory.py │ ├── tfwriters │ │ ├── __init__.py │ │ ├── README.md │ │ ├── alignment_writer.py │ │ ├── binary_writer.py │ │ ├── tfwriter_factory.py │ │ ├── string_writer.py │ │ └── array_writer.py │ ├── tfreaders │ │ ├── __init__.py │ │ ├── README.md │ │ └── tfreader_factory.py │ ├── target_normalizers │ │ ├── __init__.py │ │ ├── phones.py │ │ ├── README.md │ │ ├── normalizer_factory.py │ │ ├── gp.py │ │ └── character.py │ └── __init__.py └── __init__.py ├── .gitignore └── run /main.md: 
-------------------------------------------------------------------------------- 1 | README.md -------------------------------------------------------------------------------- /py_filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | doxypypy -a -c $1 3 | -------------------------------------------------------------------------------- /docs/doc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/doc.png -------------------------------------------------------------------------------- /docs/bc_s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/bc_s.png -------------------------------------------------------------------------------- /docs/bdwn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/bdwn.png -------------------------------------------------------------------------------- /docs/nav_f.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/nav_f.png -------------------------------------------------------------------------------- /docs/nav_g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/nav_g.png -------------------------------------------------------------------------------- /docs/nav_h.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/nav_h.png -------------------------------------------------------------------------------- /docs/open.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/open.png -------------------------------------------------------------------------------- /docs/tab_a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/tab_a.png -------------------------------------------------------------------------------- /docs/tab_b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/tab_b.png -------------------------------------------------------------------------------- /docs/tab_h.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/tab_h.png -------------------------------------------------------------------------------- /docs/tab_s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/tab_s.png -------------------------------------------------------------------------------- /docs/closed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/closed.png -------------------------------------------------------------------------------- /docs/doxygen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/doxygen.png -------------------------------------------------------------------------------- /docs/splitbar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/splitbar.png -------------------------------------------------------------------------------- /docs/sync_off.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/sync_off.png -------------------------------------------------------------------------------- /docs/sync_on.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/sync_on.png -------------------------------------------------------------------------------- /images/valid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/images/valid.png -------------------------------------------------------------------------------- /docs/folderopen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/folderopen.png -------------------------------------------------------------------------------- /docs/folderclosed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/folderclosed.png -------------------------------------------------------------------------------- /docs/search/close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/search/close.png -------------------------------------------------------------------------------- /images/trainloss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/images/trainloss.png -------------------------------------------------------------------------------- /docs/search/mag_sel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/search/mag_sel.png -------------------------------------------------------------------------------- /docs/search/search_l.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/search/search_l.png -------------------------------------------------------------------------------- /docs/search/search_m.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/search/search_m.png -------------------------------------------------------------------------------- /docs/search/search_r.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/search/search_r.png -------------------------------------------------------------------------------- /docs/search/files_c.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['ops_2epy',['ops.py',['../ops_8py.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/search/pages_4.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['licence',['LICENCE',['../md_LICENCE.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /config/recipes/DNN/WSJ/alignment_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = alignment_processor 4 | -------------------------------------------------------------------------------- /docs/search/namespaces_4.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['models',['models',['../namespacemodels.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/search/pages_8.js: 
-------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['recipes',['Recipes',['../md_config_recipes_README.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /nabu/computing/condor/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package condor 2 | the condor functionality''' 3 | 4 | from . import build_cluster 5 | -------------------------------------------------------------------------------- /nabu/tools/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package tools 2 | contains some general tools for nabu''' 3 | 4 | from . import default_conf 5 | -------------------------------------------------------------------------------- /config/computing/condor/non_distributed.cfg: -------------------------------------------------------------------------------- 1 | [computing] 2 | #the minimum memory requirement of the GPUs in Mb 3 | minmemory = 5000 4 | -------------------------------------------------------------------------------- /docs/search/pages_5.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['models',['Models',['../md_nabu_neuralnetworks_models_README.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /nabu/computing/static/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package static 2 | the static distributed functionality''' 3 | 4 | from . 
import run_remote 5 | -------------------------------------------------------------------------------- /docs/search/pages_1.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['decoders',['Decoders',['../md_nabu_neuralnetworks_decoders_README.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/search/files_7.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['input_5fpipeline_2epy',['input_pipeline.py',['../input__pipeline_8py.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/search/files_8.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['kill_5fprocesses_2epy',['kill_processes.py',['../kill__processes_8py.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/search/pages_7.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['processors',['Processors',['../md_nabu_processing_processors_README.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /nabu/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package scripts 2 | contains all scripts used to run nabu code''' 3 | 4 | from . 
import train, test, decode, data 5 | -------------------------------------------------------------------------------- /docs/search/files_b.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['normalizer_5ffactory_2epy',['normalizer_factory.py',['../normalizer__factory_8py.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/search/namespaces_3.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['feature_5fcomputers',['feature_computers',['../namespacefeature__computers.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/search/pages_6.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['nabu',['Nabu',['../index.html',1,'']]], 4 | ['nabu',['Nabu',['../md_README.html',1,'']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/decoders/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package decoders 2 | this package contains all the decoders''' 3 | 4 | from . 
import decoder, decoder_factory 5 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/decoders/defaults/thresholddecoder.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the threshold used by the decoder to distinguish between 1 and 0 3 | threshold = 0.5 4 | -------------------------------------------------------------------------------- /config/recipes/DNN/WSJ/feature_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = audio_processor 4 | 5 | [feature] 6 | #feature type 7 | feature = fbank 8 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/feature_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = audio_processor 4 | 5 | [feature] 6 | #feature type 7 | feature = fbank 8 | -------------------------------------------------------------------------------- /nabu/processing/processors/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package processors 2 | contains the data processors''' 3 | 4 | from . 
import processor, processor_factory, feature_computers 5 | -------------------------------------------------------------------------------- /config/recipes/DBLSTM/TIMIT/feature_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = audio_processor 4 | 5 | [feature] 6 | #feature type 7 | feature = fbank 8 | -------------------------------------------------------------------------------- /config/recipes/LAS/TIMIT/feature_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = audio_processor 4 | 5 | [feature] 6 | #feature type 7 | feature = fbank 8 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1model_1_1Model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1model_1_1Model.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1recognizer_1_1Recognizer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1recognizer_1_1Recognizer.png -------------------------------------------------------------------------------- /nabu/computing/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package distributed 2 | the distributed computing functinality''' 3 | 4 | from . 
import cluster, condor, static, local_cluster, create_server 5 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package evaluators 2 | contains objects to use for evaluating models''' 3 | 4 | from . import evaluator, evaluator_factory 5 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/trainers/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package decoders 2 | this package contains all the trainers''' 3 | 4 | from . import trainer, trainer_factory, loss_functions 5 | -------------------------------------------------------------------------------- /nabu/processing/tfwriters/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package tfwriters 2 | contains the objects for writing tensorflow record files''' 3 | 4 | from . 
import tfwriter, tfwriter_factory 5 | -------------------------------------------------------------------------------- /docs/search/pages_3.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['feature_20computers',['Feature Computers',['../md_nabu_processing_processors_feature_computers_README.html',1,'']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfreaders_1_1tfreader_1_1TfReader.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfreaders_1_1tfreader_1_1TfReader.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfwriters_1_1tfwriter_1_1TfWriter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfwriters_1_1tfwriter_1_1TfWriter.png -------------------------------------------------------------------------------- /docs/search/namespaces_7.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['scripts',['scripts',['../namespacescripts.html',1,'']]], 4 | ['static',['static',['../namespacestatic.html',1,'']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package neuralnetworks 2 | The neural network functionality 3 | ''' 4 | from . 
import models, trainers, decoders, evaluators, components, recognizer 5 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package models 2 | Contains the neural net models and their components 3 | ''' 4 | 5 | from . import model, ed_decoders, ed_encoders 6 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1SaveAtEnd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1SaveAtEnd.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1StopHook.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1StopHook.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1decoders_1_1decoder_1_1Decoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1decoders_1_1decoder_1_1Decoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1Trainer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1Trainer.png -------------------------------------------------------------------------------- 
/docs/classnabu_1_1processing_1_1processors_1_1processor_1_1Processor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1processors_1_1processor_1_1Processor.png -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_decoders/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package ed_decoders 2 | contains the decoders for encoder-decoder classifiers''' 3 | 4 | from . import ed_decoder, ed_decoder_factory 5 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_encoders/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package encoders 2 | contains the encoders for encoder-decoder classifiers''' 3 | 4 | from . import ed_encoder, ed_encoder_factory 5 | -------------------------------------------------------------------------------- /nabu/processing/tfreaders/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package tfreaders 2 | contains readers that are used for reading and processing tfRecord files''' 3 | 4 | from . 
import tfreader, tfreader_factory 5 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1LoadAtBegin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1LoadAtBegin.png -------------------------------------------------------------------------------- /docs/search/namespaces_0.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['components',['components',['../namespacecomponents.html',1,'']]], 4 | ['condor',['condor',['../namespacecondor.html',1,'']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1constraints_1_1MaxNorm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1constraints_1_1MaxNorm.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1evaluators_1_1evaluator_1_1Evaluator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1evaluators_1_1evaluator_1_1Evaluator.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dnn_1_1DNN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dnn_1_1DNN.png -------------------------------------------------------------------------------- 
/docs/classnabu_1_1processing_1_1tfwriters_1_1array__writer_1_1ArrayWriter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfwriters_1_1array__writer_1_1ArrayWriter.png -------------------------------------------------------------------------------- /docs/search/namespaces_5.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['nabu',['nabu',['../namespacenabu.html',1,'']]], 4 | ['neuralnetworks',['neuralnetworks',['../namespaceneuralnetworks.html',1,'']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1decoders_1_1ctc__decoder_1_1CTCDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1decoders_1_1ctc__decoder_1_1CTCDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1decoders_1_1max__decoder_1_1MaxDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1decoders_1_1max__decoder_1_1MaxDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1ParameterServer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1ParameterServer.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfreaders_1_1binary__reader_1_1BinaryReader.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfreaders_1_1binary__reader_1_1BinaryReader.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfreaders_1_1string__reader_1_1StringReader.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfreaders_1_1string__reader_1_1StringReader.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfwriters_1_1binary__writer_1_1BinaryWriter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfwriters_1_1binary__writer_1_1BinaryWriter.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfwriters_1_1string__writer_1_1StringWriter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfwriters_1_1string__writer_1_1StringWriter.png -------------------------------------------------------------------------------- /docs/search/namespaces_1.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['decoders',['decoders',['../namespacedecoders.html',1,'']]], 4 | ['distributed',['distributed',['../namespacedistributed.html',1,'']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /docs/search/namespaces_6.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | 
['processing',['processing',['../namespaceprocessing.html',1,'']]], 4 | ['processors',['processors',['../namespaceprocessors.html',1,'']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/components/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package components 2 | contains tensorflow components''' 3 | 4 | from . import hooks, ops, rnn_cell, layer, beam_search_decoder, constraints,\ 5 | attention 6 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1ValidationSaveHook.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1ValidationSaveHook.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1BypassWrapper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1BypassWrapper.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dblstm_1_1DBLSTM.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dblstm_1_1DBLSTM.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1processors_1_1text__processor_1_1TextProcessor.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1processors_1_1text__processor_1_1TextProcessor.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1BahdanauAttention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1BahdanauAttention.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1WindowedAttention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1WindowedAttention.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1decoders_1_1random__decoder_1_1RandomDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1decoders_1_1random__decoder_1_1RandomDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1speller_1_1Speller.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1speller_1_1Speller.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1processors_1_1audio__processor_1_1AudioProcessor.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1processors_1_1audio__processor_1_1AudioProcessor.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1processors_1_1feature__computers_1_1mfcc_1_1Mfcc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1processors_1_1feature__computers_1_1mfcc_1_1Mfcc.png -------------------------------------------------------------------------------- /docs/search/functions_b.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['kill_5fprocesses',['kill_processes',['../kill__processes_8py.html#ab534ec51738690d331570ef14324edaf',1,'nabu::computing::static::kill_processes']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/search/functions_c.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['logpowspec',['logpowspec',['../sigproc_8py.html#a0c89f348bedbe8eb7c9c48fc13be1ef1',1,'nabu::processing::processors::feature_computers::sigproc']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/decoders/defaults/alignmentdecoder.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the path to the numpy file containing the state prior probabilities 3 | # if no file is speified a uniform prior is used 4 | prior = None 5 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/decoders/defaults/randomdecoder.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the maximum number of output steps 3 | max_steps = 4 | #the alphabet used by the decoder 
as a space seperated list of symbols 5 | alphabet = 6 | -------------------------------------------------------------------------------- /nabu/processing/target_normalizers/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package target_normalizers 2 | this package contains the normalizer functions for database target normalization 3 | ''' 4 | 5 | from . import normalizer_factory 6 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1ScopeRNNCellWrapper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1ScopeRNNCellWrapper.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1StateOutputWrapper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1StateOutputWrapper.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1decoders_1_1feature__decoder_1_1FeatureDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1decoders_1_1feature__decoder_1_1FeatureDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1evaluators_1_1loss__evaluator_1_1LossEvaluator.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1evaluators_1_1loss__evaluator_1_1LossEvaluator.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1listener_1_1Listener.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1listener_1_1Listener.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1processors_1_1binary__processor_1_1BinaryProcessor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1processors_1_1binary__processor_1_1BinaryProcessor.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1processors_1_1feature__computers_1_1fbank_1_1Fbank.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1processors_1_1feature__computers_1_1fbank_1_1Fbank.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfreaders_1_1alignment__reader_1_1AlignmentReader.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfreaders_1_1alignment__reader_1_1AlignmentReader.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfreaders_1_1string__reader__eos_1_1StringReaderEOS.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfreaders_1_1string__reader__eos_1_1StringReaderEOS.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfwriters_1_1alignment__writer_1_1AlignmentWriter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfwriters_1_1alignment__writer_1_1AlignmentWriter.png -------------------------------------------------------------------------------- /docs/search/classes_2.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['ctcdecoder',['CTCDecoder',['../classnabu_1_1neuralnetworks_1_1decoders_1_1ctc__decoder_1_1CTCDecoder.html',1,'nabu::neuralnetworks::decoders::ctc_decoder']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /nabu/processing/processors/defaults/alignmentprocessor.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #a maximum length of the utterance, set None for no max length 3 | #if a sequence exeeds this length it will be thrown away 4 | max_length = None 5 | -------------------------------------------------------------------------------- /nabu/processing/processors/defaults/binaryprocessor.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #a maximum length of the utterance, set None for no max length 3 | #if a sequence exeeds this length it will be thrown away 4 | max_length = None 5 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1LocationAwareAttention.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1LocationAwareAttention.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1trainers_1_1standard__trainer_1_1StandardTrainer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1trainers_1_1standard__trainer_1_1StandardTrainer.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1decoders_1_1alignment__decoder_1_1AlignmentDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1decoders_1_1alignment__decoder_1_1AlignmentDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1decoders_1_1threshold__decoder_1_1ThresholdDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1decoders_1_1threshold__decoder_1_1ThresholdDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1evaluators_1_1decoder__evaluator_1_1DecoderEvaluator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1evaluators_1_1decoder__evaluator_1_1DecoderEvaluator.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1dnn__decoder_1_1DNNDecoder.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1dnn__decoder_1_1DNNDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1ed__decoder_1_1EDDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1ed__decoder_1_1EDDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1rnn__decoder_1_1RNNDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1rnn__decoder_1_1RNNDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1ed__encoder_1_1EDEncoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1ed__encoder_1_1EDEncoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1processors_1_1alignment__processor_1_1AlignmentProcessor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1processors_1_1alignment__processor_1_1AlignmentProcessor.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1processors_1_1textfile__processor_1_1TextFileProcessor.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1processors_1_1textfile__processor_1_1TextFileProcessor.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchState.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchState.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1AttentionProjectionWrapper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1rnn__cell_1_1AttentionProjectionWrapper.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1decoders_1_1beam__search__decoder_1_1BeamSearchDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1decoders_1_1beam__search__decoder_1_1BeamSearchDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1tfreaders_1_1audio__feature__reader_1_1AudioFeatureReader.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1tfreaders_1_1audio__feature__reader_1_1AudioFeatureReader.png -------------------------------------------------------------------------------- /docs/search/classes_d.js: 
-------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['validationsavehook',['ValidationSaveHook',['../classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1ValidationSaveHook.html',1,'nabu::neuralnetworks::components::hooks']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/evaluators/defaults/decoderevaluator.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the number of utterances that are processed simultaniously 3 | batch_size = 1 4 | #a space seperated list of target names used by the evaluator 5 | targets = 6 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dummy__encoder_1_1DummyEncoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dummy__encoder_1_1DummyEncoder.png -------------------------------------------------------------------------------- /docs/search/classes_e.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['windowedattention',['WindowedAttention',['../classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1WindowedAttention.html',1,'nabu::neuralnetworks::components::attention']]] 4 | ]; 5 | 
-------------------------------------------------------------------------------- /docs/search/pages_0.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['computing_20configuration',['Computing configuration',['../md_config_computing_README.html',1,'']]], 4 | ['computing',['Computing',['../md_nabu_computing_README.html',1,'']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /config/computing/README.md: -------------------------------------------------------------------------------- 1 | # Computing configuration 2 | 3 | This directory contains the configuration files for the different computing 4 | modes. You can find more information about these modes 5 | [here](../../nabu/computing/README.md). 6 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoderOutput.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoderOutput.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1hotstart__decoder_1_1HotstartDecoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1hotstart__decoder_1_1HotstartDecoder.png -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1hotstart__encoder_1_1HotstartEncoder.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1hotstart__encoder_1_1HotstartEncoder.png -------------------------------------------------------------------------------- /nabu/processing/processors/feature_computers/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package feature_computers 2 | contains all the feature computers that are used in feature computation''' 3 | 4 | from . import base, sigproc, feature_computer, feature_computer_factory 5 | -------------------------------------------------------------------------------- /docs/search/all_a.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['join',['join',['../classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1ParameterServer.html#af0e95d63b397aed2114b4f0bc87a2ecf',1,'nabu::neuralnetworks::trainers::trainer::ParameterServer']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /docs/search/files_a.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['max_5fdecoder_2epy',['max_decoder.py',['../max__decoder_8py.html',1,'']]], 4 | ['mfcc_2epy',['mfcc.py',['../mfcc_8py.html',1,'']]], 5 | ['model_2epy',['model.py',['../model_8py.html',1,'']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /docs/classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoderFinalOutput.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoderFinalOutput.png -------------------------------------------------------------------------------- /docs/search/functions_a.js: 
-------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['join',['join',['../classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1ParameterServer.html#af0e95d63b397aed2114b4f0bc87a2ecf',1,'nabu::neuralnetworks::trainers::trainer::ParameterServer']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /config/computing/standard/single_machine.cfg: -------------------------------------------------------------------------------- 1 | [computing] 2 | #the number of parameter servers (minimum 1) 3 | numps = 1 4 | #the number of workers (minimum 1) 5 | numworkers = 2 6 | #the IDS of the worker GPUs, set to non existing IDs to use CPU 7 | gpus = 0 1 8 | -------------------------------------------------------------------------------- /docs/classnabu_1_1processing_1_1processors_1_1feature__computers_1_1feature__computer_1_1FeatureComputer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vrenkens/nabu/HEAD/docs/classnabu_1_1processing_1_1processors_1_1feature__computers_1_1feature__computer_1_1FeatureComputer.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # ignore compiled code 2 | *.pyc 3 | *.conf 4 | cluster 5 | nabu/computing/condor/create_environment.sh 6 | config/recipes/DBLSTM/GP 7 | config/recipes/phonology 8 | .gitignore 9 | config/recipes/phonology* 10 | sweeps 11 | config/recipes/dynamic* 12 | -------------------------------------------------------------------------------- /docs/search/namespaces_2.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['ed_5fdecoders',['ed_decoders',['../namespaceed__decoders.html',1,'']]], 4 | ['encoders',['encoders',['../namespaceencoders.html',1,'']]], 5 | 
['evaluators',['evaluators',['../namespaceevaluators.html',1,'']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /nabu/__init__.py: -------------------------------------------------------------------------------- 1 | '''@package nabu 2 | Nabu is a toolbox for designing, training and using end-to-end neural network 3 | systems for Automatic Speech Recognition. Nabu is built on top of TensorFlow. 4 | ''' 5 | 6 | from . import computing, neuralnetworks, processing, scripts, tools 7 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_encoders/defaults/dblstm.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #number of neurons in the hidden layers 3 | num_units = 128 4 | #number of hidden layers 5 | num_layers = 3 6 | #input noise standart deviation 7 | input_noise = 0.6 8 | #dropout rate 9 | dropout = 0.5 10 | -------------------------------------------------------------------------------- /docs/search/files_d.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['phones_2epy',['phones.py',['../phones_8py.html',1,'']]], 4 | ['processor_2epy',['processor.py',['../processor_8py.html',1,'']]], 5 | ['processor_5ffactory_2epy',['processor_factory.py',['../processor__factory_8py.html',1,'']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/evaluators/defaults/lossevaluator.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the number of utterances that are processed simultaniously 3 | batch_size = 1 4 | #the loss that is evaluated by this evaluator 5 | loss = 6 | #a space seperated list of target names used by the evaluator 7 | targets = 8 | -------------------------------------------------------------------------------- 
/config/recipes/LAS/GP/TU_phone_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = text_processor 4 | #the normalizer to be used 5 | normalizer = phones 6 | #the alphabet used by the processor (29) 7 | alphabet = ab spn ts sil sft i2 dz b e d g f i h k j m l o n p s r u t v ue z oe 8 | -------------------------------------------------------------------------------- /docs/search/all_b.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['kill_5fprocesses',['kill_processes',['../kill__processes_8py.html#ab534ec51738690d331570ef14324edaf',1,'nabu::computing::static::kill_processes']]], 4 | ['kill_5fprocesses_2epy',['kill_processes.py',['../kill__processes_8py.html',1,'']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /nabu/processing/processors/defaults/audioprocessor.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #a maximum length of the utterance, set None for no max length 3 | #if a sequence exeeds this length it will be thrown away 4 | max_length = None 5 | #if you want to apply mean and variance normalisation set to True 6 | mvn = True 7 | -------------------------------------------------------------------------------- /docs/search/files_6.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['hooks_2epy',['hooks.py',['../hooks_8py.html',1,'']]], 4 | ['hotstart_5fdecoder_2epy',['hotstart_decoder.py',['../hotstart__decoder_8py.html',1,'']]], 5 | ['hotstart_5fencoder_2epy',['hotstart_encoder.py',['../hotstart__encoder_8py.html',1,'']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/SP_phone_processor.cfg: 
-------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = text_processor 4 | #the normalizer to be used 5 | normalizer = phones 6 | #the alphabet used by the processor (36) 7 | alphabet = ei ai au spn eu o+ ts ng rf n~ sil e+ oi i+ a b e d g f i k j m l o n p s r u t w v x z 8 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/FR_phone_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = text_processor 4 | #the normalizer to be used 5 | normalizer = phones 6 | #the alphabet used by the processor (37) 7 | alphabet = zh e~ spn ax eu ae a~ nj o~ ng sh sil oe~ a b e d g f i h k j m l o n p s r u t w v y z oe 8 | -------------------------------------------------------------------------------- /config/recipes/DBLSTM/TIMIT/text_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = text_processor 4 | #the normalizer to be used 5 | normalizer = phones 6 | #the alphabet used by the processor 7 | alphabet = sil aa ae ah aw ay b ch d dh dx eh er ey f g hh ih iy jh k l m n ng ow oy p r s sh t th uh uw v w y z 8 | -------------------------------------------------------------------------------- /config/recipes/LAS/TIMIT/text_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = text_processor 4 | #the normalizer to be used 5 | normalizer = phones 6 | #the alphabet used by the processor 7 | alphabet = sil aa ae ah aw ay b ch d dh dx eh er ey f g hh ih iy jh k l m n ng ow oy p r s sh t th uh uw v w y z 8 | -------------------------------------------------------------------------------- /nabu/processing/__init__.py: 
-------------------------------------------------------------------------------- 1 | '''@package processing 2 | This package contains all the functionality for data processing: 3 | - feature computation 4 | - feature storing and loading 5 | - file interpretation 6 | ''' 7 | 8 | from . import target_normalizers, processors, tfreaders, tfwriters, \ 9 | input_pipeline 10 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/AR_phone_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = text_processor 4 | #the normalizer to be used 5 | normalizer = phones 6 | #the alphabet used by the processor (39) 7 | alphabet = cl ai al au il spn nl rr ll tl rl sil dl sl hq alal a c b d g f i h k j m l n q ul s r u t w ml x z 8 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/GE_phone_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = text_processor 4 | #the normalizer to be used 5 | normalizer = phones 6 | #the alphabet used by the processor (42) 7 | alphabet = el j ae uel ai al au il spn eu ul ts ng atu sil b oel a c ol e d g etu i h k f m l o n p s r u t v ue x z oe 8 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_decoders/defaults/hotstartdecoder.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the decoder that is wrapped by this hotstart decoder 3 | decoder = 4 | #the directory where the model of the pre-trained decoder is stored 5 | modeldir = 6 | #wether or not the variables in this decoder are trainable i.e. 
updated 7 | trainable = True 8 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_encoders/defaults/hotstartencoder.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the encoder that is wrapped by this hotstart encoder 3 | encoder = 4 | #the directory where the model of the pre-trained encoder is stored 5 | modeldir = 6 | #wether or not the variables in this encoder are trainable i.e. updated 7 | trainable = True 8 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/PO_phone_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = text_processor 4 | #the normalizer to be used 5 | normalizer = phones 6 | #the alphabet used by the processor (47) 7 | alphabet = ix sch i~ e~ spn ax a~ nj o+ rr o~ lj o~+ tj u+ sil dj e+ a+ u~+ i+ u~ w~ e~+ ux a b e d g f i a~+ k i~+ m l o n p s r u t w v z 8 | -------------------------------------------------------------------------------- /docs/search/namespaces_8.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['target_5fnormalizers',['target_normalizers',['../namespacetarget__normalizers.html',1,'']]], 4 | ['tfreaders',['tfreaders',['../namespacetfreaders.html',1,'']]], 5 | ['tfwriters',['tfwriters',['../namespacetfwriters.html',1,'']]], 6 | ['tools',['tools',['../namespacetools.html',1,'']]] 7 | ]; 8 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_decoders/defaults/dnndecoder.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the number of hidden layers in the DNN 3 | num_layers = 1 4 | #the number of hidden units in the DNN 5 | num_units = 1024 6 | #wether or not to use layer 
normalisation in between the layers 7 | layer_norm = True 8 | #the dropout rate between the layers 9 | dropout = 1 10 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/SW_phone_processor.cfg: -------------------------------------------------------------------------------- 1 | [processor] 2 | #type of processor 3 | processor = text_processor 4 | #the normalizer to be used 5 | normalizer = phones 6 | #the alphabet used by the processor (50) 7 | alphabet = el ae ole uel oc ale al il spn uxl ng alel sr ul abl tr lr nr sil oe ox b oel dr a c ol e d g f i h k j m l o n ks p s r u t v ue olel ael etu 8 | -------------------------------------------------------------------------------- /docs/search/files_9.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['layer_2epy',['layer.py',['../layer_8py.html',1,'']]], 4 | ['listener_2epy',['listener.py',['../listener_8py.html',1,'']]], 5 | ['loss_5fevaluator_2epy',['loss_evaluator.py',['../loss__evaluator_8py.html',1,'']]], 6 | ['loss_5ffunctions_2epy',['loss_functions.py',['../loss__functions_8py.html',1,'']]] 7 | ]; 8 | -------------------------------------------------------------------------------- /run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | command=$1 4 | arguments="${@:2}" 5 | basedir=$(dirname "$0") 6 | pwd=$(pwd) 7 | 8 | allowed="train test decode data sweep" 9 | 10 | if ! 
[[ $allowed =~ (^|[[:space:]])"$command"($|[[:space:]]) ]]; then 11 | echo "ERROR: unknown command $command" 12 | exit 1 13 | fi 14 | 15 | python nabu/scripts/prepare_$command.py $arguments 16 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_encoders/defaults/listener.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #number of neurons in the hidden layers 3 | num_units = 128 4 | #number of hidden layers 5 | num_layers = 2 6 | #number of timesteps that are concatenated in each layer 7 | pyramid_steps = 2 8 | #input noise standart deviation 9 | input_noise = 0.6 10 | #dropout rate 11 | dropout = 0.5 12 | -------------------------------------------------------------------------------- /docs/search/classes_9.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['parameterserver',['ParameterServer',['../classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1ParameterServer.html',1,'nabu::neuralnetworks::trainers::trainer']]], 4 | ['processor',['Processor',['../classnabu_1_1processing_1_1processors_1_1processor_1_1Processor.html',1,'nabu::processing::processors::processor']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /config/computing/condor/single_machine.cfg: -------------------------------------------------------------------------------- 1 | [computing] 2 | #build the cluster with HTCondor. 
HTCondor will be used to dynamically assign 3 | #machines to the cluster and will then execute on that cluster 4 | #the number of parameter servers (minimum 1) 5 | numps = 2 6 | #the number of workers (minimum 1) 7 | numworkers = 1 8 | #the minimum memory requirement of the GPUs in Mb 9 | minmemory = 6000 10 | -------------------------------------------------------------------------------- /docs/search/files_4.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['ed_5fencoder_2epy',['ed_encoder.py',['../ed__encoder_8py.html',1,'']]], 4 | ['ed_5fencoder_5ffactory_2epy',['ed_encoder_factory.py',['../ed__encoder__factory_8py.html',1,'']]], 5 | ['evaluator_2epy',['evaluator.py',['../evaluator_8py.html',1,'']]], 6 | ['evaluator_5ffactory_2epy',['evaluator_factory.py',['../evaluator__factory_8py.html',1,'']]] 7 | ]; 8 | -------------------------------------------------------------------------------- /docs/search/pages_2.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['evaluators',['Evaluators',['../md_nabu_neuralnetworks_evaluators_README.html',1,'']]], 4 | ['encoder_2ddecoder_20decoder',['Encoder-Decoder Decoder',['../md_nabu_neuralnetworks_models_ed_decoders_README.html',1,'']]], 5 | ['encoder_2ddecoder_20encoders',['Encoder-Decoder Encoders',['../md_nabu_neuralnetworks_models_ed_encoders_README.html',1,'']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /docs/search/files_1.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['binary_5fprocessor_2epy',['binary_processor.py',['../binary__processor_8py.html',1,'']]], 4 | ['binary_5freader_2epy',['binary_reader.py',['../binary__reader_8py.html',1,'']]], 5 | ['binary_5fwriter_2epy',['binary_writer.py',['../binary__writer_8py.html',1,'']]], 6 | 
['build_5fcluster_2epy',['build_cluster.py',['../build__cluster_8py.html',1,'']]] 7 | ]; 8 | -------------------------------------------------------------------------------- /docs/search/files_5.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['fbank_2epy',['fbank.py',['../fbank_8py.html',1,'']]], 4 | ['feature_5fcomputer_2epy',['feature_computer.py',['../feature__computer_8py.html',1,'']]], 5 | ['feature_5fcomputer_5ffactory_2epy',['feature_computer_factory.py',['../feature__computer__factory_8py.html',1,'']]], 6 | ['feature_5fdecoder_2epy',['feature_decoder.py',['../feature__decoder_8py.html',1,'']]] 7 | ]; 8 | -------------------------------------------------------------------------------- /docs/search/pages_9.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['trainers',['Trainers',['../md_nabu_neuralnetworks_trainers_README.html',1,'']]], 4 | ['target_20normalizers',['Target Normalizers',['../md_nabu_processing_target_normalizers_README.html',1,'']]], 5 | ['tf_20readers',['TF Readers',['../md_nabu_processing_tfreaders_README.html',1,'']]], 6 | ['tf_20writers',['TF Writers',['../md_nabu_processing_tfwriters_README.html',1,'']]] 7 | ]; 8 | -------------------------------------------------------------------------------- /docs/search/files_2.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['character_2epy',['character.py',['../character_8py.html',1,'']]], 4 | ['cluster_2epy',['cluster.py',['../cluster_8py.html',1,'']]], 5 | ['constraints_2epy',['constraints.py',['../constraints_8py.html',1,'']]], 6 | ['create_5fresweep_2epy',['create_resweep.py',['../create__resweep_8py.html',1,'']]], 7 | ['ctc_5fdecoder_2epy',['ctc_decoder.py',['../ctc__decoder_8py.html',1,'']]] 8 | ]; 9 | -------------------------------------------------------------------------------- 
/docs/search/functions_8.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['hooks',['hooks',['../classnabu_1_1neuralnetworks_1_1trainers_1_1standard__trainer_1_1StandardTrainer.html#aff614cac68779a4ba17f933b81e9254d',1,'nabu.neuralnetworks.trainers.standard_trainer.StandardTrainer.hooks()'],['../classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1Trainer.html#a4d7438ce4a5f233e92c33fb851a1e346',1,'nabu.neuralnetworks.trainers.trainer.Trainer.hooks()']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /nabu/processing/processors/defaults/textprocessor.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #a maximum length of the utterance, set None for no max length 3 | #if a sequence exeeds this length it will be thrown away 4 | max_length = None 5 | #the text normalizer to be used 6 | normalizer = 7 | #the alphabet used by the processor 8 | alphabet = 9 | #a symbol that will be ignored by the trainer, if no such symbol is needed set 10 | #to None 11 | nonesymbol = None 12 | -------------------------------------------------------------------------------- /docs/search/files_e.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['random_5fdecoder_2epy',['random_decoder.py',['../random__decoder_8py.html',1,'']]], 4 | ['recognizer_2epy',['recognizer.py',['../recognizer_8py.html',1,'']]], 5 | ['rnn_5fcell_2epy',['rnn_cell.py',['../rnn__cell_8py.html',1,'']]], 6 | ['rnn_5fdecoder_2epy',['rnn_decoder.py',['../rnn__decoder_8py.html',1,'']]], 7 | ['run_5fremote_2epy',['run_remote.py',['../run__remote_8py.html',1,'']]] 8 | ]; 9 | -------------------------------------------------------------------------------- /nabu/processing/processors/defaults/textfileprocessor.cfg: -------------------------------------------------------------------------------- 1 | 
[default] 2 | #a maximum length of the utterance, set None for no max length 3 | #if a sequence exeeds this length it will be thrown away 4 | max_length = None 5 | #the text normalizer to be used 6 | normalizer = 7 | #the alphabet used by the processor 8 | alphabet = 9 | #a symbol that will be ignored by the trainer, if no such symbol is needed set 10 | #to None 11 | nonesymbol = None 12 | -------------------------------------------------------------------------------- /nabu/processing/target_normalizers/phones.py: -------------------------------------------------------------------------------- 1 | '''@file phones.py 2 | contains the phones target normalizer''' 3 | 4 | def normalize(transcription, _): 5 | '''normalize a phonetic transcription 6 | 7 | Args: 8 | transcription: the transcription to be normalized as a string 9 | 10 | Returns: 11 | the normalized transcription as a string space seperated per 12 | character''' 13 | 14 | return transcription 15 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_encoders/defaults/dnn.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #number of neurons in the hidden layers 3 | num_units = 2048 4 | #number of hidden layers 5 | num_layers = 5 6 | #input noise standart deviation 7 | input_noise = 0 8 | #dropout rate between layers 9 | dropout = 1 10 | #number of left and right context windows to take into account in input 11 | context = 5 12 | #wheter layer normalization should be applied 13 | layer_norm = True 14 | -------------------------------------------------------------------------------- /config/recipes/DNN/WSJ/recognizer.cfg: -------------------------------------------------------------------------------- 1 | [recognizer] 2 | #the number of utterances that are processed simultaniously 3 | batch_size = 8 4 | #link the input names defined in the classifier config to sections defined in 5 | #the database 
config 6 | features = test93fbank 7 | 8 | [decoder] 9 | #name of the decoder that should be used 10 | decoder = alignment_decoder 11 | prior = /users/spraak/vrenkens/spchtemp/Nabu/data/wsj/kaldi_alignments/train_si284/prior.npy 12 | -------------------------------------------------------------------------------- /nabu/scripts/duplicate_model.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | #this file will duplicate the model to several subfolders to allow seperate 4 | #testing on multiple datasets 5 | # 6 | #usage: duplicate_model.sh 7 | # expdir: points to the expdir containng the model to be duplicated 8 | # target: the target directory 9 | 10 | expdir=$(readlink -m $1) 11 | target=$(readlink -m $2) 12 | 13 | mkdir -p $target 14 | ln -s $expdir/model $target/model 15 | -------------------------------------------------------------------------------- /docs/search/classes_6.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['hotstartdecoder',['HotstartDecoder',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1hotstart__decoder_1_1HotstartDecoder.html',1,'nabu::neuralnetworks::models::ed_decoders::hotstart_decoder']]], 4 | ['hotstartencoder',['HotstartEncoder',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1hotstart__encoder_1_1HotstartEncoder.html',1,'nabu::neuralnetworks::models::ed_encoders::hotstart_encoder']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /docs/search/functions_4.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['decode',['decode',['../decode_8py.html#a30034e7692123a41cacf8d2e4c201331',1,'nabu::scripts::decode']]], 4 | ['deframesig',['deframesig',['../sigproc_8py.html#a20b903a0d7065c999d249590943deb43',1,'nabu::processing::processors::feature_computers::sigproc']]], 5 | 
['dense_5fsequence_5fto_5fsparse',['dense_sequence_to_sparse',['../ops_8py.html#a2f3941f9c4455335eb5ae4ebcea4f4b8',1,'nabu::neuralnetworks::components::ops']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/trainers/trainer_factory.py: -------------------------------------------------------------------------------- 1 | '''@file trainer_factory.py 2 | contains the Trainer factory mehod''' 3 | 4 | def factory(trainer): 5 | '''gets a Trainer class 6 | 7 | Args: 8 | trainer: the trainer type 9 | 10 | Returns: a Trainer class 11 | ''' 12 | 13 | if trainer == 'standard': 14 | import standard_trainer 15 | return standard_trainer.StandardTrainer 16 | else: 17 | raise Exception('Undefined trainer type: %s' % trainer) 18 | -------------------------------------------------------------------------------- /config/recipes/DBLSTM/TIMIT/recognizer.cfg: -------------------------------------------------------------------------------- 1 | [recognizer] 2 | #the number of utterances that are processed simultaniously 3 | batch_size = 32 4 | #link the input names defined in the classifier config to sections defined in 5 | #the database config 6 | features = testfbank 7 | 8 | [decoder] 9 | #name of the decoder that should be used 10 | decoder = ctc_decoder 11 | #the alphabet used by the decoder 12 | text_alphabet = sil aa ae ah aw ay b ch d dh dx eh er ey f g hh ih iy jh k l m n ng ow oy p r s sh t th uh uw v w y z 13 | -------------------------------------------------------------------------------- /config/computing/condor/multi_machine.cfg: -------------------------------------------------------------------------------- 1 | [computing] 2 | #build the cluster with HTCondor. 
HTCondor will be used to dynamically assign 3 | #machines to the cluster and will then execute on that cluster 4 | #the number of parameter servers (minimum 1) 5 | numps = 1 6 | #the number of workers (minimum 1) 7 | numworkers = 1 8 | #the minimum memory requirement of the GPUs in Mb 9 | minmemory = 1000 10 | #the command to use for creating ssh tunnels, if no tunnels should be created 11 | # set to None 12 | ssh_command=/tmp/ssh 13 | -------------------------------------------------------------------------------- /docs/search/nomatches.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
9 |
No Matches
10 |
11 | 12 | 13 | -------------------------------------------------------------------------------- /config/computing/standard/multi_machine.cfg: -------------------------------------------------------------------------------- 1 | [computing] 2 | #use a cluster that is statically defined in a cluster file containg a line for 3 | #for each task, with the job name, the machine, the port and the GPU index. 4 | #For example: 5 | #ps,ps.machines.com,1024 6 | #worker,worker1.machines.com,1024,0 7 | #worker,worker2.machines.com,1024,0 8 | #the file where the cluster is defined 9 | clusterfile = cluster 10 | #the command to use for creating ssh tunnels, if no tunnels should be created 11 | # set to None 12 | ssh_command=ssh 13 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/decoders/README.md: -------------------------------------------------------------------------------- 1 | # Decoders 2 | 3 | A Decoder uses a model to convert the inputs to sequences of output labels. 4 | To create a new decoder you should inherit from the general Decoder class 5 | defined in decoder.py and overwrite the abstract methods. Afterwards you should 6 | add the decoder to the factory method in decoder_factory.py. It is also very 7 | helpful to create a default configuration in the defaults directory. The name of 8 | the file should be the name of the class in lower case with the .cfg extension. 9 | -------------------------------------------------------------------------------- /nabu/processing/tfreaders/README.md: -------------------------------------------------------------------------------- 1 | # TF Readers 2 | 3 | a TensorFlow Reader is used to read TFRecord files directly into the graph. 4 | To create a new TFReader you should inherit from the general TFReader class 5 | defined in tfreader.py and overwrite the abstract methods. You should then add 6 | it to the factory method in tfreader_factory.py. 
7 | It is also very helpful to create a default configuration in the defaults 8 | directory. The name of the file should be the name of the class in lower case 9 | with the .cfg extension. 10 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/trainers/README.md: -------------------------------------------------------------------------------- 1 | # Trainers 2 | 3 | A trainer is used to update the model parameters to minimize some loss function. 4 | To create a new trainer you should inherit from the general Trainer class 5 | defined in trainer.py and overwrite the abstract methods. Afterwards yo should 6 | add the trainer to the factory method in trainer_factory.py. 7 | It is also very helpful to create a default configuration in the defaults 8 | directory. The name of the file should be the name of the class in lower case 9 | with the .cfg extension. 10 | -------------------------------------------------------------------------------- /docs/search/classes_a.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['randomdecoder',['RandomDecoder',['../classnabu_1_1neuralnetworks_1_1decoders_1_1random__decoder_1_1RandomDecoder.html',1,'nabu::neuralnetworks::decoders::random_decoder']]], 4 | ['recognizer',['Recognizer',['../classnabu_1_1neuralnetworks_1_1recognizer_1_1Recognizer.html',1,'nabu::neuralnetworks::recognizer']]], 5 | ['rnndecoder',['RNNDecoder',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1rnn__decoder_1_1RNNDecoder.html',1,'nabu::neuralnetworks::models::ed_decoders::rnn_decoder']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /docs/search/functions_13.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['test',['test',['../test_8py.html#aeab5d45bd879527483f91ce3ad3fab20',1,'nabu::scripts::test']]], 4 | 
['test_5fdir',['test_dir',['../test__recipes_8py.html#a69ac25a5d69cd1ef42ccbb1a4059bb2f',1,'nabu::scripts::test_recipes']]], 5 | ['train',['train',['../classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1Trainer.html#a43be632325d3b192de1eb48ad1e9794a',1,'nabu.neuralnetworks.trainers.trainer.Trainer.train()'],['../train_8py.html#ad0205c4e4248508656a1fb751db7baf2',1,'nabu.scripts.train.train()']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /nabu/processing/tfwriters/README.md: -------------------------------------------------------------------------------- 1 | # TF Writers 2 | 3 | A TensorFlow writer is used to write processed data to TFRecord files in the 4 | data preparation. To create a new writer you should inherit from the general 5 | TFWriter class defined in tfwriter.py and overwrite the abstract methods. You 6 | should then add it to the factory method in tfwriter_factory.py. 7 | It is also very helpful to create a default configuration in the defaults 8 | directory. The name of the file should be the name of the class in lower case 9 | with the .cfg extension. 
10 | -------------------------------------------------------------------------------- /docs/search/files_f.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['sigproc_2epy',['sigproc.py',['../sigproc_8py.html',1,'']]], 4 | ['speller_2epy',['speller.py',['../speller_8py.html',1,'']]], 5 | ['standard_5ftrainer_2epy',['standard_trainer.py',['../standard__trainer_8py.html',1,'']]], 6 | ['string_5freader_2epy',['string_reader.py',['../string__reader_8py.html',1,'']]], 7 | ['string_5freader_5feos_2epy',['string_reader_eos.py',['../string__reader__eos_8py.html',1,'']]], 8 | ['string_5fwriter_2epy',['string_writer.py',['../string__writer_8py.html',1,'']]] 9 | ]; 10 | -------------------------------------------------------------------------------- /config/recipes/DBLSTM/TIMIT/validation_evaluator.cfg: -------------------------------------------------------------------------------- 1 | [evaluator] 2 | #name of the evaluator that should be used 3 | evaluator = loss_evaluator 4 | #the loss to be evaluated 5 | loss = CTC 6 | #the number of utterances that are processed simultaniously 7 | batch_size = 8 8 | #link the input names defined in the classifier config to sections defined in 9 | #the database config 10 | features = devfbank 11 | #a space seperated list of target names used by the evaluator 12 | targets = text 13 | #a mapping between the target names and database sections 14 | text = devtext 15 | -------------------------------------------------------------------------------- /docs/search/classes_4.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['eddecoder',['EDDecoder',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1ed__decoder_1_1EDDecoder.html',1,'nabu::neuralnetworks::models::ed_decoders::ed_decoder']]], 4 | 
['edencoder',['EDEncoder',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1ed__encoder_1_1EDEncoder.html',1,'nabu::neuralnetworks::models::ed_encoders::ed_encoder']]], 5 | ['evaluator',['Evaluator',['../classnabu_1_1neuralnetworks_1_1evaluators_1_1evaluator_1_1Evaluator.html',1,'nabu::neuralnetworks::evaluators::evaluator']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /nabu/processing/processors/feature_computers/feature_computer_factory.py: -------------------------------------------------------------------------------- 1 | '''@file feature_computer_factory.py 2 | contains the FeatureComputer factory''' 3 | 4 | def factory(feature): 5 | ''' 6 | create a FeatureComputer 7 | 8 | Args: 9 | feature: the feature computer type 10 | ''' 11 | 12 | if feature == 'fbank': 13 | import fbank 14 | return fbank.Fbank 15 | elif feature == 'mfcc': 16 | import mfcc 17 | return mfcc.Mfcc 18 | else: 19 | raise Exception('Undefined feature type: %s' % feature) 20 | -------------------------------------------------------------------------------- /nabu/processing/processors/feature_computers/README.md: -------------------------------------------------------------------------------- 1 | # Feature Computers 2 | 3 | a feature computer is used to compute audio features. To create your own feature 4 | computer you can inherit from the general FeatureComputer class defined in 5 | feature_computer.py and overwrite the abstract methods. 6 | Afterwards you should add it to the factory method in 7 | feature_computer_factory.py. 8 | It is also very helpful to create a default configuration in the defaults 9 | directory. The name of the file should be the name of the class in lower case 10 | with the .cfg extension. 
11 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_encoders/README.md: -------------------------------------------------------------------------------- 1 | # Encoder-Decoder Encoders 2 | 3 | an encoder-decoder encoder (ed-encoder) encoder the inputs into a hidden 4 | representation. To create a new ed-encoder you should inherit from the general 5 | EDEncoder class defined in ed_encoder.py and overwrite the abstract methods. 6 | Afterwards you should add it to the factory method in ed_encoder_factory.py. 7 | It is also very helpful to create a default configuration in the defaults 8 | directory. The name of the file should be the name of the class in lower case 9 | with the .cfg extension. 10 | -------------------------------------------------------------------------------- /docs/search/functions_e.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['normalize',['normalize',['../aurora4_8py.html#ada3976e17dcef9df450e46cb0cf0cf5e',1,'nabu.processing.target_normalizers.aurora4.normalize()'],['../character_8py.html#a67fa017056f714ca3b68d50485aedcae',1,'nabu.processing.target_normalizers.character.normalize()'],['../phones_8py.html#a1d708b602a321e8b76ee1a16f6fabb77',1,'nabu.processing.target_normalizers.phones.normalize()']]], 4 | ['normalized_5fsigmoid',['normalized_sigmoid',['../attention_8py.html#aad678aa8780f621d8cb9b94eabd235bb',1,'nabu::neuralnetworks::components::attention']]] 5 | ]; 6 | -------------------------------------------------------------------------------- /docs/search/searchdata.js: -------------------------------------------------------------------------------- 1 | var indexSectionsWithContent = 2 | { 3 | 0: "_abcdefghijklmnoprstuvwz", 4 | 1: "abcdefhlmprstvw", 5 | 2: "cdefmnpst", 6 | 3: "abcdefhiklmnoprst", 7 | 4: "_abcdefghijklmnoprstuvwz", 8 | 5: "cdeflmnprt" 9 | }; 10 | 11 | var indexSectionNames = 12 | { 13 | 0: 
"all", 14 | 1: "classes", 15 | 2: "namespaces", 16 | 3: "files", 17 | 4: "functions", 18 | 5: "pages" 19 | }; 20 | 21 | var indexSectionLabels = 22 | { 23 | 0: "All", 24 | 1: "Classes", 25 | 2: "Namespaces", 26 | 3: "Files", 27 | 4: "Functions", 28 | 5: "Pages" 29 | }; 30 | 31 | -------------------------------------------------------------------------------- /nabu/processing/processors/feature_computers/defaults/fbank.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the dynamic information that is added to the features, options are nodelta, 3 | #delta and ddelta 4 | dynamic = ddelta 5 | #length of the sliding window (seconds) 6 | winlen = 0.025 7 | #step of the sliding window (seconds) 8 | winstep = 0.01 9 | #number of fbank filters 10 | nfilt = 40 11 | #number of fft bins 12 | nfft = 512 13 | #low cuttof frequency 14 | lowfreq = 0 15 | #hight cutoff frequency, if -1 means None 16 | #premphesis 17 | preemph = 0.97 18 | #include energy in features 19 | include_energy = True 20 | -------------------------------------------------------------------------------- /config/recipes/DNN/WSJ/test_evaluator.cfg: -------------------------------------------------------------------------------- 1 | [evaluator] 2 | #name of the evaluator that should be used 3 | evaluator = loss_evaluator 4 | #the loss to be evaluated 5 | loss = average_cross_entropy 6 | #the number of utterances that are processed simultaniously 7 | batch_size = 8 8 | #link the input names defined in the classifier config to sections defined in 9 | #the database config 10 | features = test93fbank 11 | #a space seperated list of target names used by the evaluator 12 | targets = alignments 13 | #a mapping between the target names and database sections 14 | alignments = test93alignments 15 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_decoders/defaults/speller.cfg: 
-------------------------------------------------------------------------------- 1 | [default] 2 | #number of rnn layers 3 | num_layers = 2 4 | #number of units in each layer 5 | num_units = 128 6 | #the dropout rate between the layers 7 | dropout = 0.5 8 | #the attention mechanism that should be used, one of vanilla, location_aware, 9 | #monotonic or windowed 10 | attention = vanilla 11 | #the probability function that should be used for the alignments, one of 12 | #softmax or sigmoid 13 | probability_fn = softmax 14 | #the probability that the network will sample from the output during training 15 | sample_prob = 0.1 16 | -------------------------------------------------------------------------------- /config/recipes/DNN/WSJ/validation_evaluator.cfg: -------------------------------------------------------------------------------- 1 | [evaluator] 2 | #name of the evaluator that should be used 3 | evaluator = loss_evaluator 4 | #the loss that is evaluated 5 | loss = average_cross_entropy 6 | #the number of utterances that are processed simultaniously 7 | batch_size = 32 8 | #link the input names defined in the classifier config to sections defined in 9 | #the database config 10 | features = devfbank 11 | #a space seperated list of target names used by the evaluator 12 | targets = alignments 13 | #a mapping between the target names and database sections 14 | alignments = devalignments 15 | -------------------------------------------------------------------------------- /config/recipes/LAS/TIMIT/recognizer.cfg: -------------------------------------------------------------------------------- 1 | [recognizer] 2 | #the number of utterances that are processed simultaniously 3 | batch_size = 32 4 | #link the input names defined in the classifier config to sections defined in 5 | #the database config 6 | features = testfbank 7 | 8 | [decoder] 9 | #name of the decoder that should be used 10 | decoder = beam_search_decoder 11 | #the maximum number of output steps 12 | 
max_steps = 100 13 | #the beam width 14 | beam_width = 16 15 | #the alphabet used by the decoder 16 | alphabet = sil aa ae ah aw ay b ch d dh dx eh er ey f g hh ih iy jh k l m n ng ow oy p r s sh t th uh uw v w y z 17 | -------------------------------------------------------------------------------- /docs/search/classes_5.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['fbank',['Fbank',['../classnabu_1_1processing_1_1processors_1_1feature__computers_1_1fbank_1_1Fbank.html',1,'nabu::processing::processors::feature_computers::fbank']]], 4 | ['featurecomputer',['FeatureComputer',['../classnabu_1_1processing_1_1processors_1_1feature__computers_1_1feature__computer_1_1FeatureComputer.html',1,'nabu::processing::processors::feature_computers::feature_computer']]], 5 | ['featuredecoder',['FeatureDecoder',['../classnabu_1_1neuralnetworks_1_1decoders_1_1feature__decoder_1_1FeatureDecoder.html',1,'nabu::neuralnetworks::decoders::feature_decoder']]] 6 | ]; 7 | -------------------------------------------------------------------------------- /docs/search/files_3.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['data_2epy',['data.py',['../data_8py.html',1,'']]], 4 | ['dblstm_2epy',['dblstm.py',['../dblstm_8py.html',1,'']]], 5 | ['decode_2epy',['decode.py',['../decode_8py.html',1,'']]], 6 | ['decoder_2epy',['decoder.py',['../decoder_8py.html',1,'']]], 7 | ['decoder_5fevaluator_2epy',['decoder_evaluator.py',['../decoder__evaluator_8py.html',1,'']]], 8 | ['decoder_5ffactory_2epy',['decoder_factory.py',['../decoder__factory_8py.html',1,'']]], 9 | ['default_5fconf_2epy',['default_conf.py',['../default__conf_8py.html',1,'']]], 10 | ['dnn_2epy',['dnn.py',['../dnn_8py.html',1,'']]] 11 | ]; 12 | -------------------------------------------------------------------------------- /docs/search/functions_15.js: 
-------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['variables',['variables',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1ed__decoder_1_1EDDecoder.html#a1894af6a716f3b74fb6d7e18486db57c',1,'nabu.neuralnetworks.models.ed_decoders.ed_decoder.EDDecoder.variables()'],['../classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1ed__encoder_1_1EDEncoder.html#a0b9ace4ff2f26b62b52f177898c8f442',1,'nabu.neuralnetworks.models.ed_encoders.ed_encoder.EDEncoder.variables()'],['../classnabu_1_1neuralnetworks_1_1models_1_1model_1_1Model.html#a74bcb6ff6d27ccf8980d63d34ccc92a1',1,'nabu.neuralnetworks.models.model.Model.variables()']]] 4 | ]; 5 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/models/ed_decoders/README.md: -------------------------------------------------------------------------------- 1 | # Encoder-Decoder Decoder 2 | 3 | An encoder-decoder decoder (ed-decoder) outputs labels based on the hidden 4 | representation of the encoder and the history of decoded labels. To create a new 5 | ed-decoder you should inherit from the general EDDecoder class defined in 6 | ed-decoder.py and overwrite the abstract methods. 7 | Afterwards you should add it to the factory method in ed_decoder_factory.py. 8 | It is also very helpful to create a default configuration in the defaults 9 | directory. The name of the file should be the name of the class in lower case 10 | with the .cfg extension. 
11 | -------------------------------------------------------------------------------- /config/recipes/DBLSTM/TIMIT/model.cfg: -------------------------------------------------------------------------------- 1 | [io] 2 | #a space seperated list of input names 3 | inputs = features 4 | #a space seperated list of output names 5 | outputs = text 6 | #a space seperated list of model output dimensions (exluding eos) 7 | output_dims = 39 8 | 9 | [encoder] 10 | #type of encoder 11 | encoder = dblstm 12 | #number of neurons in the hidden layers 13 | num_units = 128 14 | #number of hidden layers 15 | num_layers = 3 16 | #input noise standart deviation 17 | input_noise = 0.6 18 | #dropout rate 19 | dropout = 0.5 20 | 21 | [decoder] 22 | #type of decoder 23 | decoder = dnn_decoder 24 | #no hidden layers 25 | num_layers = 0 26 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/recognizer.cfg: -------------------------------------------------------------------------------- 1 | [recognizer] 2 | #the number of utterances that are processed simultaniously 3 | batch_size = 32 4 | #link the input names defined in the classifier config to sections defined in 5 | #the database config 6 | features = POevalfbank 7 | 8 | [decoder] 9 | #name of the decoder that should be used 10 | decoder = beam_search_decoder 11 | #the maximum number of output steps 12 | max_steps = 400 13 | #the beam width 14 | beam_width = 16 15 | #the alphabet used by the decoder 16 | alphabet = ix sch i~ e~ spn ax a~ nj o+ rr o~ lj o~+ tj u+ sil dj e+ a+ u~+ i+ u~ w~ e~+ ux a b e d g f i a~+ k i~+ m l o n p s r u t w v z 17 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/decoders/defaults/beamsearchdecoder.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #the maximum number of output steps 3 | max_steps = 4 | #the beam width 5 | beam_width = 16 6 | #the 
'''@file evaluator_factory.py
contains the Evaluator factory'''

def factory(evaluator):
    '''
    gets an evaluator class

    Args:
        evaluator: the evaluator type, one of 'decoder_evaluator' or
            'loss_evaluator'

    Returns:
        an evaluator class
    '''

    if evaluator == 'loss_evaluator':
        import loss_evaluator
        return loss_evaluator.LossEvaluator
    if evaluator == 'decoder_evaluator':
        import decoder_evaluator
        return decoder_evaluator.DecoderEvaluator
    raise Exception('Undefined evaluator type: %s' % evaluator)
'''@file compute_prior.py
this script can be used to compute pdf priors for kaldi'''

import os
import sys
import itertools
import numpy as np

#the training directory is given as the first command line argument
traindir = sys.argv[1]

#read the pdffile: each line holds an utterance id followed by its pdf ids
with open(os.path.join(traindir, 'pdfs')) as fid:
    pdfs = fid.readlines()
#drop the utterance id and flatten all pdf ids into one list
pdfs = [pdf.split()[1:] for pdf in pdfs]
pdfs = list(itertools.chain.from_iterable(pdfs))
#convert to a list of ints instead of a lazy map object: under Python 3
#map() returns an iterator, which max() below would exhaust before
#np.histogram ever sees the data
pdfs = [int(pdf) for pdf in pdfs]

#count each pdf occurrence (one bin per pdf id from 0 to the maximum)
counts, _ = np.histogram(pdfs, range(max(pdfs)+2))

#normalize the counts to get the priors
prior = counts.astype(np.float32)/counts.sum()

np.save(os.path.join(traindir, 'prior.npy'), prior)
['listener',['Listener',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1listener_1_1Listener.html',1,'nabu::neuralnetworks::models::ed_encoders::listener']]], 4 | ['loadatbegin',['LoadAtBegin',['../classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1LoadAtBegin.html',1,'nabu::neuralnetworks::components::hooks']]], 5 | ['locationawareattention',['LocationAwareAttention',['../classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1LocationAwareAttention.html',1,'nabu::neuralnetworks::components::attention']]], 6 | ['lossevaluator',['LossEvaluator',['../classnabu_1_1neuralnetworks_1_1evaluators_1_1loss__evaluator_1_1LossEvaluator.html',1,'nabu::neuralnetworks::evaluators::loss_evaluator']]] 7 | ]; 8 | -------------------------------------------------------------------------------- /config/recipes/DBLSTM/TIMIT/test_evaluator.cfg: -------------------------------------------------------------------------------- 1 | [evaluator] 2 | #name of the evaluator that should be used 3 | evaluator = decoder_evaluator 4 | #the number of utterances that are processed simultaniously 5 | batch_size = 32 6 | #link the input names defined in the classifier config to sections defined in 7 | #the database config 8 | features = testfbank 9 | #a space seperated list of target names used by the evaluator 10 | targets = text 11 | #a mapping between the target names and database sections 12 | text = testtext 13 | 14 | [decoder] 15 | #name of the decoder that should be used 16 | decoder = ctc_decoder 17 | #the alphabet used by the decoder 18 | text_alphabet = sil aa ae ah aw ay b ch d dh dx eh er ey f g hh ih iy jh k l m n ng ow oy p r s sh t th uh uw v w y z 19 | -------------------------------------------------------------------------------- /nabu/processing/processors/README.md: -------------------------------------------------------------------------------- 1 | # Processors 2 | 3 | A processor is used to process data in the data preperation script. 
For example, 4 | feature computation for audio or normalization for text. To create a new 5 | processor you should inherit from the general Processor class defined in 6 | processor.py and overwrite the abstract methods. You should then add it to the 7 | factory method in processor_factory.py. 8 | It is also very helpful to create a default configuration in the defaults 9 | directory. The name of the file should be the name of the class in lower case 10 | with the .cfg extension. 11 | 12 | You can find more information about feature computers 13 | [here](../feature_computers/README.md) and target normalizers 14 | [here](../target_normalizers/README.md) 15 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/evaluators/README.md: -------------------------------------------------------------------------------- 1 | # Evaluators 2 | 3 | An evaluator is used to evaluate the performance of the model during training 4 | or at test time. To create a new evaluator you should inherit from the general 5 | Evaluator class defined in evaluator.py and overwrite all the abstract methods. 6 | Afterwards you should add it to the factory method in evaluator_factory.py. 7 | It is also very helpful to create a default configuration in the defaults 8 | directory. The name of the file should be the name of the class in lower case 9 | with the .cfg extension. 10 | 11 | The decoder_evaluator will use a decoder to decode the validation set and 12 | compare the results with the ground truth. You can find more information about 13 | decoders [here](../decoders/README.md). 
'''@file kill_processes.py
contains the kill_processes method'''

import os
import socket

def kill_processes(processdir):
    '''kill all processes that reported in the processdir

    Args:
        processdir: directory containing one file per process, named
            <machine>-<pid>
    '''

    #the address of the machine this method runs on
    localaddress = socket.gethostbyname(socket.gethostname())

    for procfile in os.listdir(processdir):
        parts = procfile.split('-')
        machine = parts[0]
        pid = parts[1]
        if machine == localaddress or machine == '127.0.0.1':
            #the process runs locally, kill it directly
            os.system('kill %s' % pid)
        else:
            #the process runs on another machine, kill it over ssh
            os.system('ssh -o StrictHostKeyChecking=no -o '
                      'UserKnownHostsFile=/dev/null %s "kill %s"'
                      % (machine, pid))
'''@file alignment_writer.py
contains the AlignmentWriter class'''

import numpy as np
import tensorflow as tf
import tfwriter

class AlignmentWriter(tfwriter.TfWriter):
    '''a TfWriter to write kaldi alignments'''

    def _get_example(self, data):
        '''create an example proto for the data

        Args:
            data: the alignment data to be stored in the example

        Returns:
            a tf.train.Example containing the serialized alignments'''

        #flatten the alignments and serialize them as 32 bit integers
        serialized = data.reshape([-1]).astype(np.int32).tostring()
        data_feature = tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[serialized]))

        #create the example proto
        return tf.train.Example(features=tf.train.Features(
            feature={'data': data_feature}))
'''@file binary_writer.py
contains the BinaryWriter class'''

import tensorflow as tf
import tfwriter

class BinaryWriter(tfwriter.TfWriter):
    '''a TfWriter to write binary data'''

    def _get_example(self, data):
        '''create an example proto for the data

        Args:
            data: the data to be stored in the example, presumably a numpy
                array (it is serialized with tostring())

        Returns:
            a tf.train.Example containing the serialized data
        '''

        #serialize the raw bytes of the data
        data_feature = tf.train.Feature(bytes_list=tf.train.BytesList(
            value=[data.tostring()]))

        #create the example proto
        #NOTE(review): the original code also built a 'length' feature from
        #data.shape[0] but never added it to the example; that dead code is
        #removed here. Unlike StringWriter, no 'length' is stored — confirm
        #the matching tfreader does not expect one.
        example = tf.train.Example(features=tf.train.Features(feature={
            'data': data_feature}))

        return example
'''@file string_writer.py
contains the StringWriter class'''

import tensorflow as tf
import tfwriter

class StringWriter(tfwriter.TfWriter):
    '''a TfWriter to write strings'''

    def _get_example(self, data):
        '''create an example proto for the data

        Args:
            data: the string data to be stored in the example

        Returns:
            a tf.train.Example holding the string and its length'''

        #store the length of the string so the reader can recover it
        length_feature = tf.train.Feature(
            int64_list=tf.train.Int64List(value=[len(data)]))
        #store the string itself as raw bytes
        data_feature = tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[data]))

        #create the example proto
        features = tf.train.Features(feature={
            'length': length_feature,
            'data': data_feature})

        return tf.train.Example(features=features)
['alignment_5fprocessor_2epy',['alignment_processor.py',['../alignment__processor_8py.html',1,'']]], 5 | ['alignment_5freader_2epy',['alignment_reader.py',['../alignment__reader_8py.html',1,'']]], 6 | ['alignment_5fwriter_2epy',['alignment_writer.py',['../alignment__writer_8py.html',1,'']]], 7 | ['array_5fwriter_2epy',['array_writer.py',['../array__writer_8py.html',1,'']]], 8 | ['attention_2epy',['attention.py',['../attention_8py.html',1,'']]], 9 | ['audio_5ffeature_5freader_2epy',['audio_feature_reader.py',['../audio__feature__reader_8py.html',1,'']]], 10 | ['audio_5fprocessor_2epy',['audio_processor.py',['../audio__processor_8py.html',1,'']]], 11 | ['aurora4_2epy',['aurora4.py',['../aurora4_8py.html',1,'']]] 12 | ]; 13 | -------------------------------------------------------------------------------- /docs/search/functions_10.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['pad_5fto',['pad_to',['../ops_8py.html#a8bf6b06b5c1d6e69ad73f2101f2b478a',1,'nabu::neuralnetworks::components::ops']]], 4 | ['pblstm',['pblstm',['../layer_8py.html#ab5617673c223366a9f9e9cdc94b3a896',1,'nabu::neuralnetworks::components::layer']]], 5 | ['port_5favailable',['port_available',['../cluster_8py.html#a948195ea8e833af76f137aaef3df64aa',1,'nabu::computing::cluster']]], 6 | ['powspec',['powspec',['../sigproc_8py.html#aa723b5765e6537fdc8c85eeac3b105ef',1,'nabu::processing::processors::feature_computers::sigproc']]], 7 | ['preemphasis',['preemphasis',['../sigproc_8py.html#a0d2e2e6f8018a3cc47489bb6e7ca2853',1,'nabu::processing::processors::feature_computers::sigproc']]], 8 | ['pyramid_5fstack',['pyramid_stack',['../ops_8py.html#aa0526046773823364ea28b5ec2034677',1,'nabu::neuralnetworks::components::ops']]] 9 | ]; 10 | -------------------------------------------------------------------------------- /config/recipes/LAS/TIMIT/model.cfg: -------------------------------------------------------------------------------- 1 | [io] 2 
| #a space seperated list of input names 3 | inputs = features 4 | #a space seperated list of output names 5 | outputs = text 6 | #a space seperated list of model output dimensions (exluding eos) 7 | output_dims = 39 8 | 9 | [encoder] 10 | #type of encoder 11 | encoder = listener 12 | #the standard deviation of the Gaussian input noise added during training 13 | input_noise = 0.6 14 | #number of pyramidal layers a non-pyramidal layer is added at the end 15 | num_layers = 2 16 | #number of units in each layer 17 | num_units = 128 18 | #number of timesteps to concatenate in each pyramidal layer 19 | pyramid_steps = 2 20 | #dropout rate 21 | dropout = 0.5 22 | 23 | [decoder] 24 | #type of decoder 25 | decoder = speller 26 | #number of layers 27 | num_layers = 2 28 | #number of units 29 | num_units = 128 30 | #the dropout rate in the rnn 31 | dropout = 0.5 32 | -------------------------------------------------------------------------------- /docs/search/all_9.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['initial_5falignments',['initial_alignments',['../classnabu_1_1neuralnetworks_1_1components_1_1attention_1_1WindowedAttention.html#a17a840d2cf3ad36cb97ae5f72ec2aae6',1,'nabu::neuralnetworks::components::attention::WindowedAttention']]], 4 | ['initialize',['initialize',['../classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoder.html#a418ec10b8af94e9930fb5c3fd5c77c6e',1,'nabu::neuralnetworks::components::beam_search_decoder::BeamSearchDecoder']]], 5 | ['input_5fpipeline',['input_pipeline',['../input__pipeline_8py.html#a9c06d205001055093670c3910e930699',1,'nabu::processing::input_pipeline']]], 6 | ['input_5fpipeline_2epy',['input_pipeline.py',['../input__pipeline_8py.html',1,'']]], 7 | ['is_5frecipe',['is_recipe',['../test__recipes_8py.html#a02f8195d576a47f4836c43db0fcbb464',1,'nabu::scripts::test_recipes']]] 8 | ]; 9 | 
'''@file ed_encoder_factory.py
contains the EDEncoder factory'''

def factory(encoder):
    '''get an EDEncoder class

    Args:
        encoder: the encoder type

    Returns:
        an EDEncoder class'''

    #table of known encoder types mapped to (module name, class name);
    #the module is only imported once the type has been selected, so an
    #unused encoder implementation is never loaded
    known_encoders = {
        'listener': ('listener', 'Listener'),
        'dummy_encoder': ('dummy_encoder', 'DummyEncoder'),
        'dblstm': ('dblstm', 'DBLSTM'),
        'dnn': ('dnn', 'DNN'),
        'hotstart_encoder': ('hotstart_encoder', 'HotstartEncoder'),
    }

    if encoder not in known_encoders:
        raise Exception('undefined encoder type: %s' % encoder)

    module_name, class_name = known_encoders[encoder]

    return getattr(__import__(module_name), class_name)
| 22 | #create the example proto 23 | example = tf.train.Example(features=tf.train.Features(feature={ 24 | 'shape': shape_feature, 25 | 'data': data_feature})) 26 | 27 | return example 28 | -------------------------------------------------------------------------------- /config/recipes/LAS/TIMIT/validation_evaluator.cfg: -------------------------------------------------------------------------------- 1 | [evaluator] 2 | #name of the evaluator that should be used 3 | evaluator = decoder_evaluator 4 | #the number of utterances that are processed simultaniously 5 | batch_size = 32 6 | #link the input names defined in the classifier config to sections defined in 7 | #the database config 8 | features = devfbank 9 | #a space seperated list of target names used by the evaluator 10 | targets = text 11 | #a mapping between the target names and database sections 12 | text = devtext 13 | 14 | [decoder] 15 | #name of the decoder that should be used 16 | decoder = beam_search_decoder 17 | #the maximum number of output steps 18 | max_steps = 100 19 | #the beam width 20 | beam_width = 16 21 | #if you want to visualize the alignments set to True 22 | visualize_alignments = True 23 | #the alphabet used by the decoder 24 | alphabet = sil aa ae ah aw ay b ch d dh dx eh er ey f g hh ih iy jh k l m n ng ow oy p r s sh t th uh uw v w y z 25 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/validation_evaluator.cfg: -------------------------------------------------------------------------------- 1 | [evaluator] 2 | #name of the evaluator that should be used 3 | evaluator = decoder_evaluator 4 | #the loss to be evaluated 5 | loss = average_cross_entropy 6 | #the number of utterances that are processed simultaniously 7 | batch_size = 16 8 | #link the input names defined in the classifier config to sections defined in 9 | #the database config 10 | features = POdevfbank 11 | #a space seperated list of target names used by the evaluator 
'''@file constraints.py
contains constraints for the model weights'''

import tensorflow as tf

class MaxNorm(object):
    '''a constraint for a maximum norm of the weights

    weights whose norm along the configured axis exceeds max_norm are
    rescaled so the norm equals max_norm; smaller weights are unchanged'''

    def __init__(self, max_norm=1, axis=0):
        '''
        constructor

        args:
            max_norm: the maximum norm the weights may have
            axis: the axis to compute the norm
        '''

        self._axis = axis
        self._max_norm = max_norm

    def __call__(self, tensor):
        '''apply the constraint

        args:
            tensor: the tensor to apply the constraint to

        returns:
            the constrained tensor'''

        with tf.name_scope('MaxNorm'):
            norms = tf.norm(tensor, axis=self._axis)
            #clip the norms to the configured maximum; the previous code
            #hard-coded an upper bound of 1, which silently ignored the
            #max_norm argument
            clipped = tf.clip_by_value(norms, 0, self._max_norm)
            #divide by the norms plus a small epsilon to avoid division by
            #zero; the previous code added tensor.dtype.min, which for float
            #types is the largest *negative* representable value (~-3.4e38
            #for float32) and made the denominator hugely negative
            out = tensor*clipped/(norms + 1e-7)

        return out
alignments 6 | #a space seperated list of model output dimensions (equal to #HMM states) 7 | output_dims = 3100 8 | 9 | [encoder] 10 | #type of encoder 11 | encoder = dnn 12 | #number of neurons in the hidden layers 13 | num_units = 2048 14 | #number of hidden layers 15 | num_layers = 5 16 | #input noise standart deviation 17 | input_noise = 0 18 | #dropout rate 19 | dropout = 1 20 | #number of left and right context windows to take into account 21 | context = 5 22 | #wheter layer normalization should be applied 23 | layer_norm = True 24 | 25 | [decoder] 26 | #type of decoder 27 | decoder = dnn_decoder 28 | #the number of layers in each detector 29 | num_layers = 0 30 | #the number of units in each detector 31 | num_units = 2024 32 | #wheter layer normalization should be applied 33 | layer_norm = True 34 | #dropout rate 35 | dropout = 1 36 | -------------------------------------------------------------------------------- /config/recipes/DBLSTM/TIMIT/trainer.cfg: -------------------------------------------------------------------------------- 1 | [trainer] 2 | #name of the trainer that should be used 3 | trainer = standard 4 | #the loss function to be minimized 5 | loss = CTC 6 | #the amount of training labels that need to be added to the output 7 | trainlabels = 1 8 | #link the input names defined in the classifier config to sections defined in 9 | #the database config 10 | features = trainfbank 11 | #a space seperated list of target names used by the trainer 12 | targets = text 13 | #a mapping between the target names and database sections 14 | text = traintext 15 | #number of passes over the entire database 16 | num_epochs = 30 17 | #exponential weight decay parameter 18 | learning_rate_decay = 1 19 | #size of the minibatch (#utterances) 20 | batch_size = 8 21 | 22 | ###VALIDATION PART### 23 | #frequency of evaluating the validation set. 
24 | valid_frequency = 500 25 | #the number of times validation performance can be worse before terminating 26 | #training, set to None to disable early stopping 27 | num_tries = 5 28 | -------------------------------------------------------------------------------- /nabu/processing/tfreaders/tfreader_factory.py: -------------------------------------------------------------------------------- 1 | '''@file tfreader_factory.py 2 | contains the tfreader factory''' 3 | 4 | def factory(datatype): 5 | '''factory for tfreaders 6 | 7 | Args: 8 | dataype: the type of data to be read 9 | 10 | Returns: 11 | a tfreader class 12 | ''' 13 | 14 | if datatype == 'audio_feature': 15 | import audio_feature_reader 16 | return audio_feature_reader.AudioFeatureReader 17 | elif datatype == 'string': 18 | import string_reader 19 | return string_reader.StringReader 20 | elif datatype == 'string_eos': 21 | import string_reader_eos 22 | return string_reader_eos.StringReaderEOS 23 | elif datatype == 'binary': 24 | import binary_reader 25 | return binary_reader.BinaryReader 26 | elif datatype == 'alignment': 27 | import alignment_reader 28 | return alignment_reader.AlignmentReader 29 | else: 30 | raise Exception('unknown data type: %s' % datatype) 31 | -------------------------------------------------------------------------------- /nabu/computing/condor/dataprep.job: -------------------------------------------------------------------------------- 1 | # Normal execution 2 | Universe = vanilla 3 | # number of CPUs is equal to the number of parameter servers and workers 4 | requestCpus = 1 5 | # need enough RAM to store the parameters 6 | RequestMemory = 4G 7 | #request a week of wall time 8 | +RequestWalltime = 604800 9 | 10 | 11 | #Send an email in case of an error 12 | Notification = Error 13 | 14 | initialdir = . 
15 | 16 | #Run he build cluster script for the parameter server 17 | Arguments = "python -um nabu.scripts.data --expdir=$(expdir)" 18 | # This is the executable or script I want to run 19 | executable = nabu/computing/condor/create_environment.sh 20 | 21 | #Output of condors handling of the jobs, will be in 'initialdir' 22 | Log = $(expdir)/outputs/data.log 23 | #Standard output of the 'executable', in 'initialdir' 24 | Output = $(expdir)/outputs/data.out 25 | #Standard error of the 'executable', in 'initialdir' 26 | Error = $(expdir)/outputs/data.err 27 | 28 | # Queue the job 29 | Queue 30 | -------------------------------------------------------------------------------- /config/recipes/LAS/TIMIT/trainer.cfg: -------------------------------------------------------------------------------- 1 | [trainer] 2 | #name of the trainer that should be used 3 | trainer = standard 4 | #the loss function to be minimized 5 | loss = average_cross_entropy 6 | #the amount of training labels that need to be added to the output 7 | trainlabels = 1 8 | #link the input names defined in the classifier config to sections defined in 9 | #the database config 10 | features = trainfbank 11 | #a space seperated list of target names used by the trainer 12 | targets = text 13 | #a mapping between the target names and database sections 14 | text = traintext 15 | #number of passes over the entire database 16 | num_epochs = 300 17 | #exponential weight decay parameter 18 | learning_rate_decay = 0.1 19 | #size of the minibatch (#utterances) 20 | batch_size = 128 21 | 22 | ###VALIDATION PART### 23 | #frequency of evaluating the validation set. 
24 | valid_frequency = 500 25 | #the number of times validation performance can be worse before terminating training, set to None to disable early stopping 26 | num_tries = 2 27 | -------------------------------------------------------------------------------- /docs/search/files_10.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['test_2epy',['test.py',['../test_8py.html',1,'']]], 4 | ['test_5frecipe_2epy',['test_recipe.py',['../test__recipe_8py.html',1,'']]], 5 | ['test_5frecipes_2epy',['test_recipes.py',['../test__recipes_8py.html',1,'']]], 6 | ['text_5fprocessor_2epy',['text_processor.py',['../text__processor_8py.html',1,'']]], 7 | ['textfile_5fprocessor_2epy',['textfile_processor.py',['../textfile__processor_8py.html',1,'']]], 8 | ['tfreader_2epy',['tfreader.py',['../tfreader_8py.html',1,'']]], 9 | ['tfreader_5ffactory_2epy',['tfreader_factory.py',['../tfreader__factory_8py.html',1,'']]], 10 | ['tfwriter_2epy',['tfwriter.py',['../tfwriter_8py.html',1,'']]], 11 | ['threshold_5fdecoder_2epy',['threshold_decoder.py',['../threshold__decoder_8py.html',1,'']]], 12 | ['train_2epy',['train.py',['../train_8py.html',1,'']]], 13 | ['trainer_2epy',['trainer.py',['../trainer_8py.html',1,'']]], 14 | ['trainer_5ffactory_2epy',['trainer_factory.py',['../trainer__factory_8py.html',1,'']]] 15 | ]; 16 | -------------------------------------------------------------------------------- /config/recipes/DNN/WSJ/trainer.cfg: -------------------------------------------------------------------------------- 1 | [trainer] 2 | #name of the trainer that should be used 3 | trainer = standard 4 | #the loss function to be minimized 5 | loss = average_cross_entropy 6 | #the amount of training labels that need to be added to the output 7 | trainlabels = 0 8 | #link the input names defined in the classifier config to sections defined in 9 | #the database config 10 | features = trainfbank 11 | #a space seperated list of target names 
used by the trainer 12 | targets = alignments 13 | #a mapping between the target names and database sections 14 | alignments = trainalignments 15 | #number of passes over the entire database 16 | num_epochs = 10 17 | #exponential weight decay parameter 18 | learning_rate_decay = 0.1 19 | #size of the minibatch (#utterances) 20 | batch_size = 32 21 | 22 | ###VALIDATION PART### 23 | #frequency of evaluating the validation set. 24 | valid_frequency = 1000 25 | #the number of times validation performance can be worse before terminating training, set to None to disable early stopping 26 | num_tries = 10 27 | -------------------------------------------------------------------------------- /docs/search/classes_c.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['textfileprocessor',['TextFileProcessor',['../classnabu_1_1processing_1_1processors_1_1textfile__processor_1_1TextFileProcessor.html',1,'nabu::processing::processors::textfile_processor']]], 4 | ['textprocessor',['TextProcessor',['../classnabu_1_1processing_1_1processors_1_1text__processor_1_1TextProcessor.html',1,'nabu::processing::processors::text_processor']]], 5 | ['tfreader',['TfReader',['../classnabu_1_1processing_1_1tfreaders_1_1tfreader_1_1TfReader.html',1,'nabu::processing::tfreaders::tfreader']]], 6 | ['tfwriter',['TfWriter',['../classnabu_1_1processing_1_1tfwriters_1_1tfwriter_1_1TfWriter.html',1,'nabu::processing::tfwriters::tfwriter']]], 7 | ['thresholddecoder',['ThresholdDecoder',['../classnabu_1_1neuralnetworks_1_1decoders_1_1threshold__decoder_1_1ThresholdDecoder.html',1,'nabu::neuralnetworks::decoders::threshold_decoder']]], 8 | ['trainer',['Trainer',['../classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1Trainer.html',1,'nabu::neuralnetworks::trainers::trainer']]] 9 | ]; 10 | -------------------------------------------------------------------------------- /nabu/neuralnetworks/trainers/standard_trainer.py: 
-------------------------------------------------------------------------------- 1 | '''@file standard_trainer.py 2 | contains the StandardTrainer''' 3 | 4 | from nabu.neuralnetworks.trainers import trainer 5 | 6 | class StandardTrainer(trainer.Trainer): 7 | '''a trainer with no added functionality''' 8 | 9 | def aditional_loss(self): 10 | ''' 11 | add an aditional loss 12 | 13 | returns: 14 | the aditional loss or None 15 | ''' 16 | 17 | return None 18 | 19 | def chief_only_hooks(self, outputs): 20 | '''add hooks only for the chief worker 21 | 22 | Args: 23 | outputs: the outputs generated by the create graph method 24 | 25 | Returns: 26 | a list of hooks 27 | ''' 28 | 29 | return [] 30 | 31 | def hooks(self, outputs): 32 | '''add hooks for the session 33 | 34 | Args: 35 | outputs: the outputs generated by the create graph method 36 | 37 | Returns: 38 | a list of hooks 39 | ''' 40 | 41 | return [] 42 | -------------------------------------------------------------------------------- /config/recipes/LAS/GP/model.cfg: -------------------------------------------------------------------------------- 1 | [io] 2 | #a space seperated list of input names 3 | inputs = features 4 | #a space seperated list of output names 5 | outputs = phones 6 | #a space seperated list of model output dimensions (exluding eos) 7 | #depends on the language used for training 8 | output_dims = 47 9 | 10 | [encoder] 11 | #type of encoder 12 | encoder = listener 13 | #the standard deviation of the Gaussian input noise added during training 14 | input_noise = 0 15 | #number of pyramidal layers a non-pyramidal layer is added at the end 16 | num_layers = 2 17 | #number of units in each layer 18 | num_units = 128 19 | #dropout rate 20 | dropout = 0.5 21 | 22 | [decoder] 23 | #type of decoder 24 | decoder = speller 25 | #number of layers 26 | num_layers = 2 27 | #number of units 28 | num_units = 128 29 | #the attention mechanism that should be used, one of vanilla, location_aware 30 | #monotonic or 
'''@file gp.py
contains the global phoneset target normalizer'''

import unicodedata

def normalize(transcription, alphabet):
    '''normalize for the Global Phoneset database

    Args:
        transcription: the transcription to be normalized as a utf-8
            encoded string
        alphabet: the characters that may remain in the output

    Returns:
        the normalized transcription as a string space seperated per
        character'''

    #remove accents by decomposing the characters and dropping everything
    #that does not survive an ASCII encode
    normalized = unicodedata.normalize(
        form='NFKD',
        unistr=transcription.decode('utf-8')).encode('ASCII', 'ignore')

    normalized = list(normalized.lower())


    #replace the spaces with
    #use equality instead of identity: the original "is not ' '" compared
    #object identity and only behaved correctly because CPython happens to
    #intern single-character strings
    normalized = [character if character != ' ' else ''
                  for character in normalized]

    #replace unknown characters with
    normalized = [character if character in alphabet else ''
                  for character in normalized]

    return ' '.join(normalized)
| 4 | import ed_encoder 5 | 6 | class DummyEncoder(ed_encoder.EDEncoder): 7 | '''an encoder that does nothing''' 8 | 9 | def encode(self, inputs, input_seq_length, is_training=False): 10 | ''' 11 | Create the variables and do the forward computation 12 | 13 | Args: 14 | inputs: the inputs to the neural network, this is a dictionary of 15 | [batch_size x time x ...] tensors 16 | input_seq_length: The sequence lengths of the input utterances, this 17 | is a dictionary of [batch_size] vectors 18 | is_training: whether or not the network is in training mode 19 | 20 | Returns: 21 | - the outputs of the encoder as a dictionary of 22 | [bath_size x time x ...] tensors 23 | - the sequence lengths of the outputs as a dictionary of 24 | [batch_size] tensors 25 | ''' 26 | 27 | return inputs, input_seq_length 28 | -------------------------------------------------------------------------------- /docs/search/classes_3.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['dblstm',['DBLSTM',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dblstm_1_1DBLSTM.html',1,'nabu::neuralnetworks::models::ed_encoders::dblstm']]], 4 | ['decoder',['Decoder',['../classnabu_1_1neuralnetworks_1_1decoders_1_1decoder_1_1Decoder.html',1,'nabu::neuralnetworks::decoders::decoder']]], 5 | ['decoderevaluator',['DecoderEvaluator',['../classnabu_1_1neuralnetworks_1_1evaluators_1_1decoder__evaluator_1_1DecoderEvaluator.html',1,'nabu::neuralnetworks::evaluators::decoder_evaluator']]], 6 | ['dnn',['DNN',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dnn_1_1DNN.html',1,'nabu::neuralnetworks::models::ed_encoders::dnn']]], 7 | ['dnndecoder',['DNNDecoder',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1dnn__decoder_1_1DNNDecoder.html',1,'nabu::neuralnetworks::models::ed_decoders::dnn_decoder']]], 8 | 
['dummyencoder',['DummyEncoder',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1dummy__encoder_1_1DummyEncoder.html',1,'nabu::neuralnetworks::models::ed_encoders::dummy_encoder']]] 9 | ]; 10 | -------------------------------------------------------------------------------- /docs/search/functions_7.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['get_5fdim',['get_dim',['../classnabu_1_1processing_1_1processors_1_1feature__computers_1_1fbank_1_1Fbank.html#a4cce577b36164fb38202c93c08ca01fc',1,'nabu.processing.processors.feature_computers.fbank.Fbank.get_dim()'],['../classnabu_1_1processing_1_1processors_1_1feature__computers_1_1feature__computer_1_1FeatureComputer.html#a47db7de9e87afbf4c1d3a7d0c3746ced',1,'nabu.processing.processors.feature_computers.feature_computer.FeatureComputer.get_dim()'],['../classnabu_1_1processing_1_1processors_1_1feature__computers_1_1mfcc_1_1Mfcc.html#a62c55172265a864ffd98f3cc49b4459d',1,'nabu.processing.processors.feature_computers.mfcc.Mfcc.get_dim()']]], 4 | ['get_5ffilenames',['get_filenames',['../input__pipeline_8py.html#ad9c5835552b79cc7e7a38853a753a862',1,'nabu::processing::input_pipeline']]], 5 | ['get_5findices',['get_indices',['../ops_8py.html#a259e1c1f8aa0f0fb28d06e562cf4c07e',1,'nabu::neuralnetworks::components::ops']]], 6 | ['get_5fmachines',['get_machines',['../cluster_8py.html#a8b907542543e98ab62f5e2216de101bb',1,'nabu::computing::cluster']]] 7 | ]; 8 | -------------------------------------------------------------------------------- /docs/search/all_8.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | 
['hooks',['hooks',['../classnabu_1_1neuralnetworks_1_1trainers_1_1standard__trainer_1_1StandardTrainer.html#aff614cac68779a4ba17f933b81e9254d',1,'nabu.neuralnetworks.trainers.standard_trainer.StandardTrainer.hooks()'],['../classnabu_1_1neuralnetworks_1_1trainers_1_1trainer_1_1Trainer.html#a4d7438ce4a5f233e92c33fb851a1e346',1,'nabu.neuralnetworks.trainers.trainer.Trainer.hooks()']]], 4 | ['hooks_2epy',['hooks.py',['../hooks_8py.html',1,'']]], 5 | ['hotstart_5fdecoder_2epy',['hotstart_decoder.py',['../hotstart__decoder_8py.html',1,'']]], 6 | ['hotstart_5fencoder_2epy',['hotstart_encoder.py',['../hotstart__encoder_8py.html',1,'']]], 7 | ['hotstartdecoder',['HotstartDecoder',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__decoders_1_1hotstart__decoder_1_1HotstartDecoder.html',1,'nabu::neuralnetworks::models::ed_decoders::hotstart_decoder']]], 8 | ['hotstartencoder',['HotstartEncoder',['../classnabu_1_1neuralnetworks_1_1models_1_1ed__encoders_1_1hotstart__encoder_1_1HotstartEncoder.html',1,'nabu::neuralnetworks::models::ed_encoders::hotstart_encoder']]] 9 | ]; 10 | -------------------------------------------------------------------------------- /docs/search/functions_2.js: -------------------------------------------------------------------------------- 1 | var searchData= 2 | [ 3 | ['batch_5fsize',['batch_size',['../classnabu_1_1neuralnetworks_1_1components_1_1beam__search__decoder_1_1BeamSearchDecoder.html#ae57e76ea61bd677be46903445a628905',1,'nabu::neuralnetworks::components::beam_search_decoder::BeamSearchDecoder']]], 4 | 
['begin',['begin',['../classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1LoadAtBegin.html#ada363caea329451d5c9fc7f57a75c0d5',1,'nabu.neuralnetworks.components.hooks.LoadAtBegin.begin()'],['../classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1SaveAtEnd.html#ad1b2579b5860540f253a2350f9343acc',1,'nabu.neuralnetworks.components.hooks.SaveAtEnd.begin()'],['../classnabu_1_1neuralnetworks_1_1components_1_1hooks_1_1ValidationSaveHook.html#a16d145bb51ff685d7cca4eedad4a2a7a',1,'nabu.neuralnetworks.components.hooks.ValidationSaveHook.begin()']]], 5 | ['blstm',['blstm',['../layer_8py.html#a84b6b1a36ee6ee5cb770012e5aa8e1e2',1,'nabu::neuralnetworks::components::layer']]], 6 | ['bucket_5fboundaries',['bucket_boundaries',['../input__pipeline_8py.html#a03ae35500d261c7e05f274e85ccec0ef',1,'nabu::processing::input_pipeline']]] 7 | ]; 8 | -------------------------------------------------------------------------------- /nabu/processing/target_normalizers/character.py: -------------------------------------------------------------------------------- 1 | '''@file character.py 2 | contains the character target normalizer''' 3 | 4 | def normalize(transcription, alphabet): 5 | '''normalize a transcription 6 | 7 | Args: 8 | transcription: the transcription to be normalized as a string 9 | 10 | Returns: 11 | the normalized transcription as a string space seperated per 12 | character''' 13 | 14 | #make the transcription lower case and put it into a list 15 | normalized = list(transcription.lower()) 16 | 17 | #replace the spaces with 18 | normalized = [character if character != ' ' else '' 19 | for character in normalized] 20 | 21 | #replace the end of line with 22 | #replace the spaces with 23 | normalized = [character if character != '\n' else '' 24 | for character in normalized] 25 | 26 | #replace unknown characters with 27 | normalized = [character if character in alphabet else '' 28 | for character in normalized] 29 | 30 | return ' '.join(normalized) 31 | 
-------------------------------------------------------------------------------- /config/recipes/DNN/WSJ/database.cfg: -------------------------------------------------------------------------------- 1 | [trainfbank] 2 | type = audio_feature 3 | datafiles = /path/to/wav.scp 4 | dir = /esat/spchtemp/scratch/vrenkens/Nabu/data/wsj/train/features 5 | processor_config = config/recipes/DNN/WSJ/feature_processor.cfg 6 | 7 | [test92fbank] 8 | type = audio_feature 9 | datafiles = /path/to/wav.scp 10 | dir = /path/to/features 11 | processor_config = config/recipes/DNN/WSJ/feature_processor.cfg 12 | 13 | [test93fbank] 14 | type = audio_feature 15 | datafiles = /path/to/wav.scp 16 | dir = /path/to/features 17 | processor_config = config/recipes/DNN/WSJ/feature_processor.cfg 18 | 19 | [devfbank] 20 | type = audio_feature 21 | datafiles = /path/to/wav.scp 22 | dir = /path/to/features 23 | processor_config = config/recipes/DNN/WSJ/feature_processor.cfg 24 | 25 | [trainalignments] 26 | type = alignment 27 | datafiles = /path/to/pdfs 28 | dir = /path/to/alignments 29 | processor_config = config/recipes/DNN/WSJ/alignment_processor.cfg 30 | 31 | [devalignments] 32 | type = alignment 33 | datafiles = /path/to/pdfs 34 | dir = /path/to/alignments 35 | processor_config = config/recipes/DNN/WSJ/alignment_processor.cfg 36 | -------------------------------------------------------------------------------- /docs/search/all_0.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_10.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_11.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_12.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_13.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_14.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_15.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_16.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_17.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_2.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_3.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_4.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_5.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_6.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_7.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_8.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_9.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_a.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_b.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_c.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_d.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_e.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/all_f.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_0.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_10.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_2.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_3.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_4.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_5.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_6.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_7.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_8.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_9.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_a.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_b.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_c.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_d.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_e.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/files_f.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_0.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_2.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_3.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_4.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_5.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_6.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_7.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_8.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/pages_9.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_0.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_2.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_3.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_4.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_5.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_6.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_7.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_8.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_9.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_a.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_b.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_c.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_d.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/search/classes_e.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 |
Loading...
12 |
13 | 16 |
Searching...
17 |
No Matches
18 | 24 |
25 | 26 | 27 | --------------------------------------------------------------------------------