├── docs ├── sphinx │ ├── changelog.rst │ ├── api │ │ ├── enumlist.rst │ │ ├── classlist.rst │ │ ├── structlist.rst │ │ ├── typedeflist.rst │ │ ├── typedef │ │ │ ├── BufferF.rst │ │ │ └── TensorShapeList.rst │ │ ├── enum │ │ │ └── InferenceBackend.rst │ │ ├── class │ │ │ ├── classanira_1_1Buffer.rst │ │ │ ├── classanira_1_1Context.rst │ │ │ ├── classanira_1_1RingBuffer.rst │ │ │ ├── classanira_1_1BackendBase.rst │ │ │ ├── classanira_1_1MemoryBlock.rst │ │ │ ├── classanira_1_1InferenceThread.rst │ │ │ ├── classanira_1_1SessionElement.rst │ │ │ ├── classanira_1_1TFLiteProcessor.rst │ │ │ ├── classanira_1_1InferenceHandler.rst │ │ │ ├── classanira_1_1InferenceManager.rst │ │ │ ├── classanira_1_1JsonConfigLoader.rst │ │ │ ├── classanira_1_1PrePostProcessor.rst │ │ │ ├── classanira_1_1LibtorchProcessor.rst │ │ │ ├── classanira_1_1HighPriorityThread.rst │ │ │ ├── classanira_1_1OnnxRuntimeProcessor.rst │ │ │ └── classanira_1_1benchmark_1_1ProcessBlockFixture.rst │ │ ├── struct │ │ │ ├── structanira_1_1HostConfig.rst │ │ │ ├── structanira_1_1ModelData.rst │ │ │ ├── structanira_1_1TensorShape.rst │ │ │ ├── structanira_1_1ContextConfig.rst │ │ │ ├── structanira_1_1InferenceData.rst │ │ │ ├── structanira_1_1InferenceConfig.rst │ │ │ ├── structanira_1_1ProcessingSpec.rst │ │ │ ├── structanira_1_1TFLiteProcessor_1_1Instance.rst │ │ │ ├── structanira_1_1LibtorchProcessor_1_1Instance.rst │ │ │ ├── structanira_1_1OnnxRuntimeProcessor_1_1Instance.rst │ │ │ └── structanira_1_1JsonConfigLoader_1_1SingleParameterStruct.rst │ │ └── index.rst │ ├── index.rst │ ├── about.rst │ ├── conf.py.in │ ├── latency.rst │ ├── contributing.rst │ ├── examples.rst │ ├── architecture.rst │ └── troubleshooting.rst ├── img │ └── anira-logo.png └── CMakeLists.txt ├── src ├── utils │ ├── Buffer.cpp │ └── RingBuffer.cpp ├── backends │ ├── BackendBase.cpp │ ├── TFLiteProcessor.cpp │ └── OnnxRuntimeProcessor.cpp ├── system │ └── HighPriorityThread.cpp ├── scheduler │ └── InferenceThread.cpp └── PrePostProcessor.cpp ├── examples ├── benchmark │ ├── CMakeLists.txt │ ├── simple-benchmark │ │ ├── defineTestSimpleBenchmark.cpp │ │ ├── CMakeLists.txt │ │ └── defineSimpleBenchmark.cpp │ ├── advanced-benchmark │ │ ├── defineTestAdvancedBenchmark.cpp │ │ ├── ClearCustomProcessor.h │ │ └── CMakeLists.txt │ └── cnn-size-benchmark │ │ ├── defineTestCNNSizeBenchmark.cpp │ │ └── CMakeLists.txt ├── minimal-inference │ ├── CMakeLists.txt │ ├── onnxruntime │ │ └── CMakeLists.txt │ ├── tensorflow-lite │ │ └── CMakeLists.txt │ └── libtorch │ │ └── CMakeLists.txt ├── clap-audio-plugin │ ├── utils │ │ ├── DryWetMixer.h │ │ └── DryWetMixer.cpp │ ├── cmake │ │ └── anira-clap-demo.plist.in │ ├── anira-clap-demo-pluginentry.cpp │ ├── CMakeLists.txt │ └── anira-clap-demo.h ├── CMakeLists.txt └── juce-audio-plugin │ ├── install.cmake │ ├── PluginParameters.h │ └── PluginParameters.cpp ├── include └── anira │ ├── benchmark.h │ ├── utils │ ├── RealtimeSanitizer.h │ ├── Logger.h │ ├── JsonConfigLoader.h │ └── InferenceBackend.h │ ├── system │ └── AniraWinExports.h │ ├── anira.h │ ├── backends │ └── BackendBase.h │ └── ContextConfig.h ├── .github ├── actions │ ├── test │ │ └── action.yml │ ├── setup │ │ └── action.yml │ ├── build │ │ └── action.yml │ └── install │ │ └── action.yml └── workflows │ ├── build_sanitizer.yml │ ├── build_docs_and_deploy.yml │ ├── on_tag.yml │ ├── build_test.yml │ ├── build_examples.yml │ └── build_benchmark.yml ├── .gitignore ├── cmake ├── real-time-sanitizers.cmake ├── test-deps.cmake ├── benchmark-src.cmake ├── msvc-support.cmake ├── 
package.cmake ├── SetupTensorflowLite.cmake └── SetupOnnxRuntime.cmake ├── extras ├── models │ ├── third-party │ │ └── ircam-acids │ │ │ ├── RaveFunkDrumConfigEncoder.json.in │ │ │ ├── RaveFunkDrumConfigDecoder.json.in │ │ │ ├── RaveFunkDrumConfig.json.in │ │ │ ├── RaveFunkDrumConfigEncoder.h │ │ │ ├── RaveFunkDrumConfig.h │ │ │ └── RaveFunkDrumConfigDecoder.h │ ├── cnn │ │ ├── CNNPrePostProcessor.h │ │ ├── CNNBypassProcessor.h │ │ ├── CNNConfig.h │ │ ├── Small_CNNConfig.h │ │ └── Medium_CNNConfig.h │ ├── model-pool │ │ ├── SimpleGainConfig.json.in │ │ ├── SimpleGainConfig.h │ │ └── SimpleStereoGainConfig.h │ ├── stateful-rnn │ │ └── StatefulRNNConfig.h │ └── hybrid-nn │ │ ├── HybridNNConfig.h │ │ ├── HybridNNBypassProcessor.h │ │ └── HybridNNPrePostProcessor.h └── CMakeLists.txt ├── test ├── test_WavReader.cpp ├── CMakeLists.txt ├── WavReader.h └── utils │ ├── test_Buffer.cpp │ └── test_JsonConfigLoader.cpp ├── Config.cmake.in ├── CITATION.cff └── TODO.md /docs/sphinx/changelog.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../CHANGELOG.md 2 | :parser: myst_parser.sphinx_ 3 | -------------------------------------------------------------------------------- /docs/img/anira-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anira-project/anira/HEAD/docs/img/anira-logo.png -------------------------------------------------------------------------------- /docs/sphinx/api/enumlist.rst: -------------------------------------------------------------------------------- 1 | Enum list 2 | ========== 3 | 4 | .. toctree:: 5 | :glob: 6 | 7 | enum/* 8 | -------------------------------------------------------------------------------- /docs/sphinx/api/classlist.rst: -------------------------------------------------------------------------------- 1 | Class list 2 | ========== 3 | 4 | .. toctree:: 5 | :glob: 6 | 7 | class/* 8 | -------------------------------------------------------------------------------- /docs/sphinx/api/structlist.rst: -------------------------------------------------------------------------------- 1 | Struct list 2 | =========== 3 | 4 | .. toctree:: 5 | :glob: 6 | 7 | struct/* 8 | -------------------------------------------------------------------------------- /docs/sphinx/api/typedeflist.rst: -------------------------------------------------------------------------------- 1 | Typedef List 2 | ============ 3 | 4 | .. toctree:: 5 | :glob: 6 | 7 | typedef/* -------------------------------------------------------------------------------- /docs/sphinx/api/typedef/BufferF.rst: -------------------------------------------------------------------------------- 1 | Typedef anira::BufferF 2 | ====================== 3 | 4 | .. 
doxygentypedef:: anira::BufferF -------------------------------------------------------------------------------- /src/utils/Buffer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | template class anira::Buffer; 4 | template class anira::Buffer; -------------------------------------------------------------------------------- /examples/benchmark/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(advanced-benchmark) 2 | add_subdirectory(cnn-size-benchmark) 3 | add_subdirectory(simple-benchmark) -------------------------------------------------------------------------------- /docs/sphinx/api/enum/InferenceBackend.rst: -------------------------------------------------------------------------------- 1 | Enum anira::InferenceBackend 2 | ============================ 3 | 4 | .. doxygenenum:: anira::InferenceBackend 5 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1Buffer.rst: -------------------------------------------------------------------------------- 1 | Class anira::Buffer 2 | =================== 3 | 4 | .. doxygenclass:: anira::Buffer 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/typedef/TensorShapeList.rst: -------------------------------------------------------------------------------- 1 | Typedef anira::TensorShapeList 2 | ============================== 3 | 4 | .. doxygentypedef:: anira::TensorShapeList -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1Context.rst: -------------------------------------------------------------------------------- 1 | Class anira::Context 2 | ==================== 3 | 4 | .. doxygenclass:: anira::Context 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1RingBuffer.rst: -------------------------------------------------------------------------------- 1 | Class anira::RingBuffer 2 | ======================= 3 | 4 | .. doxygenclass:: anira::RingBuffer 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1BackendBase.rst: -------------------------------------------------------------------------------- 1 | Class anira::BackendBase 2 | ======================== 3 | 4 | .. doxygenclass:: anira::BackendBase 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1MemoryBlock.rst: -------------------------------------------------------------------------------- 1 | Class anira::MemoryBlock 2 | ======================== 3 | 4 | .. doxygenclass:: anira::MemoryBlock 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1HostConfig.rst: -------------------------------------------------------------------------------- 1 | Struct anira::HostConfig 2 | ======================== 3 | 4 | .. 
doxygenstruct:: anira::HostConfig 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1ModelData.rst: -------------------------------------------------------------------------------- 1 | Struct anira::ModelData 2 | ======================= 3 | 4 | .. doxygenstruct:: anira::ModelData 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1TensorShape.rst: -------------------------------------------------------------------------------- 1 | Struct anira::TensorShape 2 | ========================= 3 | 4 | .. doxygenstruct:: anira::TensorShape 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1InferenceThread.rst: -------------------------------------------------------------------------------- 1 | Class anira::InferenceThread 2 | ============================ 3 | 4 | .. doxygenclass:: anira::InferenceThread 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1SessionElement.rst: -------------------------------------------------------------------------------- 1 | Class anira::SessionElement 2 | =========================== 3 | 4 | .. doxygenclass:: anira::SessionElement 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1TFLiteProcessor.rst: -------------------------------------------------------------------------------- 1 | Class anira::TFLiteProcessor 2 | ============================ 3 | 4 | .. doxygenclass:: anira::TFLiteProcessor 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/index.rst: -------------------------------------------------------------------------------- 1 | API Documentation 2 | ================= 3 | 4 | .. toctree:: 5 | :maxdepth: 1 6 | 7 | classlist 8 | structlist 9 | enumlist 10 | typedeflist -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1ContextConfig.rst: -------------------------------------------------------------------------------- 1 | Struct anira::ContextConfig 2 | =========================== 3 | 4 | .. doxygenstruct:: anira::ContextConfig 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1InferenceData.rst: -------------------------------------------------------------------------------- 1 | Struct anira::InferenceData 2 | =========================== 3 | 4 | .. doxygenstruct:: anira::InferenceData 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1InferenceHandler.rst: -------------------------------------------------------------------------------- 1 | Class anira::InferenceHandler 2 | ============================= 3 | 4 | .. 
doxygenclass:: anira::InferenceHandler 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1InferenceManager.rst: -------------------------------------------------------------------------------- 1 | Class anira::InferenceManager 2 | ============================= 3 | 4 | .. doxygenclass:: anira::InferenceManager 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1JsonConfigLoader.rst: -------------------------------------------------------------------------------- 1 | Class anira::JsonConfigLoader 2 | ============================= 3 | 4 | .. doxygenclass:: anira::JsonConfigLoader 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1PrePostProcessor.rst: -------------------------------------------------------------------------------- 1 | Class anira::PrePostProcessor 2 | ============================= 3 | 4 | .. doxygenclass:: anira::PrePostProcessor 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1InferenceConfig.rst: -------------------------------------------------------------------------------- 1 | Struct anira::InferenceConfig 2 | ============================= 3 | 4 | .. doxygenstruct:: anira::InferenceConfig 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1ProcessingSpec.rst: -------------------------------------------------------------------------------- 1 | Struct anira::ProcessingSpec 2 | ============================ 3 | 4 | .. doxygenstruct:: anira::ProcessingSpec 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1LibtorchProcessor.rst: -------------------------------------------------------------------------------- 1 | Class anira::LibtorchProcessor 2 | ============================== 3 | 4 | .. doxygenclass:: anira::LibtorchProcessor 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /include/anira/benchmark.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_BENCHMARK_H 2 | #define ANIRA_BENCHMARK_H 3 | 4 | #include "utils/helperFunctions.h" 5 | #include "benchmark/ProcessBlockFixture.h" 6 | 7 | #endif // ANIRA_BENCHMARK_H -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1HighPriorityThread.rst: -------------------------------------------------------------------------------- 1 | Class anira::HighPriorityThread 2 | =============================== 3 | 4 | .. doxygenclass:: anira::HighPriorityThread 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1OnnxRuntimeProcessor.rst: -------------------------------------------------------------------------------- 1 | Class anira::OnnxRuntimeProcessor 2 | ================================= 3 | 4 | .. 
doxygenclass:: anira::OnnxRuntimeProcessor 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1TFLiteProcessor_1_1Instance.rst: -------------------------------------------------------------------------------- 1 | Struct anira::TFLiteProcessor::Instance 2 | ======================================= 3 | 4 | .. doxygenstruct:: anira::TFLiteProcessor::Instance 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1LibtorchProcessor_1_1Instance.rst: -------------------------------------------------------------------------------- 1 | Struct anira::LibtorchProcessor::Instance 2 | ========================================= 3 | 4 | .. doxygenstruct:: anira::LibtorchProcessor::Instance 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/class/classanira_1_1benchmark_1_1ProcessBlockFixture.rst: -------------------------------------------------------------------------------- 1 | Class anira::benchmark::ProcessBlockFixture 2 | =========================================== 3 | 4 | .. doxygenclass:: anira::benchmark::ProcessBlockFixture 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1OnnxRuntimeProcessor_1_1Instance.rst: -------------------------------------------------------------------------------- 1 | Struct anira::OnnxRuntimeProcessor::Instance 2 | ============================================ 3 | 4 | .. doxygenstruct:: anira::OnnxRuntimeProcessor::Instance 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /.github/actions/test/action.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | description: "Test the library" 3 | 4 | runs: 5 | using: "composite" 6 | steps: 7 | # Build the project 8 | - name: ctest 9 | shell: bash 10 | run: ctest --test-dir build --output-on-failure -------------------------------------------------------------------------------- /examples/minimal-inference/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | if(ANIRA_WITH_LIBTORCH) 2 | add_subdirectory(libtorch) 3 | endif() 4 | if(ANIRA_WITH_TFLITE) 5 | add_subdirectory(tensorflow-lite) 6 | endif() 7 | if(ANIRA_WITH_ONNXRUNTIME) 8 | add_subdirectory(onnxruntime) 9 | endif() -------------------------------------------------------------------------------- /include/anira/utils/RealtimeSanitizer.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_REALTIMESANITIZER_H 2 | #define ANIRA_REALTIMESANITIZER_H 3 | 4 | #ifdef ANIRA_WITH_RTSAN 5 | #define ANIRA_REALTIME [[clang::nonblocking]] 6 | #else 7 | #define ANIRA_REALTIME 8 | #endif 9 | 10 | #endif //ANIRA_REALTIMESANITIZER_H -------------------------------------------------------------------------------- /docs/sphinx/api/struct/structanira_1_1JsonConfigLoader_1_1SingleParameterStruct.rst: -------------------------------------------------------------------------------- 1 | Struct anira::JsonConfigLoader::SingleParameterStruct 2 | ===================================================== 3 | 4 | .. 
doxygenstruct:: anira::JsonConfigLoader::SingleParameterStruct 5 | :allow-dot-graphs: 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea* 2 | *.DS_Store 3 | /build/ 4 | modules/ 5 | cmake-build* 6 | .vscode 7 | logs/ 8 | results/ 9 | venv/ 10 | extras/models/stateful-rnn/stateful-lstm/ 11 | extras/models/hybrid-nn/GuitarLSTM/ 12 | extras/models/cnn/steerable-nafx/ 13 | extras/models/model-pool/example-models/ 14 | extras/models/third-party/ircam-acids/RAVE/ 15 | extras/**/*.json -------------------------------------------------------------------------------- /cmake/real-time-sanitizers.cmake: -------------------------------------------------------------------------------- 1 | #code adopted from https://github.com/jatinchowdhury18/RTNeural/blob/main/cmake/Sanitizers.cmake 2 | function(anira_rtsan_configure target) 3 | target_compile_definitions(${target} PUBLIC ANIRA_WITH_RTSAN) 4 | target_compile_options(${target} PUBLIC -fsanitize=realtime) 5 | target_link_options(${target} PUBLIC -fsanitize=realtime) 6 | endfunction() -------------------------------------------------------------------------------- /include/anira/utils/Logger.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_LOGGER_H 2 | #define ANIRA_LOGGER_H 3 | 4 | #include 5 | 6 | inline bool isLoggingEnabled() { 7 | #ifdef ENABLE_LOGGING 8 | return true; 9 | #else 10 | return false; 11 | #endif 12 | } 13 | 14 | #define LOG_INFO if (isLoggingEnabled()) (std::cout) 15 | #define LOG_ERROR if (isLoggingEnabled()) (std::cerr) 16 | 17 | #endif //ANIRA_LOGGER_H 18 | -------------------------------------------------------------------------------- /include/anira/system/AniraWinExports.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_ANIRAWINEXPORTS_H 2 | #define ANIRA_ANIRAWINEXPORTS_H 3 | 4 | #if defined(_WIN32) 5 | #ifdef ANIRA_EXPORTS 6 | #define ANIRA_API __declspec(dllexport) 7 | #pragma warning (disable: 4251) 8 | #else 9 | #define ANIRA_API __declspec(dllimport) 10 | #pragma warning (disable: 4251) 11 | #endif 12 | #else 13 | #define ANIRA_API 14 | #endif 15 | 16 | #endif // ANIRA_ANIRAWINEXPORTS_H -------------------------------------------------------------------------------- /examples/benchmark/simple-benchmark/defineTestSimpleBenchmark.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | TEST(Benchmark, Simple){ 6 | #if __linux__ || __APPLE__ 7 | pthread_t self = pthread_self(); 8 | #elif WIN32 9 | HANDLE self = GetCurrentThread(); 10 | #endif 11 | anira::HighPriorityThread::elevate_priority(self, true); 12 | 13 | benchmark::RunSpecifiedBenchmarks(); 14 | } -------------------------------------------------------------------------------- /examples/benchmark/advanced-benchmark/defineTestAdvancedBenchmark.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | TEST(Benchmark, Advanced){ 6 | #if __linux__ || __APPLE__ 7 | pthread_t self = pthread_self(); 8 | #elif WIN32 9 | HANDLE self = GetCurrentThread(); 10 | #endif 11 | anira::HighPriorityThread::elevate_priority(self, true); 12 | 13 | benchmark::RunSpecifiedBenchmarks(); 14 | } -------------------------------------------------------------------------------- 
/examples/benchmark/cnn-size-benchmark/defineTestCNNSizeBenchmark.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | TEST(Benchmark, CNNSize){ 6 | #if __linux__ || __APPLE__ 7 | pthread_t self = pthread_self(); 8 | #elif WIN32 9 | HANDLE self = GetCurrentThread(); 10 | #endif 11 | anira::HighPriorityThread::elevate_priority(self, true); 12 | 13 | benchmark::RunSpecifiedBenchmarks(); 14 | } -------------------------------------------------------------------------------- /examples/benchmark/advanced-benchmark/ClearCustomProcessor.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_CLEAR_CUSTOM_PROCESSOR_H 2 | #define ANIRA_CLEAR_CUSTOM_PROCESSOR_H 3 | 4 | #include 5 | 6 | class ClearCustomProcessor : public anira::BackendBase { 7 | public: 8 | ClearCustomProcessor(anira::InferenceConfig& inference_config) : anira::BackendBase(inference_config) {} 9 | 10 | void process(std::vector &input, std::vector &output, std::shared_ptr) override { 11 | } 12 | }; 13 | 14 | #endif // ANIRA_CLEAR_CUSTOM_PROCESSOR_H -------------------------------------------------------------------------------- /examples/minimal-inference/onnxruntime/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.16) 2 | project(minimal-onnxruntime C CXX) 3 | 4 | set(CMAKE_CXX_STANDARD 20) 5 | 6 | add_executable(minimal-onnxruntime 7 | minimal-onnxruntime.cpp 8 | ) 9 | 10 | target_link_libraries(minimal-onnxruntime 11 | anira::anira 12 | onnxruntime 13 | ) 14 | 15 | if (MSVC) 16 | foreach(DLL ${ANIRA_SHARED_LIBS_WIN}) 17 | add_custom_command(TARGET minimal-onnxruntime 18 | PRE_BUILD 19 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 20 | ${DLL} 21 | $) 22 | endforeach() 23 | endif (MSVC) -------------------------------------------------------------------------------- /examples/minimal-inference/tensorflow-lite/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.16) 2 | project(minimal-tflite C CXX) 3 | 4 | set(CMAKE_CXX_STANDARD 20) 5 | 6 | add_executable(minimal-tflite 7 | minimal-tflite.cpp 8 | ) 9 | 10 | target_link_libraries(minimal-tflite 11 | anira::anira 12 | tensorflowlite_c 13 | ) 14 | 15 | if (MSVC) 16 | foreach(DLL ${ANIRA_SHARED_LIBS_WIN}) 17 | add_custom_command(TARGET minimal-tflite 18 | PRE_BUILD 19 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 20 | ${DLL} 21 | $) 22 | endforeach() 23 | endif (MSVC) -------------------------------------------------------------------------------- /extras/models/third-party/ircam-acids/RaveFunkDrumConfigEncoder.json.in: -------------------------------------------------------------------------------- 1 | { 2 | "inference_config": { 3 | "model_data": [ 4 | { 5 | "model_path": "@RAVE_FUNK_DRUM_MODEL_PATH@", 6 | "inference_backend": "LIBTORCH", 7 | "model_function": "encode" 8 | } 9 | ], 10 | "tensor_shape": [ 11 | { 12 | "input_shape": [1, 1, 2048], 13 | "output_shape": [1, 4, 1] 14 | } 15 | ], 16 | "processing_spec": { 17 | "preprocess_input_channels": [1], 18 | "postprocess_output_channels": [4] 19 | }, 20 | "max_inference_time": 42.66, 21 | "warm_up": 5, 22 | "session_exclusive_processor": true 23 | } 24 | } -------------------------------------------------------------------------------- /examples/minimal-inference/libtorch/CMakeLists.txt: 
-------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.16) 2 | project(minimal-libtorch C CXX) 3 | 4 | set(CMAKE_CXX_STANDARD 20) 5 | 6 | add_executable(minimal-libtorch 7 | minimal-libtorch.cpp 8 | ) 9 | 10 | target_link_libraries(minimal-libtorch 11 | anira::anira 12 | "${TORCH_LIBRARIES}" 13 | ) 14 | 15 | if (MSVC) 16 | foreach(DLL ${ANIRA_SHARED_LIBS_WIN}) 17 | add_custom_command(TARGET ${PROJECT_NAME} 18 | PRE_BUILD 19 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 20 | ${DLL} 21 | $) 22 | endforeach() 23 | endif (MSVC) 24 | -------------------------------------------------------------------------------- /include/anira/anira.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_H 2 | #define ANIRA_H 3 | 4 | #include "InferenceConfig.h" 5 | #include "InferenceHandler.h" 6 | #include "PrePostProcessor.h" 7 | #include "backends/LibTorchProcessor.h" 8 | #include "backends/OnnxRuntimeProcessor.h" 9 | #include "backends/TFLiteProcessor.h" 10 | #include "scheduler/InferenceManager.h" 11 | #include "scheduler/InferenceThread.h" 12 | #include "scheduler/Context.h" 13 | #include "scheduler/SessionElement.h" 14 | #include "utils/Buffer.h" 15 | #include "utils/HostConfig.h" 16 | #include "utils/InferenceBackend.h" 17 | #include "utils/RingBuffer.h" 18 | #include "system/HighPriorityThread.h" 19 | #include "utils/JsonConfigLoader.h" 20 | 21 | #endif // ANIRA_H -------------------------------------------------------------------------------- /test/test_WavReader.cpp: -------------------------------------------------------------------------------- 1 | #include "WavReader.h" 2 | #include "gtest/gtest.h" 3 | 4 | 5 | 6 | TEST(WavReader, Read){ 7 | std::vector data_input; 8 | std::vector reference = {-0.008862195, -0.007644168, -0.006510143, -0.004956109, -0.002772061, 0.0001680037, 0.0034440758, 0.0063841403, 0.008064178, 0.007770171, 0.0058801295, 0.0029820655, 4.2000924e-05, -0.0023520517, -0.004284094, -0.0059221303, -0.007098156, -0.007224159, -0.005502121, -0.0020580452}; 9 | read_wav(string(GUITARLSTM_MODELS_PATH_TENSORFLOW) + "/model_0/x_test.wav", data_input); 10 | 11 | for (size_t i = 0; i < reference.size(); i++) 12 | { 13 | EXPECT_FLOAT_EQ(data_input[i], reference[i]); 14 | } 15 | 16 | 17 | } -------------------------------------------------------------------------------- /extras/models/cnn/CNNPrePostProcessor.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_CNNPREPOSTPROCESSOR_H 2 | #define ANIRA_CNNPREPOSTPROCESSOR_H 3 | 4 | #include "CNNConfig.h" 5 | #include 6 | 7 | class CNNPrePostProcessor : public anira::PrePostProcessor 8 | { 9 | public: 10 | using anira::PrePostProcessor::PrePostProcessor; 11 | 12 | virtual void pre_process(std::vector& input, std::vector& output, [[maybe_unused]] anira::InferenceBackend current_inference_backend) override { 13 | pop_samples_from_buffer(input[0], output[0], m_inference_config.get_tensor_output_size()[0], m_inference_config.get_tensor_input_size()[0]-m_inference_config.get_tensor_output_size()[0]); 14 | } 15 | }; 16 | 17 | #endif //ANIRA_CNNPREPOSTPROCESSOR_H 18 | -------------------------------------------------------------------------------- /Config.cmake.in: -------------------------------------------------------------------------------- 1 | @PACKAGE_INIT@ 2 | 3 | message(STATUS "Configuring anira") 4 | 5 | set(ANIRA_WITH_LIBTORCH @ANIRA_WITH_LIBTORCH@) 6 | set(ANIRA_WITH_BENCHMARK 
@ANIRA_WITH_BENCHMARK@) 7 | set(ANIRA_WITH_TESTS @ANIRA_WITH_TESTS@) 8 | set(ANIRA_VERSION @PROJECT_VERSION_FULL@) 9 | 10 | find_package(concurrentqueue REQUIRED) 11 | find_package(nlohmann_json REQUIRED) 12 | 13 | # Find the dependencies 14 | if (ANIRA_WITH_LIBTORCH) 15 | find_package(Torch REQUIRED) 16 | endif() 17 | 18 | if (ANIRA_WITH_TESTS OR ANIRA_WITH_BENCHMARK) 19 | find_package(GTest REQUIRED) 20 | endif() 21 | 22 | if (ANIRA_WITH_BENCHMARK) 23 | find_package(benchmark REQUIRED) 24 | endif() 25 | 26 | include("${CMAKE_CURRENT_LIST_DIR}/aniraTargets.cmake") 27 | 28 | check_required_components(anira) -------------------------------------------------------------------------------- /extras/models/third-party/ircam-acids/RaveFunkDrumConfigDecoder.json.in: -------------------------------------------------------------------------------- 1 | { 2 | "inference_config": { 3 | "model_data": [ 4 | { 5 | "model_path": "@RAVE_FUNK_DRUM_MODEL_PATH@", 6 | "inference_backend": "LIBTORCH", 7 | "model_function": "decode" 8 | } 9 | ], 10 | "tensor_shape": [ 11 | { 12 | "input_shape": [1, 4, 1], 13 | "output_shape": [1, 1, 2048] 14 | } 15 | ], 16 | "processing_spec": { 17 | "preprocess_input_channels": [4], 18 | "postprocess_output_channels": [1], 19 | "preprocess_input_size": [1], 20 | "postprocess_output_size": [2048], 21 | "internal_model_latency": [2048] 22 | }, 23 | "max_inference_time": 42.66, 24 | "warm_up": 5, 25 | "session_exclusive_processor": true 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /examples/clap-audio-plugin/utils/DryWetMixer.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_DRYWETMIXER_H 2 | #define ANIRA_DRYWETMIXER_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | namespace clap_plugin_example::utils { 9 | 10 | class DryWetMixer { 11 | public: 12 | DryWetMixer(); 13 | 14 | void prepare(double sample_rate, size_t buffer_size, size_t latency_samples); 15 | void push_dry_sample(float dry_sample); 16 | 17 | float mix_wet_sample(float wet_sample); 18 | 19 | void set_mix(float new_mix); 20 | 21 | private: 22 | std::vector m_delay_buffer; 23 | 24 | double m_sample_rate; 25 | size_t m_buffer_size; 26 | 27 | size_t m_latency_samples; 28 | float m_mix; 29 | 30 | size_t m_write_index; 31 | size_t m_read_index; 32 | }; 33 | 34 | } 35 | 36 | #endif //ANIRA_DRYWETMIXER_H 37 | -------------------------------------------------------------------------------- /extras/models/third-party/ircam-acids/RaveFunkDrumConfig.json.in: -------------------------------------------------------------------------------- 1 | { 2 | "context_config": { 3 | "num_threads": 1 4 | }, 5 | "inference_config": { 6 | "model_data": [ 7 | { 8 | "model_path": "@RAVE_FUNK_DRUM_MODEL_PATH@", 9 | "inference_backend": "LIBTORCH" 10 | } 11 | ], 12 | "tensor_shape": [ 13 | { 14 | "input_shape": [1, 1, 2048], 15 | "output_shape": [1, 1, 2048] 16 | } 17 | ], 18 | "processing_spec": { 19 | "preprocess_input_channels": [1], 20 | "postprocess_output_channels": [1], 21 | "preprocess_input_size": [2048], 22 | "postprocess_output_size": [2048], 23 | "internal_model_latency": [2048] 24 | }, 25 | "max_inference_time": 42.66, 26 | "warm_up": 5, 27 | "session_exclusive_processor": true 28 | } 29 | } -------------------------------------------------------------------------------- /examples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_compile_definitions( 2 | 
GUITARLSTM_MODELS_PATH_TENSORFLOW="${GUITARLSTM_MODELS_PATH_TENSORFLOW}" 3 | GUITARLSTM_MODELS_PATH_PYTORCH="${GUITARLSTM_MODELS_PATH_PYTORCH}" 4 | STEERABLENAFX_MODELS_PATH_TENSORFLOW="${STEERABLENAFX_MODELS_PATH_TENSORFLOW}" 5 | STEERABLENAFX_MODELS_PATH_PYTORCH="${STEERABLENAFX_MODELS_PATH_PYTORCH}" 6 | STATEFULLSTM_MODELS_PATH_TENSORFLOW="${STATEFULLSTM_MODELS_PATH_TENSORFLOW}" 7 | STATEFULLSTM_MODELS_PATH_PYTORCH="${STATEFULLSTM_MODELS_PATH_PYTORCH}" 8 | SIMPLEGAIN_MODEL_PATH="${SIMPLEGAIN_MODEL_PATH}" 9 | RAVE_MODEL_DIR="${RAVE_MODEL_DIR}" 10 | RAVE_MODEL_FUNK_DRUM_JSON_CONFIG_PATH="${RAVE_MODEL_FUNK_DRUM_JSON_CONFIG_PATH}" 11 | ) 12 | 13 | 14 | if(ANIRA_WITH_BENCHMARK) 15 | add_subdirectory(benchmark) 16 | endif() 17 | 18 | add_subdirectory(minimal-inference) 19 | add_subdirectory(juce-audio-plugin) 20 | add_subdirectory(clap-audio-plugin) -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: If you use this software, please cite both the article from preferred-citation and the software itself. 3 | authors: 4 | - family-names: Ackva 5 | given-names: Valentin 6 | - family-names: Schulz 7 | given-names: Fares 8 | title: 'ANIRA: An Architecture for Neural Network Inference in Real-Time Audio Applications' 9 | version: 1.0.0 10 | doi: 10.1109/IS262782.2024.10704099 11 | date-released: '2024-11-09' 12 | preferred-citation: 13 | authors: 14 | - family-names: Ackva 15 | given-names: Valentin 16 | - family-names: Schulz 17 | given-names: Fares 18 | title: 'ANIRA: An Architecture for Neural Network Inference in Real-Time Audio Applications' 19 | doi: 10.1109/IS262782.2024.10704099 20 | type: conference-paper 21 | pages: 1-10 22 | year: '2024' 23 | collection-title: 2024 IEEE 5th International Symposium on the Internet of Sounds (IS2) 24 | conference: {} 25 | publisher: 26 | name: IEEE -------------------------------------------------------------------------------- /docs/sphinx/index.rst: -------------------------------------------------------------------------------- 1 | Anira Documentation 2 | =================== 3 | 4 | .. include:: ../../README.md 5 | :parser: myst_parser.sphinx_ 6 | :end-before: ## Documentation 7 | 8 | .. include:: ../../README.md 9 | :parser: myst_parser.sphinx_ 10 | :start-after: 11 | 12 | .. toctree:: 13 | :maxdepth: 1 14 | :hidden: 15 | :caption: Contents: 16 | 17 | about 18 | getting_started 19 | usage 20 | custom_preprocessing 21 | custom_backends 22 | examples 23 | api/index 24 | architecture 25 | benchmarking 26 | latency 27 | troubleshooting 28 | changelog 29 | contributing 30 | 31 | Next Steps 32 | ~~~~~~~~~~ 33 | 34 | New to Anira? Start with :doc:`getting_started` for installation and basic usage. 35 | 36 | Ready to dive deeper? Check out the :doc:`usage` and explore :doc:`custom_preprocessing` or :doc:`custom_backends` for advanced use cases. 37 | 38 | Having issues? See :doc:`troubleshooting` or visit the GitHub repository for support. 
39 | -------------------------------------------------------------------------------- /extras/models/cnn/CNNBypassProcessor.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_CNN_CUSTOM_PROCESSOR_H 2 | #define ANIRA_CNN_CUSTOM_PROCESSOR_H 3 | 4 | #include 5 | 6 | class CNNBypassProcessor : public anira::BackendBase { 7 | public: 8 | CNNBypassProcessor(anira::InferenceConfig& inference_config) : anira::BackendBase(inference_config) {} 9 | 10 | void process(std::vector &input, std::vector &output, [[maybe_unused]] std::shared_ptr session) override { 11 | auto sample_diff = input[0].get_num_samples() - output[0].get_num_samples(); 12 | 13 | for (size_t channel = 0; channel < input[0].get_num_channels(); ++channel) { 14 | auto write_ptr = output[0].get_write_pointer(channel); 15 | auto read_ptr = input[0].get_read_pointer(channel); 16 | 17 | for (size_t i = 0; i < output[0].get_num_samples(); ++i) { 18 | write_ptr[i] = read_ptr[i+sample_diff]; 19 | } 20 | } 21 | } 22 | }; 23 | 24 | #endif // ANIRA_CNN_CUSTOM_PROCESSOR_H -------------------------------------------------------------------------------- /extras/models/model-pool/SimpleGainConfig.json.in: -------------------------------------------------------------------------------- 1 | { 2 | "context_config": { 3 | "num_threads": 1 4 | }, 5 | "inference_config": { 6 | "model_data": [ 7 | { 8 | "model_path": "@SIMPLEGAIN_MODEL_PATH@/simple_gain_network_mono.pt", 9 | "inference_backend": "LIBTORCH" 10 | }, 11 | { 12 | "model_path": "@SIMPLEGAIN_MODEL_PATH@/simple_gain_network_mono.onnx", 13 | "inference_backend": "ONNX" 14 | }, 15 | { 16 | "model_path": "@SIMPLEGAIN_MODEL_PATH@/simple_gain_network_mono.tflite", 17 | "inference_backend": "TFLITE" 18 | } 19 | ], 20 | "tensor_shape": [ 21 | { 22 | "input_shape": [[1, 1, 512],[1]], 23 | "output_shape": [[1, 1, 512],[1]] 24 | } 25 | ], 26 | "processing_spec": { 27 | "preprocess_input_channels": [1, 1], 28 | "postprocess_output_channels": [1, 1], 29 | "preprocess_input_size": [512, 0], 30 | "postprocess_output_size": [512, 0] 31 | }, 32 | "max_inference_time": 5.0, 33 | "warm_up": 1 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /cmake/test-deps.cmake: -------------------------------------------------------------------------------- 1 | # This disables the default behavior of adding all targets to the CTest dashboard. 2 | set_property(GLOBAL PROPERTY CTEST_TARGETS_ADDED 1) 3 | 4 | include(FetchContent) 5 | 6 | # enable ctest 7 | include(CTest) 8 | 9 | # Externally provided libraries 10 | FetchContent_Declare(googletest 11 | GIT_REPOSITORY https://github.com/google/googletest.git 12 | GIT_PROGRESS TRUE 13 | GIT_SHALLOW TRUE 14 | GIT_TAG v1.14.0) 15 | 16 | 17 | # This command ensures that each of the named dependencies are made available to the project by the time it returns. If the dependency has already been populated the command does nothing. Otherwise, the command populates the dependency and then calls add_subdirectory() on the result. 18 | FetchContent_MakeAvailable(googletest) 19 | 20 | # enable position independent code because otherwise the library cannot be linked into a shared library 21 | set_target_properties(gtest PROPERTIES POSITION_INDEPENDENT_CODE ON) 22 | target_link_libraries(${PROJECT_NAME} PUBLIC gtest_main) 23 | 24 | # include() loads and runs CMake code from the given file.
25 | include(GoogleTest) 26 | -------------------------------------------------------------------------------- /extras/models/third-party/ircam-acids/RaveFunkDrumConfigEncoder.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_RAVE_FUNK_DRUM_CONFIG_ENCODER_H 2 | #define ANIRA_RAVE_FUNK_DRUM_CONFIG_ENCODER_H 3 | 4 | #include 5 | 6 | static std::vector model_data_rave_funk_drum_encoder_config = { 7 | #ifdef USE_LIBTORCH 8 | {RAVE_MODEL_DIR + std::string("/rave_funk_drum.ts"), anira::InferenceBackend::LIBTORCH, std::string("encode")}, 9 | #endif 10 | }; 11 | 12 | static std::vector tensor_shape_rave_funk_drum_encoder_config = { 13 | {{{1, 1, 2048}}, {{1, 4, 1}}} 14 | }; 15 | 16 | static anira::ProcessingSpec processing_spec_rave_funk_drum_encoder_config{ 17 | {1}, // preprocess_input_channels 18 | {4} // postprocess_output_channels 19 | }; 20 | 21 | static anira::InferenceConfig rave_funk_drum_encoder_config( 22 | model_data_rave_funk_drum_encoder_config, 23 | tensor_shape_rave_funk_drum_encoder_config, 24 | processing_spec_rave_funk_drum_encoder_config, 25 | 42.66f, 26 | 5, 27 | true // session_exclusive_processor because of cached convolution layers in the model 28 | ); 29 | 30 | #endif //ANIRA_RAVE_FUNK_DRUM_CONFIG_ENCODER_H 31 | -------------------------------------------------------------------------------- /extras/models/third-party/ircam-acids/RaveFunkDrumConfig.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_RAVE_FUNK_DRUM_CONFIG_H 2 | #define ANIRA_RAVE_FUNK_DRUM_CONFIG_H 3 | 4 | #include 5 | 6 | static std::vector model_data_rave_funk_drum_config = { 7 | #ifdef USE_LIBTORCH 8 | {RAVE_MODEL_DIR + std::string("/rave_funk_drum.ts"), anira::InferenceBackend::LIBTORCH}, 9 | #endif 10 | }; 11 | 12 | static std::vector tensor_shape_rave_funk_drum_config = { 13 | {{{1, 1, 2048}}, {{1, 1, 2048}}} 14 | }; 15 | 16 | static anira::ProcessingSpec processing_spec_rave_funk_drum_config{ 17 | {1}, // preprocess_input_channels 18 | {1}, // postprocess_output_channels 19 | {2048}, // preprocess_input_size 20 | {2048}, // postprocess_output_size 21 | {2048} // internal_model_latency 22 | }; 23 | 24 | static anira::InferenceConfig rave_funk_drum_config( 25 | model_data_rave_funk_drum_config, 26 | tensor_shape_rave_funk_drum_config, 27 | processing_spec_rave_funk_drum_config, 28 | 42.66f, 29 | 5, 30 | true // session_exclusive_processor because of cached convolution layers in the model 31 | ); 32 | 33 | #endif //ANIRA_RAVE_FUNK_DRUM_CONFIG_H 34 | -------------------------------------------------------------------------------- /src/backends/BackendBase.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace anira { 4 | 5 | BackendBase::BackendBase(InferenceConfig& inference_config) : m_inference_config(inference_config) { 6 | } 7 | 8 | void BackendBase::prepare() { 9 | 10 | } 11 | 12 | void BackendBase::process(std::vector& input, std::vector& output, [[maybe_unused]] std::shared_ptr session) { 13 | for (size_t i = 0; i < input.size(); ++i) { 14 | bool equal_channels = input[i].get_num_channels() == output[i].get_num_channels(); 15 | auto sample_diff = input[i].get_num_samples() - output[i].get_num_samples(); 16 | if (equal_channels && sample_diff == 0) { 17 | for (int channel = 0; channel < input[i].get_num_channels(); ++channel) { 18 | auto write_ptr = output[i].get_write_pointer(channel); 19 | auto read_ptr = 
input[i].get_read_pointer(channel); 20 | 21 | for (size_t j = 0; j < output[i].get_num_samples(); ++j) { 22 | write_ptr[j] = read_ptr[j]; 23 | } 24 | } 25 | } else { 26 | output[i].clear(); 27 | } 28 | } 29 | } 30 | 31 | } // namespace anira -------------------------------------------------------------------------------- /extras/models/third-party/ircam-acids/RaveFunkDrumConfigDecoder.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_RAVE_FUNK_DRUM_CONFIG_DECODER_H 2 | #define ANIRA_RAVE_FUNK_DRUM_CONFIG_DECODER_H 3 | 4 | #include 5 | 6 | static std::vector model_data_rave_funk_drum_decoder_config = { 7 | #ifdef USE_LIBTORCH 8 | {RAVE_MODEL_DIR + std::string("/rave_funk_drum.ts"), anira::InferenceBackend::LIBTORCH, std::string("decode")}, 9 | #endif 10 | }; 11 | 12 | static std::vector tensor_shape_rave_funk_drum_decoder_config = { 13 | {{{1, 4, 1}}, {{1, 1, 2048}}} 14 | }; 15 | 16 | static anira::ProcessingSpec processing_spec_rave_funk_drum_decoder_config{ 17 | {4}, // preprocess_input_channels 18 | {1}, // postprocess_output_channels 19 | {1}, // preprocess_input_size 20 | {2048}, // postprocess_output_size 21 | {2048} // internal_model_latency 22 | }; 23 | 24 | static anira::InferenceConfig rave_funk_drum_decoder_config( 25 | model_data_rave_funk_drum_decoder_config, 26 | tensor_shape_rave_funk_drum_decoder_config, 27 | processing_spec_rave_funk_drum_decoder_config, 28 | 42.66f, 29 | 5, 30 | true // session_exclusive_processor because of cached convolution layers in the model 31 | ); 32 | 33 | #endif //ANIRA_RAVE_FUNK_DRUM_CONFIG_DECODER_H 34 | -------------------------------------------------------------------------------- /examples/clap-audio-plugin/cmake/anira-clap-demo.plist.in: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFBundleDevelopmentRegion 6 | English 7 | CFBundleExecutable 8 | ${MACOSX_BUNDLE_EXECUTABLE_NAME} 9 | CFBundleGetInfoString 10 | ${MACOSX_BUNDLE_INFO_STRING} 11 | CFBundleIconFile 12 | ${MACOSX_BUNDLE_ICON_FILE} 13 | CFBundleIdentifier 14 | ${MACOSX_BUNDLE_GUI_IDENTIFIER} 15 | CFBundleInfoDictionaryVersion 16 | 6.0 17 | CFBundleLongVersionString 18 | ${MACOSX_BUNDLE_LONG_VERSION_STRING} 19 | CFBundleName 20 | ${MACOSX_BUNDLE_BUNDLE_NAME} 21 | CFBundlePackageType 22 | BNDL 23 | CFBundleShortVersionString 24 | ${MACOSX_BUNDLE_SHORT_VERSION_STRING} 25 | CFBundleSignature 26 | ???? 
27 | CFBundleVersion 28 | ${MACOSX_BUNDLE_BUNDLE_VERSION} 29 | CSResourcesFileMapped 30 | 31 | NSHumanReadableCopyright 32 | ${MACOSX_BUNDLE_COPYRIGHT} 33 | 34 | 35 | -------------------------------------------------------------------------------- /extras/models/stateful-rnn/StatefulRNNConfig.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_STATEFULRNNCONFIG_H 2 | #define ANIRA_STATEFULRNNCONFIG_H 3 | 4 | #include 5 | 6 | static std::vector model_data_rnn_config = { 7 | #ifdef USE_LIBTORCH 8 | {STATEFULLSTM_MODELS_PATH_PYTORCH + std::string("/model_0/stateful-lstm-dynamic.pt"), anira::InferenceBackend::LIBTORCH}, 9 | #endif 10 | #ifdef USE_ONNXRUNTIME 11 | {STATEFULLSTM_MODELS_PATH_PYTORCH + std::string("/model_0/stateful-lstm-libtorch.onnx"), anira::InferenceBackend::ONNX}, 12 | #endif 13 | #ifdef USE_TFLITE 14 | {STATEFULLSTM_MODELS_PATH_TENSORFLOW + std::string("/model_0/stateful-lstm-dynamic.tflite"), anira::InferenceBackend::TFLITE}, 15 | #endif 16 | }; 17 | 18 | static std::vector tensor_shape_rnn_config = { 19 | #ifdef USE_LIBTORCH 20 | {{{2048, 1, 1}}, {{2048, 1, 1}}, anira::InferenceBackend::LIBTORCH}, 21 | #endif 22 | #ifdef USE_ONNXRUNTIME 23 | {{{2048, 1, 1}}, {{2048, 1, 1}}, anira::InferenceBackend::ONNX}, 24 | #endif 25 | #ifdef USE_TFLITE 26 | {{{1, 2048, 1}}, {{1, 2048, 1}}, anira::InferenceBackend::TFLITE}, 27 | #endif 28 | }; 29 | 30 | static anira::InferenceConfig rnn_config ( 31 | model_data_rnn_config, 32 | tensor_shape_rnn_config, 33 | 42.66f, 34 | 2, 35 | true 36 | ); 37 | 38 | #endif //ANIRA_STATEFULRNNCONFIG_H 39 | -------------------------------------------------------------------------------- /.github/actions/setup/action.yml: -------------------------------------------------------------------------------- 1 | name: setup 2 | description: "Set up the project and install dependencies" 3 | 4 | runs: 5 | using: "composite" 6 | steps: 7 | # A simple printout of the matrix 8 | - name: printout 9 | shell: bash 10 | run: | 11 | echo ${{ github.ref }} 12 | echo "matrix.name=${{ matrix.name }}"; 13 | echo "matrix.os=${{ matrix.os }}"; 14 | if [ "${{ matrix.name }}" == "Linux-x86_64" ]; then 15 | echo "$(uname -a)" 16 | elif [ "${{ matrix.os }}" == "macOS-latest" ]; then 17 | echo "$(uname -a)" 18 | fi; 19 | 20 | # We need the osxutils to get the codesign and notarization tools 21 | - name: install deps 22 | shell: bash 23 | run: | 24 | if [ "${{ matrix.name }}" == "Linux-x86_64" ]; then 25 | sudo apt-get update && sudo apt install ninja-build 26 | elif [ "${{ matrix.name }}" == "macOS-x86_64" ]; then 27 | brew install osxutils ninja 28 | echo "brew prefix: $(brew --prefix)" 29 | elif [ "${{ matrix.name }}" == "macOS-arm64" ]; then 30 | brew install osxutils ninja 31 | echo "brew prefix: $(brew --prefix)" 32 | elif [ "${{ matrix.name }}" == "Windows-x86_64" ]; then 33 | choco install ninja 34 | else 35 | echo "Unknown OS"; 36 | fi; -------------------------------------------------------------------------------- /extras/models/model-pool/SimpleGainConfig.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_SIMPLEGAINCONFIG_H 2 | #define ANIRA_SIMPLEGAINCONFIG_H 3 | 4 | #include 5 | 6 | static std::vector model_data_gain_config = { 7 | #ifdef USE_LIBTORCH 8 | {SIMPLEGAIN_MODEL_PATH + std::string("/simple_gain_network_mono.pt"), anira::InferenceBackend::LIBTORCH}, 9 | #endif 10 | #ifdef USE_ONNXRUNTIME 11 | {SIMPLEGAIN_MODEL_PATH + std::string("/simple_gain_network_mono.onnx"),
anira::InferenceBackend::ONNX}, 12 | #endif 13 | #ifdef USE_TFLITE 14 | {SIMPLEGAIN_MODEL_PATH + std::string("/simple_gain_network_mono.tflite"), anira::InferenceBackend::TFLITE}, 15 | #endif 16 | }; 17 | 18 | static std::vector tensor_shape_gain_config = { 19 | {{{1, 1, 512}, {1}}, {{1, 1, 512}, {1}}}, // When no backend is specified, the tensor shape is seen as universal for all backends 20 | }; 21 | 22 | static anira::ProcessingSpec processing_spec_gain_config = { 23 | {1, 1}, // preprocess_input_channels 24 | {1, 1}, // postprocess_output_channels 25 | {512, 0}, // preprocess_input_size; zero indicates non-streamable input 26 | {512, 0} // postprocess_output_size; zero indicates non-streamable output 27 | }; 28 | 29 | static anira::InferenceConfig gain_config( 30 | model_data_gain_config, 31 | tensor_shape_gain_config, 32 | processing_spec_gain_config, 33 | 5.f, 34 | 1 35 | ); 36 | 37 | #endif //ANIRA_SIMPLEGAINCONFIG_H 38 | -------------------------------------------------------------------------------- /examples/clap-audio-plugin/utils/DryWetMixer.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Valentin Ackva on 25/10/2024. 3 | // 4 | 5 | #include "DryWetMixer.h" 6 | 7 | namespace clap_plugin_example::utils 8 | { 9 | 10 | DryWetMixer::DryWetMixer() : m_sample_rate(0.0), m_buffer_size(0), m_latency_samples(0), m_write_index(0), m_read_index(0), 11 | m_mix(1.0f) 12 | { 13 | 14 | } 15 | 16 | void DryWetMixer::prepare(double sample_rate, size_t buffer_size, size_t latency_samples) { 17 | m_sample_rate = sample_rate; 18 | m_buffer_size = buffer_size; 19 | m_latency_samples = latency_samples; 20 | 21 | m_delay_buffer.resize(m_latency_samples + buffer_size); 22 | std::fill(m_delay_buffer.begin(), m_delay_buffer.end(), 0.0f); 23 | 24 | m_write_index = 0; 25 | m_read_index = (m_write_index + m_buffer_size - m_latency_samples) % m_buffer_size; 26 | } 27 | 28 | void DryWetMixer::push_dry_sample(float dry_sample) { 29 | m_delay_buffer[m_write_index] = dry_sample; 30 | 31 | m_write_index = (m_write_index + 1) % m_delay_buffer.size(); 32 | } 33 | 34 | float DryWetMixer::mix_wet_sample(float wet_sample) { 35 | float delayed_dry_sample = m_delay_buffer[m_read_index]; 36 | 37 | m_read_index = (m_read_index + 1) % m_delay_buffer.size(); 38 | 39 | return (1.0f - m_mix) * delayed_dry_sample + m_mix * wet_sample; 40 | } 41 | 42 | void DryWetMixer::set_mix(float new_mix) { 43 | m_mix = std::clamp(new_mix, 0.0f, 1.0f); 44 | } 45 | 46 | } -------------------------------------------------------------------------------- /extras/models/hybrid-nn/HybridNNConfig.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_HYBRIDNNCONFIG_H 2 | #define ANIRA_HYBRIDNNCONFIG_H 3 | 4 | #include 5 | 6 | static std::vector model_data_hybridnn_config = { 7 | #ifdef USE_LIBTORCH 8 | {GUITARLSTM_MODELS_PATH_PYTORCH + std::string("/model_0/GuitarLSTM-dynamic.pt"), anira::InferenceBackend::LIBTORCH}, 9 | #endif 10 | #ifdef USE_ONNXRUNTIME 11 | {GUITARLSTM_MODELS_PATH_PYTORCH + std::string("/model_0/GuitarLSTM-libtorch-dynamic.onnx"), anira::InferenceBackend::ONNX}, 12 | #endif 13 | #ifdef USE_TFLITE 14 | {GUITARLSTM_MODELS_PATH_TENSORFLOW + std::string("/model_0/GuitarLSTM-256.tflite"), anira::InferenceBackend::TFLITE}, 15 | #endif 16 | }; 17 | 18 | static std::vector tensor_shape_hybridnn_config = { 19 | #ifdef USE_TFLITE 20 | {{{256, 150, 1}}, {{256, 1}}, anira::InferenceBackend::TFLITE}, 21 | #endif 22 | {{{256, 1, 
150}}, {{256, 1}}} 23 | }; 24 | 25 | static anira::ProcessingSpec processing_spec_hybridnn_config = { 26 | {1}, // preprocess_input_channels 27 | {1}, // postprocess_output_channels 28 | {256}, // preprocess_input_size 29 | {256} // postprocess_output_size 30 | }; 31 | 32 | static anira::InferenceConfig hybridnn_config ( 33 | model_data_hybridnn_config, 34 | tensor_shape_hybridnn_config, 35 | processing_spec_hybridnn_config, 36 | 5.33f, 37 | 3 38 | ); 39 | 40 | #endif //ANIRA_HYBRIDNNCONFIG_H 41 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | # TODOs 2 | 3 | ## Architecture 4 | 5 | - [ ] Make moodycamel producer structs with session id and delete them when the session is removed 6 | - [ ] Model and context config should also be available as a JSON file 7 | - [ ] InferenceConfig check 8 | - [ ] RTSan check in CI 9 | - [ ] Change model_path function 10 | - [ ] Make ProcessBlockFixture more versatile with new shapes 11 | - [ ] Fix TFLite benchmark error 12 | 13 | ## Extras 14 | 15 | - [ ] Put the RAVE model on TU servers for a more stable download 16 | 17 | ## Documentation 18 | 19 | - [ ] Update documentation for the JSON example 20 | - [ ] Update documentation to reflect that all models are now supported 21 | - [ ] Add more examples to the documentation 22 | 23 | ## Testing 24 | 25 | - [ ] More sanitizer tests 26 | - [ ] Run the examples as tests in CI 27 | - [ ] InferenceHandler tests with buffer sizes that are not a multiple of the preprocess input size 28 | 29 | ## Bugs 30 | 31 | - [ ] Fix the noise burst at the start of the plugin with model 6 - this is really annoying and hurts ears! 32 | - [ ] When declaring the universal shape in HybridNNConfig.h first, tests fail on an Asahi Linux system (TFLite gets the universal tensor shapes) 33 | - [ ] Calling reset in the InferenceHandler with the blocking mechanism causes a freeze 34 | 35 | ## Packaging 36 | 37 | - [ ] Trigger `ldconfig` in the .deb package 38 | - [ ] Artifacts should not be .zip as symlinks are not supported 39 | - [ ] Add QEMU Docker emulation for aarch64 and armv7l Linux in CI 40 | - [ ] Build the .deb package in CI -------------------------------------------------------------------------------- /extras/models/model-pool/SimpleStereoGainConfig.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_SIMPLESTEREOGAINCONFIG_H 2 | #define ANIRA_SIMPLESTEREOGAINCONFIG_H 3 | 4 | #include 5 | 6 | static std::vector model_data_stereo_gain_config = { 7 | #ifdef USE_LIBTORCH 8 | {SIMPLEGAIN_MODEL_PATH + std::string("/simple_gain_network_stereo.pt"), anira::InferenceBackend::LIBTORCH}, 9 | #endif 10 | #ifdef USE_ONNXRUNTIME 11 | {SIMPLEGAIN_MODEL_PATH + std::string("/simple_gain_network_stereo.onnx"), anira::InferenceBackend::ONNX}, 12 | #endif 13 | #ifdef USE_TFLITE 14 | {SIMPLEGAIN_MODEL_PATH + std::string("/simple_gain_network_stereo.tflite"), anira::InferenceBackend::TFLITE}, 15 | #endif 16 | }; 17 | 18 | static std::vector tensor_shape_stereo_gain_config = { 19 | {{{1, 2, 512}, {1}}, {{1, 2, 512}, {1}}}, // When no backend is specified, the tensor shape is seen as universal for all backends 20 | }; 21 | 22 | static anira::ProcessingSpec processing_spec_stereo_gain_config = { 23 | {2, 1}, // preprocess_input_channels 24 | {2, 1}, // postprocess_output_channels 25 | {512, 0}, // preprocess_input_size; zero indicates non-streamable input 26 | {512, 0} // postprocess_output_size; zero indicates
non-streamable output 27 | }; 28 | 29 | static anira::InferenceConfig stereo_gain_config( 30 | model_data_stereo_gain_config, 31 | tensor_shape_stereo_gain_config, 32 | processing_spec_stereo_gain_config, 33 | 5.f, 34 | 1 35 | ); 36 | 37 | #endif //ANIRA_SIMPLESTEREOGAINCONFIG_H 38 | -------------------------------------------------------------------------------- /extras/models/hybrid-nn/HybridNNBypassProcessor.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_HYBRID_NN_BYPASS_PROCESSOR_H 2 | #define ANIRA_HYBRID_NN_BYPASS_PROCESSOR_H 3 | 4 | #include 5 | 6 | class HybridNNBypassProcessor : public anira::BackendBase { 7 | public: 8 | HybridNNBypassProcessor(anira::InferenceConfig& inference_config) : anira::BackendBase(inference_config) {} 9 | 10 | void process(std::vector &input, std::vector &output, [[maybe_unused]] std::shared_ptr session) override { 11 | size_t num_batches; 12 | size_t num_input_samples; 13 | 14 | #if USE_TFLITE 15 | num_batches = (size_t) m_inference_config.get_tensor_input_shape(anira::InferenceBackend::TFLITE)[0][0]; 16 | num_input_samples = (size_t) m_inference_config.get_tensor_input_shape(anira::InferenceBackend::TFLITE)[0][1]; 17 | #else 18 | num_batches = (size_t) m_inference_config.get_tensor_input_shape()[0][0]; 19 | num_input_samples = (size_t) m_inference_config.get_tensor_input_shape()[0][2]; 20 | #endif 21 | 22 | for (size_t channel = 0; channel < input[0].get_num_channels(); ++channel) { 23 | float* write_ptr = output[0].get_write_pointer(channel); 24 | const float* read_ptr = input[0].get_read_pointer(channel); 25 | 26 | for (size_t batch = 0; batch < num_batches; ++batch) { 27 | size_t base_index = batch * num_input_samples; 28 | write_ptr[batch] = read_ptr[num_input_samples - 1 + base_index]; 29 | } 30 | } 31 | } 32 | }; 33 | 34 | #endif // ANIRA_HYBRID_NN_BYPASS_PROCESSOR_H -------------------------------------------------------------------------------- /extras/models/cnn/CNNConfig.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_CNNCONFIG_H 2 | #define ANIRA_CNNCONFIG_H 3 | 4 | #include 5 | 6 | static std::vector model_data_cnn_config = { 7 | #ifdef USE_LIBTORCH 8 | {STEERABLENAFX_MODELS_PATH_PYTORCH + std::string("/model_0/steerable-nafx-dynamic.pt"), anira::InferenceBackend::LIBTORCH}, 9 | #endif 10 | #ifdef USE_ONNXRUNTIME 11 | {STEERABLENAFX_MODELS_PATH_PYTORCH + std::string("/model_0/steerable-nafx-libtorch-dynamic.onnx"), anira::InferenceBackend::ONNX}, 12 | #endif 13 | #ifdef USE_TFLITE 14 | {STEERABLENAFX_MODELS_PATH_TENSORFLOW + std::string("/model_0/steerable-nafx-dynamic.tflite"), anira::InferenceBackend::TFLITE}, 15 | #endif 16 | }; 17 | 18 | static std::vector tensor_shape_cnn_config = { 19 | #ifdef USE_LIBTORCH 20 | {{{1, 1, 15380}}, {{1, 1, 2048}}, anira::InferenceBackend::LIBTORCH}, 21 | #endif 22 | #ifdef USE_ONNXRUNTIME 23 | {{{1, 1, 15380}}, {{1, 1, 2048}}, anira::InferenceBackend::ONNX}, 24 | #endif 25 | #ifdef USE_TFLITE 26 | {{{1, 15380, 1}}, {{1, 2048, 1}}, anira::InferenceBackend::TFLITE}, 27 | #endif 28 | }; 29 | 30 | static anira::ProcessingSpec processing_spec_cnn_config = { 31 | {1}, // preprocess_input_channels 32 | {1}, // postprocess_output_channels 33 | {2048}, // preprocess_input_size 34 | {2048} // postprocess_output_size 35 | }; 36 | 37 | static anira::InferenceConfig cnn_config ( 38 | model_data_cnn_config, 39 | tensor_shape_cnn_config, 40 | processing_spec_cnn_config, 41 | 42.66f, 42 | 2 43 | ); 44 | 45 | 46 | 
#endif //ANIRA_CNNCONFIG_H 47 | -------------------------------------------------------------------------------- /extras/models/cnn/Small_CNNConfig.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_SMALL_CNNCONFIG_2048_H 2 | #define ANIRA_SMALL_CNNCONFIG_2048_H 3 | 4 | #include 5 | 6 | static std::vector model_data_small_cnn_config = { 7 | #ifdef USE_LIBTORCH 8 | {STEERABLENAFX_MODELS_PATH_PYTORCH + std::string("/model_0/steerable-nafx-2_blocks-dynamic.pt"), anira::InferenceBackend::LIBTORCH}, 9 | #endif 10 | #ifdef USE_ONNXRUNTIME 11 | {STEERABLENAFX_MODELS_PATH_PYTORCH + std::string("/model_0/steerable-nafx-2_blocks-libtorch-dynamic.onnx"), anira::InferenceBackend::ONNX}, 12 | #endif 13 | #ifdef USE_TFLITE 14 | {STEERABLENAFX_MODELS_PATH_TENSORFLOW + std::string("/model_0/steerable-nafx-2_blocks-dynamic.tflite"), anira::InferenceBackend::TFLITE}, 15 | #endif 16 | }; 17 | 18 | static std::vector tensor_shape_small_cnn_config = { 19 | #ifdef USE_LIBTORCH 20 | {{{1, 1, 2180}}, {{1, 1, 2048}}, anira::InferenceBackend::LIBTORCH}, 21 | #endif 22 | #ifdef USE_ONNXRUNTIME 23 | {{{1, 1, 2180}}, {{1, 1, 2048}}, anira::InferenceBackend::ONNX}, 24 | #endif 25 | #ifdef USE_TFLITE 26 | {{{1, 2180, 1}}, {{1, 2048, 1}}, anira::InferenceBackend::TFLITE}, 27 | #endif 28 | }; 29 | 30 | static anira::ProcessingSpec processing_spec_small_cnn_config = { 31 | {1}, // preprocess_input_channels 32 | {1}, // postprocess_output_channels 33 | {2048}, // preprocess_input_size 34 | {2048} // postprocess_output_size 35 | }; 36 | 37 | static anira::InferenceConfig small_cnn_config ( 38 | model_data_small_cnn_config, 39 | tensor_shape_small_cnn_config, 40 | processing_spec_small_cnn_config, 41 | 42.66f 42 | ); 43 | 44 | 45 | #endif //ANIRA_SMALL_CNNCONFIG_2048_H 46 | -------------------------------------------------------------------------------- /extras/models/cnn/Medium_CNNConfig.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_MEDIUM_CNNCONFIG_2048_H 2 | #define ANIRA_MEDIUM_CNNCONFIG_2048_H 3 | 4 | #include 5 | 6 | static std::vector model_data_medium_cnn_config = { 7 | #ifdef USE_LIBTORCH 8 | {STEERABLENAFX_MODELS_PATH_PYTORCH + std::string("/model_0/steerable-nafx-3_blocks-dynamic.pt"), anira::InferenceBackend::LIBTORCH}, 9 | #endif 10 | #ifdef USE_ONNXRUNTIME 11 | {STEERABLENAFX_MODELS_PATH_PYTORCH + std::string("/model_0/steerable-nafx-3_blocks-libtorch-dynamic.onnx"), anira::InferenceBackend::ONNX}, 12 | #endif 13 | #ifdef USE_TFLITE 14 | {STEERABLENAFX_MODELS_PATH_TENSORFLOW + std::string("/model_0/steerable-nafx-3_blocks-dynamic.tflite"), anira::InferenceBackend::TFLITE}, 15 | #endif 16 | }; 17 | 18 | static std::vector tensor_shape_medium_cnn_config = { 19 | #ifdef USE_LIBTORCH 20 | {{{1, 1, 3380}}, {{1, 1, 2048}}, anira::InferenceBackend::LIBTORCH}, 21 | #endif 22 | #ifdef USE_ONNXRUNTIME 23 | {{{1, 1, 3380}}, {{1, 1, 2048}}, anira::InferenceBackend::ONNX}, 24 | #endif 25 | #ifdef USE_TFLITE 26 | {{{1, 3380, 1}}, {{1, 2048, 1}}, anira::InferenceBackend::TFLITE}, 27 | #endif 28 | }; 29 | 30 | static anira::ProcessingSpec processing_spec_medium_cnn_config = { 31 | {1}, // preprocess_input_channels 32 | {1}, // postprocess_output_channels 33 | {2048}, // preprocess_input_size 34 | {2048} // postprocess_output_size 35 | }; 36 | 37 | static anira::InferenceConfig medium_cnn_config ( 38 | model_data_medium_cnn_config, 39 | tensor_shape_medium_cnn_config, 40 | processing_spec_medium_cnn_config, 41 | 42.66f 42 | 
); 43 | 44 | 45 | #endif //ANIRA_MEDIUM_CNNCONFIG_2048_H 46 | -------------------------------------------------------------------------------- /examples/benchmark/simple-benchmark/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.15) 2 | 3 | # Sets the minimum macOS version 4 | if (APPLE) 5 | set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0" CACHE STRING "Minimum version of the target platform" FORCE) 6 | if(CMAKE_OSX_DEPLOYMENT_TARGET) 7 | message("The minimum macOS version is set to " $CACHE{CMAKE_OSX_DEPLOYMENT_TARGET}.) 8 | endif() 9 | endif () 10 | 11 | # ============================================================================== 12 | # Setup the project 13 | # ============================================================================== 14 | 15 | set (PROJECT_NAME simple-benchmark) 16 | 17 | project (${PROJECT_NAME} VERSION 0.0.1) 18 | 19 | # Sets the cpp language minimum 20 | set(CMAKE_CXX_STANDARD 20) 21 | set(CMAKE_CXX_STANDARD_REQUIRED True) 22 | 23 | # set(ANIRA_WITH_BENCHMARK ON) 24 | # add_subdirectory(anira) # set this to the path of the anira library if its a submodule of your repository 25 | # list(APPEND CMAKE_PREFIX_PATH "/path/to/anira") # Use this if you use the precompiled version of anira 26 | # find_package(anira REQUIRED) 27 | 28 | add_executable(${PROJECT_NAME}) 29 | 30 | target_sources(${PROJECT_NAME} PRIVATE 31 | defineSimpleBenchmark.cpp 32 | defineTestSimpleBenchmark.cpp 33 | ) 34 | 35 | target_link_libraries(${PROJECT_NAME} anira::anira) 36 | 37 | # gtest_discover_tests will register a CTest test for each gtest and run them all in parallel with the rest of the Test. 38 | gtest_discover_tests(${PROJECT_NAME} DISCOVERY_TIMEOUT 90) 39 | 40 | if (MSVC) 41 | foreach(DLL ${ANIRA_SHARED_LIBS_WIN}) 42 | add_custom_command(TARGET ${PROJECT_NAME} 43 | PRE_BUILD 44 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 45 | ${DLL} 46 | $) 47 | endforeach() 48 | endif (MSVC) 49 | -------------------------------------------------------------------------------- /examples/benchmark/advanced-benchmark/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.15) 2 | 3 | # Sets the minimum macOS version 4 | if (APPLE) 5 | set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0" CACHE STRING "Minimum version of the target platform" FORCE) 6 | if(CMAKE_OSX_DEPLOYMENT_TARGET) 7 | message("The minimum macOS version is set to " $CACHE{CMAKE_OSX_DEPLOYMENT_TARGET}.) 
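# Note: CMAKE_OSX_DEPLOYMENT_TARGET only takes effect if it is set before the first project() call,
# which is why it is forced into the cache at the very top of this file.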
8 | endif() 9 | endif () 10 | 11 | # ============================================================================== 12 | # Setup the project 13 | # ============================================================================== 14 | 15 | set (PROJECT_NAME advanced-benchmark) 16 | 17 | project (${PROJECT_NAME} VERSION 0.0.1) 18 | 19 | # Sets the cpp language minimum 20 | set(CMAKE_CXX_STANDARD 20) 21 | set(CMAKE_CXX_STANDARD_REQUIRED True) 22 | 23 | # set(ANIRA_WITH_BENCHMARK ON) 24 | # add_subdirectory(anira) # set this to the path of the anira library if its a submodule of your repository 25 | # list(APPEND CMAKE_PREFIX_PATH "/path/to/anira") # Use this if you use the precompiled version of anira 26 | # find_package(anira REQUIRED) 27 | 28 | add_executable(${PROJECT_NAME}) 29 | 30 | target_sources(${PROJECT_NAME} PRIVATE 31 | defineAdvancedBenchmark.cpp 32 | defineTestAdvancedBenchmark.cpp 33 | ) 34 | 35 | target_link_libraries(${PROJECT_NAME} anira::anira) 36 | 37 | # gtest_discover_tests will register a CTest test for each gtest and run them all in parallel with the rest of the Test. 38 | gtest_discover_tests(${PROJECT_NAME} DISCOVERY_TIMEOUT 90) 39 | 40 | if (MSVC) 41 | foreach(DLL ${ANIRA_SHARED_LIBS_WIN}) 42 | add_custom_command(TARGET ${PROJECT_NAME} 43 | PRE_BUILD 44 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 45 | ${DLL} 46 | $) 47 | endforeach() 48 | endif (MSVC) 49 | -------------------------------------------------------------------------------- /examples/benchmark/cnn-size-benchmark/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.15) 2 | 3 | # Sets the minimum macOS version 4 | if (APPLE) 5 | set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0" CACHE STRING "Minimum version of the target platform" FORCE) 6 | if(CMAKE_OSX_DEPLOYMENT_TARGET) 7 | message("The minimum macOS version is set to " $CACHE{CMAKE_OSX_DEPLOYMENT_TARGET}.) 8 | endif() 9 | endif () 10 | 11 | # ============================================================================== 12 | # Setup the project 13 | # ============================================================================== 14 | 15 | set (PROJECT_NAME cnn-size-benchmark) 16 | 17 | project (${PROJECT_NAME} VERSION 0.0.1) 18 | 19 | # Sets the cpp language minimum 20 | set(CMAKE_CXX_STANDARD 20) 21 | set(CMAKE_CXX_STANDARD_REQUIRED True) 22 | 23 | # set(ANIRA_WITH_BENCHMARK ON) 24 | # add_subdirectory(anira) # set this to the path of the anira library if its a submodule of your repository 25 | # list(APPEND CMAKE_PREFIX_PATH "/path/to/anira") # Use this if you use the precompiled version of anira 26 | # find_package(anira REQUIRED) 27 | 28 | add_executable(${PROJECT_NAME}) 29 | 30 | target_sources(${PROJECT_NAME} PRIVATE 31 | defineCNNSizeBenchmark.cpp 32 | defineTestCNNSizeBenchmark.cpp 33 | ) 34 | 35 | target_link_libraries(${PROJECT_NAME} anira::anira) 36 | 37 | # gtest_discover_tests will register a CTest test for each gtest and run them all in parallel with the rest of the Test. 
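# When building this benchmark outside of the anira source tree, the commented-out section above has to be
# enabled (a sketch, paths are placeholders): set ANIRA_WITH_BENCHMARK to ON, point CMAKE_PREFIX_PATH at an
# anira installation or add the library as a subdirectory, call find_package(anira REQUIRED), and also call
# enable_testing() and include(GoogleTest) so that gtest_discover_tests() below is available.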
38 | gtest_discover_tests(${PROJECT_NAME} DISCOVERY_TIMEOUT 90) 39 | 40 | if (MSVC) 41 | foreach(DLL ${ANIRA_SHARED_LIBS_WIN}) 42 | add_custom_command(TARGET ${PROJECT_NAME} 43 | PRE_BUILD 44 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 45 | ${DLL} 46 | $) 47 | endforeach() 48 | endif (MSVC) 49 | -------------------------------------------------------------------------------- /.github/workflows/build_sanitizer.yml: -------------------------------------------------------------------------------- 1 | name: build_sanitizer 2 | 3 | on: 4 | workflow_call: 5 | workflow_dispatch: # lets you run a build from github.com 6 | # Runs the workflow on all push events 7 | push: 8 | branches: 9 | - main 10 | pull_request: 11 | branches: 12 | - main 13 | 14 | env: 15 | SCCACHE_GHA_ENABLED: "true" 16 | 17 | # When pushing new commits, cancel any workflows with the same name on that branch 18 | concurrency: 19 | group: ${{ github.workflow }}-${{ github.ref }} 20 | cancel-in-progress: true 21 | 22 | jobs: 23 | build_and_test_radsan: 24 | if: false # Skip this job for now 25 | name: Check real-time safety with RADSan 26 | strategy: 27 | matrix: 28 | name: [Linux-x86_64] 29 | os: [ubuntu-latest] 30 | 31 | runs-on: ${{ matrix.os }} 32 | 33 | container: 34 | image: realtimesanitizer/radsan-clang:latest 35 | 36 | steps: 37 | - name: Install required tools 38 | run: apt-get update && apt-get install -y git cmake ninja-build sudo 39 | - name: get repo and submodules 40 | uses: actions/checkout@v4 41 | with: 42 | submodules: true 43 | fetch-depth: 0 44 | fetch-tags: true 45 | - name: Allow safe Git directory 46 | run: git config --global --add safe.directory "$GITHUB_WORKSPACE" 47 | 48 | - name: Verify workspace 49 | run: | 50 | echo "Current directory: $(pwd)" 51 | ls -la 52 | - name: setup 53 | uses: ./.github/actions/setup 54 | - name: build 55 | uses: ./.github/actions/build 56 | with: 57 | BUILD_TYPE: Release 58 | CMAKE_BUILD_PARALLEL_LEVEL: 4 59 | CMAKE_BUILD_ARGS: "-DBUILD_SHARED_LIBS=ON -DANIRA_WITH_TESTS=ON -DANIRA_WITH_RADSAN=ON" 60 | - name: test 61 | uses: ./.github/actions/test 62 | -------------------------------------------------------------------------------- /.github/workflows/build_docs_and_deploy.yml: -------------------------------------------------------------------------------- 1 | name: build_docs_and_deploy 2 | 3 | on: 4 | workflow_call: 5 | workflow_dispatch: # lets you run a build from github.com 6 | # Runs the workflow on all push events 7 | push: 8 | branches: 9 | - main 10 | pull_request: 11 | branches: 12 | - main 13 | 14 | env: 15 | SCCACHE_GHA_ENABLED: "true" 16 | 17 | # When pushing new commits, cancel any workflows with the same name on that branch 18 | concurrency: 19 | group: ${{ github.workflow }}-${{ github.ref }} 20 | cancel-in-progress: true 21 | 22 | jobs: 23 | build_docs: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - name: get repo and submodules 27 | uses: actions/checkout@v4 28 | with: 29 | submodules: true 30 | fetch-depth: 0 31 | - name: setup 32 | uses: ./.github/actions/setup 33 | - name: dependencies 34 | run: | 35 | sudo apt-get install -y doxygen graphviz 36 | - name: build 37 | uses: ./.github/actions/build 38 | with: 39 | BUILD_TYPE: Release 40 | CMAKE_BUILD_PARALLEL_LEVEL: 4 41 | CMAKE_BUILD_ARGS: "-DBUILD_SHARED_LIBS=ON -DANIRA_WITH_DOCS=ON" 42 | TARGETS: "sphinx-docs" 43 | - name: upload docs 44 | uses: actions/upload-pages-artifact@v3 # or specific "vX.X.X" version tag for this action 45 | with: 46 | path: build/docs/sphinx/html 47 | 48 | # Deployment job 
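  # Note: actions/deploy-pages publishes the artifact uploaded by actions/upload-pages-artifact above;
  # for this to succeed, GitHub Pages must be configured to deploy from GitHub Actions in the repository settings.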
49 | deploy: 50 | needs: build_docs 51 | if: github.ref == 'refs/heads/main' && github.event_name != 'pull_request' 52 | permissions: 53 | pages: write 54 | id-token: write 55 | environment: 56 | name: github-pages 57 | url: ${{ steps.deployment.outputs.page_url }} 58 | runs-on: ubuntu-latest 59 | steps: 60 | - name: Deploy to GitHub Pages 61 | id: deployment 62 | uses: actions/deploy-pages@v4 -------------------------------------------------------------------------------- /examples/clap-audio-plugin/anira-clap-demo-pluginentry.cpp: -------------------------------------------------------------------------------- 1 | #include "anira-clap-demo.h" 2 | #include 3 | #include 4 | #include 5 | 6 | namespace clap_plugin_example::pluginentry 7 | { 8 | 9 | uint32_t clap_get_plugin_count(const clap_plugin_factory *f) { return 1; } 10 | const clap_plugin_descriptor *clap_get_plugin_descriptor(const clap_plugin_factory *f, uint32_t w) 11 | { 12 | return &AniraClapPluginExample::m_desc; 13 | } 14 | 15 | static const clap_plugin *clap_create_plugin(const clap_plugin_factory *f, const clap_host *host, 16 | const char *plugin_id) 17 | { 18 | if (strcmp(plugin_id, AniraClapPluginExample::m_desc.id)) 19 | { 20 | std::cout << "Warning: CLAP asked for plugin_id '" << plugin_id 21 | << "' and clap-saw-demo ID is '" << AniraClapPluginExample::m_desc.id << "'" << std::endl; 22 | return nullptr; 23 | } 24 | 25 | auto p = new AniraClapPluginExample(host); 26 | return p->clapPlugin(); 27 | } 28 | 29 | const CLAP_EXPORT struct clap_plugin_factory clap_saw_demo_factory = { 30 | clap_plugin_example::pluginentry::clap_get_plugin_count, 31 | clap_plugin_example::pluginentry::clap_get_plugin_descriptor, 32 | clap_plugin_example::pluginentry::clap_create_plugin, 33 | }; 34 | static const void *get_factory(const char *factory_id) 35 | { 36 | return (!strcmp(factory_id, CLAP_PLUGIN_FACTORY_ID)) ? 
&clap_saw_demo_factory : nullptr; 37 | } 38 | 39 | bool clap_init(const char *p) { return true; } 40 | void clap_deinit() {} 41 | 42 | } // namespace clap_plugin_example::pluginentry 43 | 44 | extern "C" 45 | { 46 | // clang-format off 47 | const CLAP_EXPORT struct clap_plugin_entry clap_entry = { 48 | CLAP_VERSION, 49 | clap_plugin_example::pluginentry::clap_init, 50 | clap_plugin_example::pluginentry::clap_deinit, 51 | clap_plugin_example::pluginentry::get_factory 52 | }; 53 | // clang-format on 54 | } 55 | -------------------------------------------------------------------------------- /test/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(PROJECT_NAME tests) 2 | project (${PROJECT_NAME} VERSION ${PROJECT_VERSION}) 3 | 4 | 5 | # Sets the cpp language minimum 6 | set(CMAKE_CXX_STANDARD 20) 7 | set(CMAKE_CXX_STANDARD_REQUIRED True) 8 | 9 | 10 | add_compile_definitions( 11 | GUITARLSTM_MODELS_PATH_TENSORFLOW="${GUITARLSTM_MODELS_PATH_TENSORFLOW}" 12 | GUITARLSTM_MODELS_PATH_PYTORCH="${GUITARLSTM_MODELS_PATH_PYTORCH}" 13 | STEERABLENAFX_MODELS_PATH_TENSORFLOW="${STEERABLENAFX_MODELS_PATH_TENSORFLOW}" 14 | STEERABLENAFX_MODELS_PATH_PYTORCH="${STEERABLENAFX_MODELS_PATH_PYTORCH}" 15 | STATEFULLSTM_MODELS_PATH_TENSORFLOW="${STATEFULLSTM_MODELS_PATH_TENSORFLOW}" 16 | STATEFULLSTM_MODELS_PATH_PYTORCH="${STATEFULLSTM_MODELS_PATH_PYTORCH}" 17 | RAVE_MODEL_FUNK_DRUM_JSON_CONFIG_PATH="${RAVE_MODEL_FUNK_DRUM_JSON_CONFIG_PATH}" 18 | RAVE_MODEL_FUNK_DRUM_ENCODER_JSON_CONFIG_PATH="${RAVE_MODEL_FUNK_DRUM_ENCODER_JSON_CONFIG_PATH}" 19 | RAVE_MODEL_FUNK_DRUM_DECODER_JSON_CONFIG_PATH="${RAVE_MODEL_FUNK_DRUM_DECODER_JSON_CONFIG_PATH}" 20 | SIMPLE_GAIN_JSON_CONFIG_PATH="${SIMPLE_GAIN_JSON_CONFIG_PATH}" 21 | SIMPLEGAIN_MODEL_PATH="${SIMPLEGAIN_MODEL_PATH}" 22 | RAVE_MODEL_DIR="${RAVE_MODEL_DIR}" 23 | ) 24 | 25 | add_executable(${PROJECT_NAME}) 26 | 27 | target_sources(${PROJECT_NAME} PRIVATE 28 | test_InferenceHandler.cpp 29 | utils/test_Buffer.cpp 30 | utils/test_RingBuffer.cpp 31 | utils/test_JsonConfigLoader.cpp 32 | scheduler/test_InferenceManager.cpp 33 | scheduler/test_SessionElement.cpp 34 | test_WavReader.cpp 35 | ) 36 | 37 | target_link_libraries(${PROJECT_NAME} anira::anira) 38 | 39 | # gtest_discover_tests will register a CTest test for each gtest and run them all in parallel with the rest of the Test. 40 | gtest_discover_tests(${PROJECT_NAME} DISCOVERY_TIMEOUT 90) 41 | 42 | if (MSVC) 43 | add_custom_command(TARGET ${PROJECT_NAME} 44 | PRE_BUILD 45 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 46 | ${ANIRA_SHARED_LIBS_WIN} 47 | $ 48 | ) 49 | endif (MSVC) -------------------------------------------------------------------------------- /.github/workflows/on_tag.yml: -------------------------------------------------------------------------------- 1 | name: on_tag 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - 'v*.*.*' 8 | 9 | env: 10 | SCCACHE_GHA_ENABLED: "true" 11 | 12 | jobs: 13 | build_test: 14 | uses: ./.github/workflows/build_test.yml 15 | secrets: inherit 16 | build_release: 17 | needs: build_test 18 | name: ${{ matrix.name }} 19 | strategy: 20 | fail-fast: false # show all errors for each platform (vs. 
cancel jobs on error) 21 | matrix: 22 | include: 23 | - name: Linux-x86_64 24 | os: ubuntu-latest 25 | - name: macOS-x86_64 26 | os: macOS-latest 27 | - name: macOS-arm64 28 | os: macOS-latest 29 | - name: Windows-x86_64 30 | os: windows-latest 31 | runs-on: ${{ matrix.os }} 32 | steps: 33 | - name: get repo and submodules 34 | uses: actions/checkout@v4 35 | with: 36 | submodules: true 37 | fetch-depth: 0 38 | - name: setup 39 | uses: ./.github/actions/setup 40 | - name: build 41 | uses: ./.github/actions/build 42 | with: 43 | BUILD_TYPE: Release 44 | CMAKE_BUILD_PARALLEL_LEVEL: 4 45 | CMAKE_BUILD_ARGS: "-DBUILD_SHARED_LIBS=ON -DANIRA_WITH_INSTALL=ON" 46 | - name: install 47 | id: install 48 | uses: ./.github/actions/install 49 | with: 50 | BUILD_TYPE: Release 51 | PROJECT_NAME: anira 52 | DEV_ID_APP_CERT: ${{ secrets.DEV_ID_APP_CERT }} 53 | DEV_ID_APP_PWD: ${{ secrets.DEV_ID_APP_PWD }} 54 | DEV_ID_APP: ${{ secrets.DEV_ID_APP }} 55 | release: 56 | if: startsWith(github.ref, 'refs/tags/') 57 | runs-on: ubuntu-latest 58 | needs: 59 | - build_release 60 | steps: 61 | - name: Get Artifacts 62 | uses: actions/download-artifact@v4 63 | - name: Create Release 64 | uses: softprops/action-gh-release@v2 65 | with: 66 | files: | 67 | */*.zip 68 | 69 | -------------------------------------------------------------------------------- /.github/workflows/build_test.yml: -------------------------------------------------------------------------------- 1 | name: build_test 2 | 3 | on: 4 | workflow_call: 5 | workflow_dispatch: # lets you run a build from github.com 6 | # Runs the workflow on all push events 7 | push: 8 | branches: 9 | - main 10 | pull_request: 11 | branches: 12 | - main 13 | 14 | env: 15 | SCCACHE_GHA_ENABLED: "true" 16 | 17 | # When pushing new commits, cancel any workflows with the same name on that branch 18 | concurrency: 19 | group: ${{ github.workflow }}-${{ github.ref }} 20 | cancel-in-progress: true 21 | 22 | jobs: 23 | build_test: 24 | name: ${{ matrix.name }} 25 | strategy: 26 | fail-fast: false # show all errors for each platform (vs. 
cancel jobs on error) 27 | matrix: 28 | include: 29 | - name: Linux-x86_64 30 | os: ubuntu-latest 31 | inference_engines: ON 32 | - name: Linux-x86_64-no_inference_engines 33 | os: ubuntu-latest 34 | inference_engines: OFF 35 | - name: macOS-x86_64 36 | os: macOS-latest 37 | inference_engines: ON 38 | - name: macOS-arm64 39 | os: macOS-latest 40 | inference_engines: ON 41 | - name: Windows-x86_64 42 | os: windows-latest 43 | inference_engines: ON 44 | 45 | runs-on: ${{ matrix.os }} 46 | steps: 47 | - name: get repo and submodules 48 | uses: actions/checkout@v4 49 | with: 50 | submodules: true 51 | fetch-depth: 0 52 | - name: setup 53 | uses: ./.github/actions/setup 54 | - name: build 55 | uses: ./.github/actions/build 56 | with: 57 | BUILD_TYPE: Release 58 | CMAKE_BUILD_PARALLEL_LEVEL: 4 59 | CMAKE_BUILD_ARGS: "-DBUILD_SHARED_LIBS=ON -DANIRA_WITH_TESTS=ON -DANIRA_WITH_LIBTORCH=${{ matrix.inference_engines }} -DANIRA_WITH_ONNXRUNTIME=${{ matrix.inference_engines }} -DANIRA_WITH_TFLITE=${{ matrix.inference_engines }}" 60 | - name: test 61 | uses: ./.github/actions/test 62 | -------------------------------------------------------------------------------- /.github/workflows/build_examples.yml: -------------------------------------------------------------------------------- 1 | name: build_examples 2 | 3 | on: 4 | workflow_call: 5 | workflow_dispatch: # lets you run a build from github.com 6 | # Runs the workflow on all push events 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | env: 12 | SCCACHE_GHA_ENABLED: "true" 13 | 14 | # When pushing new commits, cancel any workflows with the same name on that branch 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.ref }} 17 | cancel-in-progress: true 18 | 19 | jobs: 20 | build_test: 21 | name: ${{ matrix.name }} 22 | strategy: 23 | fail-fast: false # show all errors for each platform (vs. 
cancel jobs on error) 24 | matrix: 25 | include: 26 | - name: Linux-x86_64 27 | os: ubuntu-latest 28 | - name: macOS-x86_64 29 | os: macOS-latest 30 | - name: macOS-arm64 31 | os: macOS-latest 32 | - name: Windows-x86_64 33 | os: windows-latest 34 | 35 | runs-on: ${{ matrix.os }} 36 | steps: 37 | - name: get repo and submodules 38 | uses: actions/checkout@v4 39 | with: 40 | submodules: true 41 | fetch-depth: 0 42 | - name: setup 43 | uses: ./.github/actions/setup 44 | - name: add juce deps 45 | shell: bash 46 | run: | 47 | if [ "${{ matrix.name }}" == "Linux-x86_64" ]; then 48 | sudo apt install libxrandr-dev libxinerama-dev libxcursor-dev libfreetype6-dev libasound2-dev libfontconfig1-dev libjack-jackd2-dev 49 | # Fix the juce bug with ft2build.h not found 50 | sudo ln -s /usr/include/freetype2/ft2build.h /usr/include/ft2build.h 51 | sudo ln -s /usr/include/freetype2/freetype /usr/include/freetype 52 | fi 53 | - name: build 54 | uses: ./.github/actions/build 55 | with: 56 | BUILD_TYPE: Release 57 | CMAKE_BUILD_PARALLEL_LEVEL: 4 58 | TARGETS: (anira-clap-plugin-example anira-juce-plugin-example_All minimal-libtorch minimal-onnxruntime minimal-tflite) 59 | CMAKE_BUILD_ARGS: "-DBUILD_SHARED_LIBS=ON -DANIRA_WITH_EXAMPLES=ON" 60 | -------------------------------------------------------------------------------- /.github/workflows/build_benchmark.yml: -------------------------------------------------------------------------------- 1 | name: build_benchmark 2 | 3 | on: 4 | workflow_call: 5 | workflow_dispatch: # lets you run a build from github.com 6 | # Runs the workflow on all push events 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | env: 12 | SCCACHE_GHA_ENABLED: "true" 13 | 14 | # When pushing new commits, cancel any workflows with the same name on that branch 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.ref }} 17 | cancel-in-progress: true 18 | 19 | jobs: 20 | build_test: 21 | name: ${{ matrix.name }} 22 | strategy: 23 | fail-fast: false # show all errors for each platform (vs. 
cancel jobs on error) 24 | matrix: 25 | include: 26 | - name: Linux-x86_64 27 | os: ubuntu-latest 28 | - name: macOS-x86_64 29 | os: macOS-latest 30 | - name: macOS-arm64 31 | os: macOS-latest 32 | - name: Windows-x86_64 33 | os: windows-latest 34 | 35 | runs-on: ${{ matrix.os }} 36 | steps: 37 | - name: get repo and submodules 38 | uses: actions/checkout@v4 39 | with: 40 | submodules: true 41 | fetch-depth: 0 42 | - name: setup 43 | uses: ./.github/actions/setup 44 | - name: add juce deps 45 | shell: bash 46 | run: | 47 | if [ "${{ matrix.name }}" == "Linux-x86_64" ]; then 48 | sudo apt install libxrandr-dev libxinerama-dev libxcursor-dev libfreetype6-dev libasound2-dev libfontconfig1-dev 49 | # Fix the juce bug with ft2build.h not found 50 | sudo ln -s /usr/include/freetype2/ft2build.h /usr/include/ft2build.h 51 | sudo ln -s /usr/include/freetype2/freetype /usr/include/freetype 52 | fi 53 | 54 | - name: build 55 | uses: ./.github/actions/build 56 | with: 57 | BUILD_TYPE: Release 58 | CMAKE_BUILD_PARALLEL_LEVEL: 4 59 | TARGETS: (cnn-size-benchmark advanced-benchmark simple-benchmark) 60 | CMAKE_BUILD_ARGS: "-DBUILD_SHARED_LIBS=ON -DANIRA_WITH_BENCHMARK=ON -DANIRA_WITH_EXAMPLES=ON" 61 | - name: test 62 | uses: ./.github/actions/test 63 | -------------------------------------------------------------------------------- /cmake/benchmark-src.cmake: -------------------------------------------------------------------------------- 1 | # ============================================================================== 2 | # Sources related to the benchmarking options 3 | # ============================================================================== 4 | 5 | target_sources(${PROJECT_NAME} 6 | PRIVATE 7 | # TODO: find out why we need to add the header files here, so that they can find the and files 8 | include/anira/benchmark/ProcessBlockFixture.h 9 | src/benchmark/ProcessBlockFixture.cpp 10 | ) 11 | 12 | # This disables the default behavior of adding all targets to the CTest dashboard. 13 | set_property(GLOBAL PROPERTY CTEST_TARGETS_ADDED 1) 14 | 15 | include(FetchContent) 16 | 17 | FetchContent_Declare(benchmark 18 | GIT_REPOSITORY https://github.com/google/benchmark.git 19 | GIT_PROGRESS TRUE 20 | GIT_SHALLOW TRUE 21 | GIT_TAG v1.8.3) 22 | 23 | # For benchmark we want to set the BENCMARK_ENABLE_TESTING to OFF therefore we cannot use FetchContent_MakeAvailable() 24 | # Check if population has already been performed 25 | FetchContent_GetProperties(benchmark) 26 | if(NOT benchmark_POPULATED) 27 | # Fetch the content using previously declared details 28 | FetchContent_Populate(benchmark) 29 | 30 | # Set custom variables, policies, etc. 
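    # (Note: on CMake 3.30 and newer the single-argument FetchContent_Populate() used above is deprecated
    # by policy CMP0169; setting these variables before a FetchContent_MakeAvailable(benchmark) call,
    # e.g. set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE), achieves the same effect.)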
31 | set(BENCHMARK_ENABLE_TESTING OFF) 32 | set(BENCHMARK_ENABLE_GTEST_TESTS OFF) 33 | 34 | if (APPLE AND (CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")) 35 | set(HAVE_STD_REGEX ON) 36 | set(RUN_HAVE_STD_REGEX 1) 37 | endif() 38 | 39 | # Bring the populated content into the build 40 | add_subdirectory(${benchmark_SOURCE_DIR} ${benchmark_BINARY_DIR}) 41 | 42 | # Supress warnings by making include directories system directories 43 | get_property(BENCHMARK_INCLUDE_DIRS TARGET benchmark PROPERTY INTERFACE_INCLUDE_DIRECTORIES) 44 | target_include_directories(benchmark SYSTEM INTERFACE ${BENCHMARK_INCLUDE_DIRS}) 45 | endif() 46 | 47 | # enable position independent code because otherwise the library cannot be linked into a shared library 48 | set_target_properties(benchmark PROPERTIES POSITION_INDEPENDENT_CODE ON) 49 | target_link_libraries(${PROJECT_NAME} PUBLIC benchmark) 50 | -------------------------------------------------------------------------------- /examples/clap-audio-plugin/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.15) 2 | 3 | set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") 4 | set(CMAKE_OSX_DEPLOYMENT_TARGET 10.11 CACHE STRING "Build for 10.1") 5 | set(CMAKE_POSITION_INDEPENDENT_CODE ON) 6 | 7 | if(NOT CMAKE_BUILD_TYPE) 8 | message(FATAL_ERROR "You need to specify CMAKE_BUILD_TYPE") 9 | endif() 10 | 11 | project(anira-clap-plugin-example VERSION ${PROJECT_VERSION} LANGUAGES C CXX) 12 | 13 | FetchContent_Declare(clap 14 | GIT_REPOSITORY https://github.com/free-audio/clap.git 15 | GIT_TAG main 16 | GIT_PROGRESS TRUE 17 | EXCLUDE_FROM_ALL 18 | ) 19 | FetchContent_MakeAvailable(clap) 20 | 21 | FetchContent_Declare(clap-helpers 22 | GIT_REPOSITORY https://github.com/free-audio/clap-helpers.git 23 | GIT_TAG main 24 | GIT_PROGRESS TRUE 25 | EXCLUDE_FROM_ALL 26 | ) 27 | FetchContent_MakeAvailable(clap-helpers) 28 | 29 | add_library(${PROJECT_NAME} MODULE 30 | anira-clap-demo.cpp 31 | anira-clap-demo-pluginentry.cpp 32 | utils/DryWetMixer.cpp 33 | ) 34 | target_link_libraries(${PROJECT_NAME} clap-core clap-helpers anira) 35 | 36 | if(APPLE) 37 | set_target_properties(${PROJECT_NAME} PROPERTIES 38 | BUNDLE True 39 | BUNDLE_EXTENSION clap 40 | MACOSX_BUNDLE_GUI_IDENTIFIER org.anira-project.${PROJECT_NAME} 41 | MACOSX_BUNDLE_BUNDLE_NAME ${PROJECT_NAME} 42 | MACOSX_BUNDLE_BUNDLE_VERSION "0.1" 43 | MACOSX_BUNDLE_SHORT_VERSION_STRING "0.1" 44 | MACOSX_BUNDLE_INFO_PLIST ${CMAKE_CURRENT_LIST_DIR}/cmake/anira-clap-demo.plist.in 45 | ) 46 | target_link_libraries(${PROJECT_NAME} "-framework CoreFoundation" "-framework AppKit" "-framework CoreGraphics") 47 | target_compile_definitions(${PROJECT_NAME} PRIVATE IS_MAC=1) 48 | elseif(UNIX) 49 | target_compile_definitions(${PROJECT_NAME} PRIVATE IS_LINUX=1) 50 | set_target_properties(${PROJECT_NAME} PROPERTIES SUFFIX ".clap" PREFIX "") 51 | else() 52 | target_compile_definitions(${PROJECT_NAME} PRIVATE IS_WIN=1) 53 | set_target_properties(${PROJECT_NAME} PROPERTIES SUFFIX ".clap" PREFIX "") 54 | endif() 55 | -------------------------------------------------------------------------------- /include/anira/utils/JsonConfigLoader.h: -------------------------------------------------------------------------------- 1 | #ifndef JSONCONFIGLOADER_H 2 | #define JSONCONFIGLOADER_H 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | 10 | namespace anira { 11 | 12 | class ANIRA_API JsonConfigLoader { 13 | public: 14 | JsonConfigLoader(const std::string& file_path); 
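    // Usage sketch (illustrative, not part of the class): the loader takes a JSON description, either as a
    // file path or as an already-open stream, after which the parsed configurations can be retrieved.
    // SIMPLE_GAIN_JSON_CONFIG_PATH is one of the compile definitions set up in test/CMakeLists.txt.
    //
    //     anira::JsonConfigLoader loader(SIMPLE_GAIN_JSON_CONFIG_PATH);
    //     std::unique_ptr<anira::InferenceConfig> inference_config = loader.get_inference_config();
    //     std::unique_ptr<anira::ContextConfig> context_config = loader.get_context_config();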
15 | JsonConfigLoader(std::istream& stream); 16 | 17 | std::unique_ptr get_context_config(); 18 | std::unique_ptr get_inference_config(); 19 | 20 | private: 21 | struct SingleParameterStruct { 22 | bool m_max_inference_time_set = false; 23 | float m_max_inference_time = 0.f; 24 | unsigned int m_warm_up = anira::InferenceConfig::Defaults::m_warm_up; 25 | bool m_session_exclusive_processor = anira::InferenceConfig::Defaults::m_session_exclusive_processor; 26 | float m_blocking_ratio = anira::InferenceConfig::Defaults::m_blocking_ratio; 27 | unsigned int m_num_parallel_processors = anira::InferenceConfig::Defaults::m_num_parallel_processors; 28 | }; 29 | 30 | void initialize_from_stream(std::istream& stream); 31 | 32 | void parse(const nlohmann::json& config); 33 | void parse_context_config(const nlohmann::json& config); 34 | void parse_inference_config(const nlohmann::json& config); 35 | 36 | static std::vector create_model_data_from_config(const nlohmann::basic_json<>& config); 37 | static std::vector create_tensor_shape_from_config(const nlohmann::basic_json<>& config); 38 | static anira::TensorShapeList parse_tensor_json_shape(const nlohmann::json& shape_node); 39 | static anira::ProcessingSpec create_processing_spec_from_config(const nlohmann::basic_json<>& config, bool& config_required); 40 | static std::vector parse_size_t_json_shape(const nlohmann::json& shape_node, std::string json_key_name); 41 | static SingleParameterStruct create_single_parameters_from_config(const nlohmann::basic_json<>& config, bool& necessary_parameter_set); 42 | 43 | std::unique_ptr m_context_config; 44 | std::unique_ptr m_inference_config; 45 | }; 46 | } // namespace anira 47 | 48 | #endif //JSONCONFIGLOADER_H 49 | -------------------------------------------------------------------------------- /extras/models/hybrid-nn/HybridNNPrePostProcessor.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_HYBRIDNNPREPOSTPROCESSOR_H 2 | #define ANIRA_HYBRIDNNPREPOSTPROCESSOR_H 3 | 4 | #include "HybridNNConfig.h" 5 | #include 6 | 7 | class HybridNNPrePostProcessor : public anira::PrePostProcessor 8 | { 9 | public: 10 | using anira::PrePostProcessor::PrePostProcessor; 11 | 12 | virtual void pre_process(std::vector& input, std::vector& output, [[maybe_unused]] anira::InferenceBackend current_inference_backend) override { 13 | int64_t num_batches = 0; 14 | int64_t num_input_samples = 0; 15 | int64_t num_output_samples = 0; 16 | 17 | #ifdef USE_TFLITE 18 | if (current_inference_backend == anira::InferenceBackend::TFLITE) { 19 | num_batches = m_inference_config.get_tensor_input_shape(anira::InferenceBackend::TFLITE)[0][0]; 20 | num_input_samples = m_inference_config.get_tensor_input_shape(anira::InferenceBackend::TFLITE)[0][1]; 21 | num_output_samples = m_inference_config.get_tensor_output_shape(anira::InferenceBackend::TFLITE)[0][1]; 22 | } 23 | else { 24 | #endif 25 | num_batches = m_inference_config.get_tensor_input_shape()[0][0]; 26 | num_input_samples = m_inference_config.get_tensor_input_shape()[0][2]; 27 | num_output_samples = m_inference_config.get_tensor_output_shape()[0][1]; 28 | #ifdef USE_TFLITE 29 | } 30 | #endif 31 | if ( 32 | #ifdef USE_LIBTORCH 33 | current_inference_backend != anira::InferenceBackend::LIBTORCH && 34 | #endif 35 | #ifdef USE_ONNXRUNTIME 36 | current_inference_backend != anira::InferenceBackend::ONNX && 37 | #endif 38 | #ifdef USE_TFLITE 39 | current_inference_backend != anira::InferenceBackend::TFLITE && 40 | #endif 41 | 
current_inference_backend != anira::InferenceBackend::CUSTOM) { 42 | throw std::runtime_error("Invalid inference backend"); 43 | } 44 | 45 | for (size_t batch = 0; batch < (size_t) num_batches; batch++) { 46 | size_t base_index = batch * (size_t) num_input_samples; 47 | pop_samples_from_buffer(input[0], output[0], (size_t) num_output_samples, (size_t) (num_input_samples-num_output_samples), base_index); 48 | } 49 | } 50 | }; 51 | 52 | #endif //ANIRA_HYBRIDNNPREPOSTPROCESSOR_H 53 | -------------------------------------------------------------------------------- /examples/juce-audio-plugin/install.cmake: -------------------------------------------------------------------------------- 1 | # ============================================================================== 2 | # Install the library 3 | # ============================================================================== 4 | 5 | # for CMAKE_INSTALL_INCLUDEDIR and others definition 6 | include(GNUInstallDirs) 7 | 8 | # define the dircetory where the library will be installed CMAKE_INSTALL_PREFIX 9 | set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}-${PROJECT_VERSION}" CACHE PATH "Where the library will be installed to" FORCE) 10 | 11 | # ============================================================================== 12 | # Install specific compile definition 13 | # ============================================================================== 14 | 15 | if(ANIRA_WITH_INSTALL) 16 | target_compile_definitions(${TARGET_NAME} 17 | PUBLIC 18 | INSTALL_VERSION 19 | ) 20 | endif() 21 | 22 | set(INSTALL_TARGETS ${TARGET_NAME}_Standalone ${TARGET_NAME}_VST3) 23 | 24 | # at install the rpath is cleared by default so we have to set it again for the installed shared library to find the other libraries 25 | # in this case we set the rpath to the directories where the other libraries are installed 26 | # $ORIGIN in Linux is a special token that gets replaced by the directory of the library at runtime from that point we could navigate to the other libraries 27 | # The same token for macOS is @loader_path 28 | if(UNIX AND NOT APPLE) 29 | foreach(TARGET ${INSTALL_TARGETS}) 30 | set_target_properties(${TARGET} 31 | PROPERTIES 32 | INSTALL_RPATH "$ORIGIN/../lib" 33 | ) 34 | endforeach() 35 | elseif(APPLE) 36 | set(OSX_RPATHS "@loader_path/../lib;@loader_path/../../../../lib;@loader_path/../../../") 37 | list(APPEND INSTALL_TARGETS ${TARGET_NAME}_AU) 38 | if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") 39 | list(APPEND OSX_RPATHS "/opt/intel/oneapi/mkl/latest/lib") 40 | endif() 41 | foreach(TARGET ${INSTALL_TARGETS}) 42 | set_target_properties(${TARGET} 43 | PROPERTIES 44 | INSTALL_RPATH "${OSX_RPATHS}" 45 | ) 46 | endforeach() 47 | endif() 48 | 49 | # install the target and create export-set 50 | install(TARGETS ${INSTALL_TARGETS} 51 | # these get default values from GNUInstallDirs 52 | RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} 53 | LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} 54 | BUNDLE DESTINATION ${CMAKE_INSTALL_BINDIR} 55 | ) 56 | -------------------------------------------------------------------------------- /cmake/msvc-support.cmake: -------------------------------------------------------------------------------- 1 | # ============================================================================== 2 | # Windows specific settings 3 | # ============================================================================== 4 | 5 | # Define the export symbol for MSVC builds (shared library) 6 | target_compile_definitions(${PROJECT_NAME} PRIVATE 
ANIRA_EXPORTS) 7 | 8 | if(NOT CMAKE_BUILD_TYPE) 9 | message(FATAL_ERROR "You need to specify CMAKE_BUILD_TYPE") 10 | endif() 11 | 12 | if(CMAKE_GENERATOR MATCHES "Visual Studio") 13 | set(ANIRA_DLL "${anira_BINARY_DIR}/${CMAKE_BUILD_TYPE}/anira.dll") 14 | else() 15 | set(ANIRA_DLL "${anira_BINARY_DIR}/anira.dll") 16 | endif() 17 | 18 | list(APPEND ANIRA_SHARED_LIBS_WIN ${ANIRA_DLL}) 19 | 20 | # Add all necessary DLLs to a list for later copying 21 | # Backend DLLs 22 | if(ANIRA_WITH_ONNXRUNTIME) 23 | file(GLOB_RECURSE INFERENCE_ENGINE_DLLS_ONNX "${ANIRA_ONNXRUNTIME_SHARED_LIB_PATH}/*.dll") 24 | list(APPEND ANIRA_SHARED_LIBS_WIN ${INFERENCE_ENGINE_DLLS_ONNX}) 25 | endif(ANIRA_WITH_ONNXRUNTIME) 26 | if (ANIRA_WITH_TFLITE) 27 | file(GLOB_RECURSE INFERENCE_ENGINE_DLLS_TFLITE "${ANIRA_TENSORFLOWLITE_SHARED_LIB_PATH}/*.dll") 28 | list(APPEND ANIRA_SHARED_LIBS_WIN ${INFERENCE_ENGINE_DLLS_TFLITE}) 29 | endif(ANIRA_WITH_TFLITE) 30 | if (ANIRA_WITH_LIBTORCH) 31 | file(GLOB_RECURSE INFERENCE_ENGINE_DLLS_LIBTORCH "${ANIRA_LIBTORCH_SHARED_LIB_PATH}*.dll") 32 | list(APPEND ANIRA_SHARED_LIBS_WIN ${INFERENCE_ENGINE_DLLS_LIBTORCH}) 33 | endif(ANIRA_WITH_LIBTORCH) 34 | 35 | # Google Benchmark and Google Test DLLs 36 | if (ANIRA_WITH_TESTS OR ANIRA_WITH_BENCHMARK) 37 | if(CMAKE_GENERATOR MATCHES "Visual Studio") 38 | list(APPEND ANIRA_SHARED_LIBS_WIN "${CMAKE_BINARY_DIR}/bin/${CMAKE_BUILD_TYPE}/gtest.dll") 39 | list(APPEND ANIRA_SHARED_LIBS_WIN "${CMAKE_BINARY_DIR}/bin/${CMAKE_BUILD_TYPE}/gtest_main.dll") 40 | else() 41 | list(APPEND ANIRA_SHARED_LIBS_WIN "${CMAKE_BINARY_DIR}/bin/gtest.dll") 42 | list(APPEND ANIRA_SHARED_LIBS_WIN "${CMAKE_BINARY_DIR}/bin/gtest_main.dll") 43 | endif() 44 | endif() 45 | 46 | if (ANIRA_WITH_BENCHMARK) 47 | if(CMAKE_GENERATOR MATCHES "Visual Studio") 48 | list(APPEND ANIRA_SHARED_LIBS_WIN "${CMAKE_BINARY_DIR}/_deps/benchmark-build/src/${CMAKE_BUILD_TYPE}/benchmark.dll") 49 | else() 50 | list(APPEND ANIRA_SHARED_LIBS_WIN "${CMAKE_BINARY_DIR}/_deps/benchmark-build/src/benchmark.dll") 51 | endif() 52 | endif() 53 | 54 | # Make a list of all necessary DLLs for the project 55 | get_directory_property(hasParent PARENT_DIRECTORY) 56 | if(hasParent) 57 | set(ANIRA_SHARED_LIBS_WIN ${ANIRA_SHARED_LIBS_WIN} PARENT_SCOPE) 58 | endif() 59 | -------------------------------------------------------------------------------- /docs/sphinx/about.rst: -------------------------------------------------------------------------------- 1 | About 2 | ===== 3 | 4 | **anira** is a C++ library designed to streamline the development of real-time audio applications that integrate neural network inference. It provides a high-performance, real-time safe execution environment for neural networks, ensuring deterministic runtimes that meet the demands of professional audio processing. 5 | 6 | Neural network inference in anira is powered by industry-standard engines, including LibTorch, ONNXRuntime, and TensorFlow Lite. The library offers a unified interface to these engines through the :cpp:class:`anira::InferenceHandler` class, delegating inference execution to a static thread pool. This architecture maintains real-time safety by executing inference outside the audio thread, ensuring applications remain responsive and deterministic. Additionally, anira leverages multiple CPU cores for efficient parallel inference. 7 | 8 | Anira is optimized to minimize latency and supports predefined tensor shapes for neural networks. 
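For instance, the example configurations bundled with the repository describe a model file, its input and output tensor shapes per backend, and an optional processing specification in just a few lines (a sketch that mirrors those headers; identifiers and paths are illustrative):

.. code-block:: cpp

    static std::vector<anira::ModelData> model_data = {
        {"path/to/model.pt", anira::InferenceBackend::LIBTORCH}
    };

    static std::vector<anira::TensorShape> tensor_shapes = {
        {{{1, 1, 2048}}, {{1, 1, 2048}}, anira::InferenceBackend::LIBTORCH}
    };

    static anira::ProcessingSpec processing_spec = {
        {1},    // preprocess_input_channels
        {1},    // postprocess_output_channels
        {2048}, // preprocess_input_size
        {2048}  // postprocess_output_size
    };

    static anira::InferenceConfig my_config(model_data, tensor_shapes, processing_spec, 42.66f);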
A key feature is the intelligent adaptation of host audio buffers to neural network input and output tensors, with automatic calculation of the minimum required latency based on the :cpp:struct:`anira::InferenceConfig` struct. If neural network processing exceeds the latency threshold, anira compensates for missing frames to maintain smooth and synchronized audio processing. 9 | 10 | Model inputs and outputs can be preprocessed and postprocessed using built-in functionality. For custom data handling, developers can extend the :cpp:class:`anira::PrePostProcessor` class to implement specialized logic. This flexibility ensures neural network models receive properly formatted data and that results are correctly integrated into the audio processing pipeline. 11 | 12 | The library supports a wide range of neural network architectures, including both stateful and stateless models, with single or multiple input and output tensors. Tensors are categorized as streamable (for time-varying data like audio signals) or non-streamable (for static parameters requiring asynchronous updates). Since version 2.0, anira supports input and output tensors with varying sizes and sampling rates, enabling more complex processing scenarios and greater architectural flexibility. 13 | 14 | Anira also features built-in benchmarking tools, allowing developers to evaluate neural network performance within the same environment as their audio applications. This is essential for optimizing applications to meet real-time processing requirements. 15 | 16 | While anira is primarily focused on audio processing, it is also suitable for other real-time applications such as robotics and computer vision, where both streamable and non-streamable data processing are required. Its design principles and real-time safety features make it a versatile tool for developers across various domains. -------------------------------------------------------------------------------- /cmake/package.cmake: -------------------------------------------------------------------------------- 1 | # to package: 2 | # go into the build dir 3 | # call "cpack -G DEB" 4 | # sometimes it needs to be called twice... 5 | # after installing with "apt install ./libanira*.deb" update the ld cache with "ldconfig" 6 | 7 | set(CPACK_THREADS 10) 8 | 9 | set(CPACK_PACKAGE_NAME "lib${PROJECT_NAME}") 10 | set(CPACK_DEBIAN_PACKAGE_NAME ${CPACK_PACKAGE_NAME}) 11 | set(CPACK_PACKAGE_VENDOR "anira-project") 12 | set(CPACK_VERBATIM_VARIABLES YES) 13 | 14 | set(CPACK_PACKAGE_INSTALL_DIRECTORY ${CPACK_PACKAGE_NAME}) 15 | 16 | #TODO maybe change this to outside of buildtree? 
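# With the prefix below, the generated packages end up in <build-dir>/packages/. A typical invocation from
# the build directory is (sketch): cmake --build . && cpack -G DEB, which produces the separate runtime,
# dev and deps component packages configured further down in this file.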
17 | set(CPACK_OUTPUT_FILE_PREFIX "${CMAKE_BINARY_DIR}/packages") 18 | 19 | set(CPACK_PACKAGING_INSTALL_PREFIX "/usr/local") 20 | 21 | set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR}) 22 | set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR}) 23 | set(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH}) 24 | 25 | set(CPACK_PACKAGE_CONTACT "fares.schulz@tu-berlin.de") 26 | set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Fares Schulz <${CPACK_PACKAGE_CONTACT}>") 27 | 28 | set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_SOURCE_DIR}/LICENSE") 29 | set(CPACK_RESOURCE_FILE_README "${CMAKE_SOURCE_DIR}/README.md") 30 | 31 | #TODO add all actual deps 32 | #TODO add changelog 33 | #TODO add copyright file 34 | 35 | set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT) 36 | 37 | # each group (or component, if not in group) is built as a seperate package 38 | set(CPACK_COMPONENTS_GROUPING ONE_PER_GROUP) 39 | set(CPACK_DEB_COMPONENT_INSTALL YES) 40 | 41 | # setup components 42 | cpack_add_component(runtime REQUIRED) 43 | cpack_add_component(dev DEPENDS runtime) 44 | 45 | # group all dependency components 46 | cpack_add_component(deps-backends GROUP deps) 47 | cpack_add_component(Devel GROUP deps) 48 | cpack_add_component(Unspecified GROUP deps) 49 | 50 | if (ANIRA_WITH_BENCHMARK) 51 | cpack_add_component(gtest GROUP deps) 52 | cpack_add_component(gmock GROUP deps) 53 | endif() 54 | 55 | # remove -runtime suffix of runtime package, add major version number instead 56 | set(CPACK_DEBIAN_RUNTIME_PACKAGE_NAME ${CPACK_PACKAGE_NAME}${PROJECT_VERSION_MAJOR}) 57 | 58 | # automatically generete dependencies between components 59 | set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON) 60 | 61 | set(CPACK_DEBIAN_RUNTIME_PACKAGE_SECTION libs) 62 | set(CPACK_DEBIAN_DEV_PACKAGE_SECTION libdevel) 63 | 64 | # extremely slow, and doesn't work for the deps package 65 | set(CPACK_DEBIAN_PACKAGE_GENERATE_SHLIBS ON) 66 | 67 | # fix unstripped-binary-or-object error (probably to remove unwanted debug symbols) 68 | set(CPACK_STRIP_FILES YES) 69 | 70 | # set package descriptions, cmake variables 71 | set(CPACK_DEBIAN_RUNTIME_DESCRIPTION "library for real-time inference of neural networks") 72 | set(CPACK_DEBIAN_DEV_DESCRIPTION "header files for libanira${PROJECT_VERSION_MAJOR}") 73 | set(CPACK_DEBIAN_DEPS_DESCRIPTION "misc deps for libanira${PROJECT_VERSION_MAJOR}") 74 | 75 | include(CPack) -------------------------------------------------------------------------------- /test/WavReader.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | // Adapted from https://stackoverflow.com/a/75704890 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | using namespace std; 10 | 11 | struct RIFFHeader{ 12 | char chunk_id[4]; 13 | uint32_t chunk_size; 14 | char format[4]; 15 | }; 16 | 17 | struct ChunkInfo{ 18 | char chunk_id[4]; 19 | uint32_t chunk_size; 20 | }; 21 | 22 | struct FmtChunk{ 23 | uint16_t audio_format; 24 | uint16_t num_channels; 25 | uint32_t sample_rate; 26 | uint32_t byte_rate; 27 | uint16_t block_align; 28 | uint16_t bits_per_sample; 29 | uint16_t extra_params_size; 30 | char* extra_params; 31 | FmtChunk(): extra_params{nullptr} {} 32 | FmtChunk(uint32_t chunk_size): extra_params{new char[chunk_size - 18]} {} 33 | ~FmtChunk(){delete[] extra_params;} 34 | }; 35 | 36 | 37 | struct DataChunk 38 | // We assume 16-bit monochannel samples 39 | { 40 | float* data; 41 | int num_samples; 42 | DataChunk(int s): num_samples{s}, data{new float[s]} {} 43 | ~DataChunk(){delete[] data;} 44 
| }; 45 | 46 | inline int read_wav(string path, std::vector& data){ 47 | constexpr char riff_id[4] = {'R','I','F','F'}; 48 | constexpr char format[4] = {'W','A','V','E'}; 49 | constexpr char fmt_id[4] = {'f','m','t',' '}; 50 | constexpr char data_id[4] = {'d','a','t','a'}; 51 | 52 | ifstream ifs{path, ios_base::binary}; 53 | if (!ifs){ 54 | cerr << "Cannot open file " << path << endl; 55 | return -1; 56 | } 57 | 58 | // first read RIFF header 59 | RIFFHeader h; 60 | ifs.read((char*)(&h), sizeof(h)); 61 | if (!ifs || memcmp(h.chunk_id, riff_id, 4) || memcmp(h.format, format, 4)){ 62 | cerr << "Bad formatting" << endl; 63 | return -1; 64 | } 65 | 66 | // read chunk infos iteratively 67 | ChunkInfo ch; 68 | bool fmt_read = false; 69 | bool data_read = false; 70 | while(ifs.read((char*)(&ch), sizeof(ch))){ 71 | // if fmt chunk? 72 | if (memcmp(ch.chunk_id, fmt_id, 4) == 0){ 73 | FmtChunk fmt(ch.chunk_size); 74 | ifs.read((char*)(&fmt), ch.chunk_size); 75 | fmt_read = true; 76 | } 77 | // is data chunk? 78 | else if(memcmp(ch.chunk_id, data_id, 4) == 0){ 79 | DataChunk dat_chunk(ch.chunk_size/sizeof(float)); 80 | ifs.read((char*)dat_chunk.data, ch.chunk_size); 81 | // put data in vector 82 | data.assign(dat_chunk.data, dat_chunk.data + dat_chunk.num_samples); 83 | 84 | data_read = true; 85 | } 86 | // otherwise skip the chunk 87 | else{ 88 | ifs.seekg(ch.chunk_size, ios_base::cur); 89 | } 90 | } 91 | if (!data_read || !fmt_read){ 92 | cout << "Problem when reading data" << endl; 93 | return -1; 94 | } 95 | return 0; 96 | } 97 | -------------------------------------------------------------------------------- /.github/actions/build/action.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | description: "Build the project with cmake" 3 | 4 | inputs: 5 | BUILD_TYPE: 6 | required: true 7 | description: "The build type" 8 | CMAKE_BUILD_PARALLEL_LEVEL: 9 | required: true 10 | description: "The number of parallel builds" 11 | CMAKE_BUILD_ARGS: 12 | required: true 13 | description: "The cmake build arguments" 14 | TARGETS: 15 | required: false 16 | description: "The targets to build" 17 | 18 | runs: 19 | using: "composite" 20 | steps: 21 | # We cache the build to speed up the build process 22 | - name: cache the build 23 | uses: mozilla-actions/sccache-action@v0.0.8 24 | 25 | - name: print cmake build args 26 | shell: bash 27 | run: | 28 | echo "BUILD_TYPE=${{ inputs.BUILD_TYPE }}" 29 | echo "CMAKE_BUILD_PARALLEL_LEVEL=${{ inputs.CMAKE_BUILD_PARALLEL_LEVEL }}" 30 | echo "CMAKE_BUILD_ARGS=${{ inputs.CMAKE_BUILD_ARGS }}" 31 | 32 | # With DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" we can build universal binaries for apple computers, but this is not supported by the inference engines 33 | - name: cmake configure 34 | shell: bash 35 | run: | 36 | if [[ "${{ matrix.name }}" == Linux-x86_64* ]]; then 37 | cmake -B build -G Ninja -DCMAKE_BUILD_TYPE=${{ inputs.BUILD_TYPE }} -DCMAKE_C_COMPILER_LAUNCHER=sccache -DCMAKE_CXX_COMPILER_LAUNCHER=sccache ${{ inputs.CMAKE_BUILD_ARGS }} 38 | elif [[ "${{ matrix.name }}" == macOS-x86_64* ]]; then 39 | cmake -B build -G Ninja -DCMAKE_BUILD_TYPE=${{ inputs.BUILD_TYPE }} -DCMAKE_C_COMPILER_LAUNCHER=sccache -DCMAKE_CXX_COMPILER_LAUNCHER=sccache -DCMAKE_OSX_ARCHITECTURES=x86_64 ${{ inputs.CMAKE_BUILD_ARGS }} 40 | elif [[ "${{ matrix.name }}" == macOS-arm64* ]]; then 41 | cmake -B build -G Ninja -DCMAKE_BUILD_TYPE=${{ inputs.BUILD_TYPE }} -DCMAKE_C_COMPILER_LAUNCHER=sccache -DCMAKE_CXX_COMPILER_LAUNCHER=sccache 
-DCMAKE_OSX_ARCHITECTURES=arm64 ${{ inputs.CMAKE_BUILD_ARGS }} 42 | elif [[ "${{ matrix.name }}" == Windows-x86_64* ]]; then 43 | cmake -B build -DCMAKE_BUILD_TYPE=${{ inputs.BUILD_TYPE }} -DCMAKE_C_COMPILER_LAUNCHER=sccache -DCMAKE_CXX_COMPILER_LAUNCHER=sccache ${{ inputs.CMAKE_BUILD_ARGS }} 44 | else 45 | cmake -B build -G Ninja -DCMAKE_BUILD_TYPE=${{ inputs.BUILD_TYPE }} -DCMAKE_C_COMPILER_LAUNCHER=sccache -DCMAKE_CXX_COMPILER_LAUNCHER=sccache ${{ inputs.CMAKE_BUILD_ARGS }} 46 | fi; 47 | 48 | - name: build targets 49 | shell: bash 50 | run: | 51 | TARGETS=${{ inputs.TARGETS }} 52 | if [ -n "${TARGETS}" ]; then 53 | for target in ${TARGETS[@]}; do 54 | echo "Building target: $target" 55 | cmake --build build --config ${{ inputs.BUILD_TYPE }} --parallel ${{ inputs.CMAKE_BUILD_PARALLEL_LEVEL }} --target $target 56 | done 57 | else 58 | echo "Building all targets" 59 | cmake --build build --config ${{ inputs.BUILD_TYPE }} --parallel ${{ inputs.CMAKE_BUILD_PARALLEL_LEVEL }} 60 | fi 61 | 62 | -------------------------------------------------------------------------------- /examples/juce-audio-plugin/PluginParameters.h: -------------------------------------------------------------------------------- 1 | #ifndef NN_INFERENCE_TEMPLATE_PLUGINPARAMETERS_H 2 | #define NN_INFERENCE_TEMPLATE_PLUGINPARAMETERS_H 3 | 4 | #include "JuceHeader.h" 5 | 6 | class PluginParameters { 7 | public: 8 | inline static const juce::ParameterID 9 | #if MODEL_TO_USE == 4 || MODEL_TO_USE == 5 10 | GAIN_ID = {"param_gain", 1}, 11 | #endif 12 | #if MODEL_TO_USE == 7 13 | LATENT_0_ID = {"param_latent_0", 1}, 14 | LATENT_1_ID = {"param_latent_1", 1}, 15 | LATENT_2_ID = {"param_latent_2", 1}, 16 | LATENT_3_ID = {"param_latent_3", 1}, 17 | #endif 18 | BACKEND_TYPE_ID = {"param_backend_type", 1}, 19 | DRY_WET_ID = {"param_mix", 1} 20 | ; 21 | 22 | 23 | inline static const juce::String 24 | #if MODEL_TO_USE == 4 || MODEL_TO_USE == 5 25 | GAIN_NAME = "Gain", 26 | #endif 27 | #if MODEL_TO_USE == 7 28 | LATENT_0_NAME = "Latent 0", 29 | LATENT_1_NAME = "Latent 1", 30 | LATENT_2_NAME = "Latent 2", 31 | LATENT_3_NAME = "Latent 3", 32 | #endif 33 | BACKEND_TYPE_NAME = "Backend Type", 34 | DRY_WET_NAME = "Dry/Wet" 35 | ; 36 | 37 | #if MODEL_TO_USE == 6 || MODEL_TO_USE == 7 || MODEL_TO_USE == 8 38 | inline static juce::StringArray backendTypes {"LIBTORCH", "BYPASS"}; 39 | inline static juce::String defaultBackend = "LIBTORCH"; 40 | #else 41 | inline static juce::StringArray backendTypes {"TFLITE", "LIBTORCH", "ONNX", "BYPASS"}; 42 | inline static juce::String defaultBackend = "BYPASS"; 43 | #endif 44 | 45 | static juce::StringArray getPluginParameterList(); 46 | static juce::AudioProcessorValueTreeState::ParameterLayout createParameterLayout(); 47 | 48 | private: 49 | inline static juce::StringArray parameterList; 50 | 51 | #if MODEL_TO_USE == 4 || MODEL_TO_USE == 5 52 | inline static juce::NormalisableRange gainRange {0.0f, 3.981072f, 0.00001f, 0.25f}; 53 | #endif 54 | #if MODEL_TO_USE == 7 55 | inline static juce::NormalisableRange latentRange {-1.0f, 1.0f, 0.00001f}; 56 | #endif 57 | inline static juce::NormalisableRange dryWetRange {0.0f, 1.0f, 0.00001f}; 58 | 59 | #if MODEL_TO_USE == 7 60 | inline static juce::AudioParameterFloatAttributes latentAttributes = juce::AudioParameterFloatAttributes() 61 | .withStringFromValueFunction([](float x, auto) { 62 | return juce::String(x, 2); 63 | }) 64 | .withLabel("Offset"); 65 | #endif 66 | 67 | inline static juce::AudioParameterFloatAttributes percentage_attributes = 
juce::AudioParameterFloatAttributes() 68 | .withStringFromValueFunction ([] (float x, auto) { 69 | return juce::String(x*100.f, 0) + " %"; 70 | }); 71 | 72 | inline static juce::AudioParameterFloatAttributes db_attributes = juce::AudioParameterFloatAttributes() 73 | .withStringFromValueFunction ([] (float x, auto) { 74 | auto db = juce::Decibels::gainToDecibels(x); 75 | return juce::String(db, 1) + " dB"; 76 | }); 77 | }; 78 | 79 | #endif //NN_INFERENCE_TEMPLATE_PLUGINPARAMETERS_H 80 | -------------------------------------------------------------------------------- /docs/sphinx/conf.py.in: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | 9 | project = 'Anira' 10 | version = '@PROJECT_VERSION@' 11 | copyright = '2025, Valentin Ackva and Fares Schulz' 12 | author = 'Valentin Ackva and Fares Schulz' 13 | 14 | # -- General configuration --------------------------------------------------- 15 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 16 | 17 | extensions = [ 18 | 'breathe', 19 | 'sphinx.ext.autodoc', 20 | 'sphinx.ext.intersphinx', 21 | 'sphinx.ext.viewcode', 22 | 'sphinx.ext.napoleon', 23 | 'sphinx.ext.graphviz', 24 | 'myst_parser' 25 | ] 26 | 27 | # Breathe configuration 28 | breathe_projects = {"Anira": "@CMAKE_CURRENT_BINARY_DIR@/doxygen/xml"} 29 | breathe_default_project = "Anira" 30 | breathe_default_members = ('members', 'undoc-members') 31 | 32 | # Configure Breathe to handle macros better 33 | breathe_domain_by_extension = {"h": "cpp", "hpp": "cpp"} 34 | breathe_implementation_filename_extensions = ['.c', '.cc', '.cpp'] 35 | 36 | templates_path = ['@SPHINX_SOURCE_DIR@/_templates'] 37 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'venv'] 38 | 39 | # -- Options for HTML output ------------------------------------------------- 40 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 41 | 42 | html_theme = "shibuya" 43 | 44 | # Add custom CSS for better syntax highlighting 45 | html_static_path = ['@SPHINX_SOURCE_DIR@/_static'] 46 | html_css_files = [ 47 | 'custom.css', 48 | ] 49 | 50 | html_theme_options = { 51 | "github_url": "https://github.com/anira-project/anira", 52 | "nav_links": [ 53 | { 54 | "title": "About", 55 | "url": "about", 56 | "summary": "About the Anira library" 57 | }, 58 | { 59 | "title": "Getting Started", 60 | "url": "getting_started", 61 | "summary": "How to get started with Anira" 62 | }, 63 | { 64 | "title": "Usage Guide", 65 | "url": "usage", 66 | "summary": "Detailed usage instructions for Anira" 67 | }, 68 | { 69 | "title": "API Documentation", 70 | "url": "api/index", 71 | "children": [ 72 | { 73 | "title": "Class List", 74 | "url": "api/classlist", 75 | "summary": "List of all classes in the API", 76 | }, 77 | { 78 | "title": "Struct List", 79 | "url": "api/structlist", 80 | "summary": "List of all structs in the API", 81 | }, 82 | ] 83 | }, 84 | ] 85 | } -------------------------------------------------------------------------------- /include/anira/utils/InferenceBackend.h: 
-------------------------------------------------------------------------------- 1 | #ifndef ANIRA_INFERENCEBACKEND_H 2 | #define ANIRA_INFERENCEBACKEND_H 3 | 4 | namespace anira { 5 | 6 | /** 7 | * @brief Enumeration of supported neural network inference backends 8 | * 9 | * The InferenceBackend enum defines the available neural network inference engines 10 | * that can be used for real-time audio processing. Each backend provides different 11 | * performance characteristics, model format support, and platform compatibility. 12 | * 13 | * Backend availability is determined at compile time through preprocessor macros, 14 | * allowing for selective inclusion based on project requirements and dependencies. 15 | * The CUSTOM backend is always available for user-defined inference implementations. 16 | * 17 | * Performance and compatibility considerations: 18 | * - LIBTORCH: PyTorch models, larger memory footprint 19 | * - ONNX: Cross-platform ONNX models, optimized for CPU inference 20 | * - TFLITE: TensorFlow Lite models, optimized for mobile and embedded devices 21 | * - CUSTOM: User-defined backends for specialized inference implementations 22 | * 23 | * @note Backend availability depends on compile-time flags (USE_LIBTORCH, USE_ONNXRUNTIME, USE_TFLITE) 24 | * and the presence of corresponding dependencies in the build system. 25 | */ 26 | enum InferenceBackend { 27 | #ifdef USE_LIBTORCH 28 | /** 29 | * @brief LibTorch (PyTorch C++) inference backend 30 | * 31 | * Uses the LibTorch library for running PyTorch models in C++. This backend is ideal for models trained 32 | * with PyTorch. Requires the LibTorch library to be linked at build time. 33 | * 34 | * Model format: .pt, .pth (PyTorch TorchScript) 35 | * Platform support: Windows, Linux, macOS 36 | */ 37 | LIBTORCH, 38 | #endif 39 | #ifdef USE_ONNXRUNTIME 40 | /** 41 | * @brief ONNX Runtime inference backend 42 | * 43 | * Uses Microsoft's ONNX Runtime for running ONNX (Open Neural Network Exchange) 44 | * format models. This backend is highly optimized for CPU inference and provides 45 | * good cross-platform compatibility. Requires ONNX Runtime to be linked at build time. 46 | * 47 | * Model format: .onnx 48 | * Platform support: Windows, Linux, macOS, mobile platforms 49 | */ 50 | ONNX, 51 | #endif 52 | #ifdef USE_TFLITE 53 | /** 54 | * @brief TensorFlow Lite inference backend 55 | * 56 | * Uses Google's TensorFlow Lite for running quantized and optimized TensorFlow 57 | * models. This backend is designed for mobile and embedded devices with limited 58 | * computational resources. Requires TensorFlow Lite to be linked at build time. 59 | * 60 | * Model format: .tflite 61 | * Platform support: Windows, Linux, macOS, Android, iOS, embedded systems 62 | */ 63 | TFLITE, 64 | #endif 65 | /** 66 | * @brief Custom user-defined inference backend 67 | * 68 | * Placeholder for custom inference implementations. This backend type allows 69 | * users to implement their own inference engines by extending the BackendBase 70 | * class. Always available regardless of compile-time flags. 
71 | * 72 | * Model format: User-defined 73 | * Platform support: Depends on user implementation 74 | */ 75 | CUSTOM 76 | }; 77 | 78 | } // namespace anira 79 | 80 | #endif //ANIRA_INFERENCEBACKEND_H -------------------------------------------------------------------------------- /examples/clap-audio-plugin/anira-clap-demo.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_CLAP_PLUGIN_EXAMPLE_H 2 | #define ANIRA_CLAP_PLUGIN_EXAMPLE_H 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | 16 | #include "../../extras/models/hybrid-nn/HybridNNConfig.h" 17 | #include "../../extras/models/hybrid-nn/HybridNNPrePostProcessor.h" 18 | #include "../../extras/models/hybrid-nn/HybridNNBypassProcessor.h" 19 | 20 | #include "utils/DryWetMixer.h" 21 | 22 | namespace clap_plugin_example 23 | { 24 | 25 | struct AniraClapPluginExample : public clap::helpers::Plugin 27 | { 28 | AniraClapPluginExample(const clap_host *host); 29 | ~AniraClapPluginExample(); 30 | 31 | static clap_plugin_descriptor m_desc; 32 | 33 | bool activate(double sampleRate, uint32_t minFrameCount, 34 | uint32_t maxFrameCount) noexcept override; 35 | 36 | enum ParamIds : uint32_t 37 | { 38 | pmDryWet = 14256, 39 | pmBackend = 14257 40 | }; 41 | static constexpr int m_number_params = 2; 42 | 43 | bool implementsParams() const noexcept override { return true; } 44 | bool isValidParamId(clap_id paramId) const noexcept override; 45 | uint32_t paramsCount() const noexcept override; 46 | bool paramsInfo(uint32_t paramIndex, clap_param_info *info) const noexcept override; 47 | bool paramsValue(clap_id paramId, double *value) noexcept override; 48 | 49 | bool paramsValueToText(clap_id paramId, double value, char *display, 50 | uint32_t size) noexcept override; 51 | 52 | protected: 53 | bool paramsTextToValue(clap_id paramId, const char *display, double *value) noexcept override; 54 | 55 | public: 56 | bool implementsAudioPorts() const noexcept override { return true; } 57 | uint32_t audioPortsCount(bool isInput) const noexcept override; 58 | bool audioPortsInfo(uint32_t index, bool isInput, 59 | clap_audio_port_info *info) const noexcept override; 60 | 61 | clap_process_status process(const clap_process *process) noexcept override; 62 | void checkForEvents(const clap_process *process); 63 | void handleInboundEvent(const clap_event_header_t *evt); 64 | 65 | void paramsFlush(const clap_input_events *in, const clap_output_events *out) noexcept override; 66 | 67 | bool implementsLatency() const noexcept override; 68 | uint32_t latencyGet() const noexcept override; 69 | 70 | private: 71 | double m_param_dry_wet{100.0}, m_param_backend{3}; 72 | std::unordered_map m_param_to_value; 73 | uint32_t m_plugin_latency; 74 | 75 | anira::ContextConfig m_anira_context; 76 | 77 | anira::InferenceConfig m_inference_config = hybridnn_config; 78 | HybridNNPrePostProcessor m_pp_processor; 79 | HybridNNBypassProcessor m_bypass_processor; 80 | 81 | anira::InferenceHandler m_inference_handler; 82 | 83 | utils::DryWetMixer m_dry_wet_mixer; 84 | 85 | enum Backend { 86 | OnnxRuntime, 87 | LibTorch, 88 | TensorFlowLite, 89 | Bypassed 90 | }; 91 | }; 92 | 93 | } // namespace clap_plugin_example 94 | 95 | #endif //ANIRA_CLAP_PLUGIN_EXAMPLE_H -------------------------------------------------------------------------------- /examples/juce-audio-plugin/PluginParameters.cpp: -------------------------------------------------------------------------------- 1 
| #include "PluginParameters.h" 2 | 3 | juce::AudioProcessorValueTreeState::ParameterLayout PluginParameters::createParameterLayout() { 4 | std::vector> params; 5 | 6 | #if MODEL_TO_USE == 4 || MODEL_TO_USE == 5 7 | params.push_back (std::make_unique (GAIN_ID, 8 | GAIN_NAME, 9 | gainRange, 10 | 1.0f, 11 | db_attributes)); 12 | #endif 13 | #if MODEL_TO_USE == 7 14 | params.push_back (std::make_unique (LATENT_0_ID, 15 | LATENT_0_NAME, 16 | latentRange, 17 | 0.0f, 18 | latentAttributes)); 19 | params.push_back (std::make_unique (LATENT_1_ID, 20 | LATENT_1_NAME, 21 | latentRange, 22 | 0.0f, 23 | latentAttributes)); 24 | params.push_back (std::make_unique (LATENT_2_ID, 25 | LATENT_2_NAME, 26 | latentRange, 27 | 0.0f, 28 | latentAttributes)); 29 | params.push_back (std::make_unique (LATENT_3_ID, 30 | LATENT_3_NAME, 31 | latentRange, 32 | 0.0f, 33 | latentAttributes)); 34 | #endif 35 | 36 | params.push_back (std::make_unique (BACKEND_TYPE_ID, 37 | BACKEND_TYPE_NAME, 38 | backendTypes, 39 | backendTypes.indexOf(defaultBackend))); 40 | 41 | params.push_back( std::make_unique (DRY_WET_ID, 42 | DRY_WET_NAME, 43 | dryWetRange, 44 | 1.0f, 45 | percentage_attributes)); 46 | 47 | if (parameterList.isEmpty()) { 48 | for (const auto & param : params) { 49 | parameterList.add(param->getParameterID()); 50 | } 51 | } 52 | 53 | return { params.begin(), params.end() }; 54 | } 55 | 56 | juce::StringArray PluginParameters::getPluginParameterList() { 57 | return parameterList; 58 | } 59 | -------------------------------------------------------------------------------- /docs/sphinx/latency.rst: -------------------------------------------------------------------------------- 1 | Latency 2 | ======= 3 | 4 | Overview 5 | -------- 6 | 7 | Latency is a critical factor in real-time audio processing applications. The anira framework implements a sophisticated latency calculation system to ensure proper synchronization between audio processing and neural network inference. 8 | 9 | How Latency is Calculated 10 | ------------------------- 11 | 12 | The latency calculation in anira is performed by the :cpp:class:`anira::SessionElement` class. The total system latency consists of several components: 13 | 14 | Buffer Adaptation Latency 15 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 16 | 17 | Accounts for mismatches between the host buffer size and the model's expected input/output sizes. When the host provides audio in different chunk sizes than what the model expects, additional buffering is required to accumulate or split the data appropriately. 18 | 19 | .. note:: 20 | When the host buffer size is a fractional (floating-point) value, this indicates that the host and model process buffers at non-integer ratios. The latency calculation in anira accounts for this by assuming the worst-case scenario: a sample is pushed to the :cpp:class:`anira::InferenceHandler` only when the host buffer accumulates a full sample. For example, if the host buffer size is 0.25f, the :cpp:class:`anira::InferenceHandler` receives one sample every four host buffer cycles, and latency is calculated as if the sample is delivered during the fourth cycle. If your system always sends the sample at the first host buffer cycle, a lower latency is possible—in such cases, consider configuring :cpp:class:`anira::InferenceHandler` with a custom latency value. 21 | 22 | 23 | Inference-Caused Latency 24 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 25 | 26 | Represents the delay introduced by the neural network inference process itself. 
This includes: 27 | 28 | * Maximum inference time of the model 29 | * Number of parallel inferences that can be processed 30 | * Time spent waiting for inference completion 31 | * Relationship between host buffer timing and inference completion 32 | 33 | Wait Time Calculation 34 | ~~~~~~~~~~~~~~~~~~~~~ 35 | 36 | When using controlled blocking (``blocking_ratio > 0``), the system may wait for inference to complete before continuing. This wait time is calculated based on the host buffer duration and the configured blocking ratio. 37 | 38 | Internal Model Latency 39 | ~~~~~~~~~~~~~~~~~~~~~~~ 40 | 41 | Additional latency that may be inherent to the model itself, such as look-ahead requirements or internal buffering. 42 | 43 | Latency Synchronization 44 | ~~~~~~~~~~~~~~~~~~~~~~~ 45 | 46 | When multiple outputs are present, the system synchronizes latencies across all outputs to ensure coherent processing. This is achieved by calculating a latency ratio and applying it uniformly across all output channels. 47 | 48 | Adaptive Buffer Handling 49 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 50 | 51 | For hosts that support variable buffer sizes (``allow_smaller_buffers``), the system performs additional calculations to handle worst-case scenarios across different buffer sizes, ensuring stable latency regardless of the actual buffer size used. 52 | 53 | Output Behavior 54 | --------------- 55 | 56 | The final latency value represents the total delay (in samples) between when input data enters the system and when the processed output data becomes available. 57 | 58 | .. important:: 59 | Before the first valid output is produced, the :cpp:func:`anira::InferenceHandler::process` and :cpp:func:`anira::InferenceHandler::pop_data` methods will return zeroed data. This ensures real-time audio processing without introducing unexpected delays or artifacts in the output signal. -------------------------------------------------------------------------------- /include/anira/backends/BackendBase.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_BACKENDBASE_H 2 | #define ANIRA_BACKENDBASE_H 3 | 4 | #include "../InferenceConfig.h" 5 | #include "../utils/Buffer.h" 6 | #include "../system/AniraWinExports.h" 7 | #include 8 | 9 | namespace anira { 10 | 11 | class SessionElement; // Forward declaration as we have a circular dependency 12 | 13 | /** 14 | * @brief Abstract base class for all neural network inference backends 15 | * 16 | * The BackendBase class defines the common interface and provides basic functionality 17 | * for all inference backend implementations. It serves as the foundation for specific 18 | * backend implementations such as LibTorch, ONNX Runtime, and TensorFlow Lite processors. 19 | * 20 | * @see LibtorchProcessor, OnnxRuntimeProcessor, TFLiteProcessor, InferenceConfig 21 | */ 22 | class ANIRA_API BackendBase { 23 | public: 24 | /** 25 | * @brief Constructs a BackendBase with the given inference configuration 26 | * 27 | * Initializes the backend processor with a reference to the inference configuration 28 | * that contains all necessary parameters for model loading and processing. 
29 | * 30 | * @param inference_config Reference to the inference configuration containing 31 | * model data, tensor shapes, and processing specifications 32 | */ 33 | BackendBase(InferenceConfig& inference_config); 34 | 35 | /** 36 | * @brief Virtual destructor for proper cleanup of derived classes 37 | */ 38 | virtual ~BackendBase() = default; 39 | 40 | /** 41 | * @brief Prepares the backend for inference operations 42 | * 43 | * This method is called during initialization to set up the inference backend. 44 | * The base implementation is empty, but derived classes should override this 45 | * to perform backend-specific initialization such as: 46 | * - Loading neural network models 47 | * - Allocating memory for tensors 48 | * - Configuring inference sessions 49 | * - Performing warm-up inferences 50 | * 51 | * @note This method should be called before any process() calls 52 | * @note Thread-safe: This method should only be called during initialization 53 | */ 54 | virtual void prepare(); 55 | 56 | /** 57 | * @brief Processes input buffers through the neural network model 58 | * 59 | * Performs inference on the provided input buffers and writes results to output buffers. 60 | * The base implementation provides a simple pass-through that copies input to output 61 | * when buffer dimensions match, otherwise clears the output. 62 | * 63 | * @param input Vector of input buffers containing audio or other data to process 64 | * @param output Vector of output buffers to write the processed results 65 | * @param session Shared pointer to session element for thread-safe processing context 66 | * 67 | * @par Thread Safety: 68 | * This method is designed to be called from real-time audio threads and should 69 | * be lock-free and deterministic in execution time. 
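 *
 * @par Example
 * A minimal custom backend sketch (illustrative only — the class name, the pass-through
 * logic, and the use of anira::BufferF / anira::SessionElement as the template arguments
 * are assumptions, not guarantees made by this header):
 * @code
 * class MyCustomBackend : public anira::BackendBase {
 * public:
 *     MyCustomBackend(anira::InferenceConfig& config) : anira::BackendBase(config) {}
 *
 *     void prepare() override {
 *         // Load the model, allocate tensors, run warm-up inferences, ...
 *     }
 *
 *     void process(std::vector<anira::BufferF>& input, std::vector<anira::BufferF>& output,
 *                  std::shared_ptr<anira::SessionElement> session) override {
 *         // Pass-through: copy the first input tensor into the first output tensor.
 *         for (size_t ch = 0; ch < output[0].get_num_channels(); ++ch) {
 *             for (size_t i = 0; i < output[0].get_num_samples(); ++i) {
 *                 output[0].set_sample(ch, i, input[0].get_sample(ch, i));
 *             }
 *         }
 *     }
 * };
 * @endcode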
70 | * 71 | * @warning The session parameter must be valid when using multi-threaded processing 72 | * @note Derived classes should override this method to implement actual inference 73 | */ 74 | virtual void process(std::vector& input, std::vector& output, [[maybe_unused]] std::shared_ptr session); 75 | 76 | InferenceConfig& m_inference_config; ///< Reference to inference configuration containing model and processing parameters 77 | }; 78 | 79 | } // namespace anira 80 | 81 | #endif //ANIRA_BACKENDBASE_H 82 | -------------------------------------------------------------------------------- /cmake/SetupTensorflowLite.cmake: -------------------------------------------------------------------------------- 1 | if (APPLE AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR WIN32) 2 | set(LIBTENSORFLOWLITE_VERSION "2.16.1") 3 | else() 4 | set(LIBTENSORFLOWLITE_VERSION "2.17.0") 5 | endif() 6 | 7 | option(TENSORFLOWLITE_ROOTDIR "tensorflowlite root dir") 8 | set(TENSORFLOWLITE_DIR_NAME "tensorflowlite-${LIBTENSORFLOWLITE_VERSION}-${ANIRA_OPERATING_SYSTEM}-${CMAKE_SYSTEM_PROCESSOR}") 9 | set(TENSORFLOWLITE_ROOTDIR ${CMAKE_CURRENT_SOURCE_DIR}/modules/${TENSORFLOWLITE_DIR_NAME}) 10 | 11 | if(EXISTS ${TENSORFLOWLITE_ROOTDIR}/) 12 | message(STATUS "Tensorflow Lite library found at ${TENSORFLOWLITE_ROOTDIR}") 13 | else() 14 | file(MAKE_DIRECTORY ${TENSORFLOWLITE_ROOTDIR}/) 15 | message(STATUS "Tensorflow Lite library not found - downloading pre-built library.") 16 | 17 | if(WIN32) 18 | set(LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME "tensorflowlite_c-${LIBTENSORFLOWLITE_VERSION}-Windows") 19 | endif() 20 | 21 | if(UNIX AND NOT APPLE) 22 | if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64") 23 | set(LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME "tensorflowlite_c-${LIBTENSORFLOWLITE_VERSION}-Linux-aarch64") 24 | elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") 25 | set(LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME "tensorflowlite_c-${LIBTENSORFLOWLITE_VERSION}-Linux-x86_64") 26 | elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l") 27 | set(LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME "tensorflowlite_c-${LIBTENSORFLOWLITE_VERSION}-Linux-armv7l") 28 | endif() 29 | endif() 30 | 31 | if(UNIX AND APPLE) 32 | if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") 33 | set(LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME "tensorflowlite_c-${LIBTENSORFLOWLITE_VERSION}-macOS-x86_64") 34 | elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64") 35 | set(LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME "tensorflowlite_c-${LIBTENSORFLOWLITE_VERSION}-macOS-arm64") 36 | endif() 37 | endif() 38 | 39 | set(LIBTENSORFLOWLITE_URL https://github.com/faressc/tflite-c-lib/releases/download/v${LIBTENSORFLOWLITE_VERSION}/${LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME}.zip) 40 | 41 | message(STATUS "Downloading ${LIBTENSORFLOWLITE_URL}") 42 | 43 | set(LIBTENSORFLOWLITE_PATH ${CMAKE_BINARY_DIR}/import/${LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME}.zip) 44 | 45 | file(DOWNLOAD ${LIBTENSORFLOWLITE_URL} ${LIBTENSORFLOWLITE_PATH} STATUS LIBTENSORFLOWLITE_DOWNLOAD_STATUS SHOW_PROGRESS) 46 | list(GET LIBTENSORFLOWLITE_DOWNLOAD_STATUS 0 LIBTENSORFLOWLITE_DOWNLOAD_STATUS_NO) 47 | 48 | file(ARCHIVE_EXTRACT 49 | INPUT ${LIBTENSORFLOWLITE_PATH} 50 | DESTINATION ${TENSORFLOWLITE_ROOTDIR}/) 51 | 52 | if(EXISTS ${TENSORFLOWLITE_ROOTDIR}/${LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME}/) 53 | file(COPY ${TENSORFLOWLITE_ROOTDIR}/${LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME}/ DESTINATION ${TENSORFLOWLITE_ROOTDIR}/) 54 | file(REMOVE_RECURSE ${TENSORFLOWLITE_ROOTDIR}/${LIB_TENSORFLOWLITE_PRE_BUILD_LIB_NAME}) 55 | endif() 56 | 57 | 
if(LIBTENSORFLOWLITE_DOWNLOAD_STATUS_NO) 58 | message(STATUS "Pre-built library not downloaded. Error occurred, try again and check cmake/SetupTensorflowLite.cmake") 59 | file(REMOVE_RECURSE ${TENSORFLOWLITE_ROOTDIR}) 60 | file(REMOVE ${LIBTENSORFLOWLITE_PATH}) 61 | else() 62 | message(STATUS "Linking downloaded TensorflowLite pre-built library.") 63 | endif() 64 | endif() 65 | 66 | set(ANIRA_TENSORFLOWLITE_SHARED_LIB_PATH "${TENSORFLOWLITE_ROOTDIR}") 67 | 68 | get_directory_property(hasParent PARENT_DIRECTORY) 69 | if(hasParent) 70 | set(ANIRA_TENSORFLOWLITE_SHARED_LIB_PATH "${ANIRA_TENSORFLOWLITE_SHARED_LIB_PATH}" PARENT_SCOPE) 71 | endif() 72 | 73 | list(APPEND BACKEND_BUILD_HEADER_DIRS "${TENSORFLOWLITE_ROOTDIR}/include") 74 | list(APPEND BACKEND_BUILD_LIBRARY_DIRS "${TENSORFLOWLITE_ROOTDIR}/lib") -------------------------------------------------------------------------------- /examples/benchmark/simple-benchmark/defineSimpleBenchmark.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "../../../extras/models/cnn/CNNConfig.h" 7 | #include "../../../extras/models/cnn/CNNPrePostProcessor.h" 8 | #include "../../../extras/models/hybrid-nn/HybridNNConfig.h" 9 | #include "../../../extras/models/hybrid-nn/HybridNNPrePostProcessor.h" 10 | #include "../../../extras/models/stateful-rnn/StatefulRNNConfig.h" 11 | #include "../../../extras/models/model-pool/SimpleGainConfig.h" 12 | #include "../../../extras/models/model-pool/SimpleStereoGainConfig.h" 13 | 14 | 15 | /* ============================================================ * 16 | * ========================= Configs ========================== * 17 | * ============================================================ */ 18 | 19 | #define NUM_ITERATIONS 5 20 | #define NUM_REPETITIONS 2 21 | #define BUFFER_SIZE 2048 22 | #define SAMPLE_RATE 44100 23 | 24 | /* ============================================================ * 25 | * ================== BENCHMARK DEFINITIONS =================== * 26 | * ============================================================ */ 27 | 28 | typedef anira::benchmark::ProcessBlockFixture ProcessBlockFixture; 29 | 30 | // anira::InferenceConfig my_inference_config = cnn_config; 31 | // CNNPrePostProcessor my_pp_processor(my_inference_config); 32 | anira::InferenceConfig my_inference_config = hybridnn_config; 33 | HybridNNPrePostProcessor my_pp_processor(my_inference_config); 34 | // anira::InferenceConfig my_inference_config = rnn_config; 35 | // anira::PrePostProcessor my_pp_processor(my_inference_config); 36 | // anira::InferenceConfig my_inference_config = gain_config; 37 | // anira::PrePostProcessor my_pp_processor(my_inference_config); 38 | // anira::InferenceConfig my_inference_config = stereo_gain_config; 39 | // anira::PrePostProcessor my_pp_processor(my_inference_config); 40 | 41 | BENCHMARK_DEFINE_F(ProcessBlockFixture, BM_SIMPLE)(::benchmark::State& state) { 42 | 43 | // The buffer size return in get_buffer_size() is populated by state.range(0) param of the google benchmark 44 | anira::HostConfig host_config = {static_cast(get_buffer_size()), SAMPLE_RATE}; 45 | anira::InferenceBackend inference_backend = anira::InferenceBackend::ONNX; 46 | 47 | m_inference_handler = std::make_unique(my_pp_processor, my_inference_config); 48 | m_inference_handler->prepare(host_config); 49 | m_inference_handler->set_inference_backend(inference_backend); 50 | 51 | m_buffer = 
std::make_unique>(my_inference_config.get_preprocess_input_channels()[0], host_config.m_buffer_size); 52 | 53 | initialize_repetition(my_inference_config, host_config, inference_backend); 54 | 55 | for (auto _ : state) { 56 | push_random_samples_in_buffer(host_config); 57 | 58 | initialize_iteration(); 59 | 60 | std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now(); 61 | 62 | m_inference_handler->process(m_buffer->get_array_of_write_pointers(), get_buffer_size()); 63 | 64 | while (!buffer_processed()) { 65 | std::this_thread::sleep_for(std::chrono::nanoseconds (10)); 66 | } 67 | 68 | std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); 69 | 70 | interation_step(start, end, state); 71 | } 72 | repetition_step(); 73 | } 74 | 75 | // /* ============================================================ * 76 | // * ================== BENCHMARK REGISTRATION ================== * 77 | // * ============================================================ */ 78 | 79 | BENCHMARK_REGISTER_F(ProcessBlockFixture, BM_SIMPLE) 80 | ->Unit(benchmark::kMillisecond) 81 | ->Iterations(NUM_ITERATIONS)->Repetitions(NUM_REPETITIONS) 82 | ->Arg(BUFFER_SIZE) 83 | ->UseManualTime(); -------------------------------------------------------------------------------- /docs/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Set up Python virtual environment for Sphinx 2 | find_package(Python3 COMPONENTS Interpreter Development REQUIRED) 3 | set(DOCS_VENV_DIR "${CMAKE_CURRENT_BINARY_DIR}/venv") 4 | 5 | # Use a more robust approach with a custom target for venv creation 6 | add_custom_target(create_docs_venv) 7 | 8 | # Check if venv already exists 9 | if(NOT EXISTS ${DOCS_VENV_DIR}) 10 | add_custom_command( 11 | TARGET create_docs_venv 12 | POST_BUILD 13 | COMMAND ${Python3_EXECUTABLE} -m venv ${DOCS_VENV_DIR} 14 | COMMAND ${DOCS_VENV_DIR}/bin/pip install --upgrade pip 15 | COMMAND ${DOCS_VENV_DIR}/bin/pip install --quiet sphinx breathe shibuya myst-parser 16 | WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} 17 | COMMENT "Creating virtual environment for Sphinx documentation" 18 | VERBATIM 19 | ) 20 | endif() 21 | 22 | # look for Doxygen package 23 | # Needs to be installed i.e. sudo dnf install doxygen graphviz 24 | find_package(Doxygen REQUIRED) 25 | 26 | if (DOXYGEN_FOUND) 27 | # set input and output files 28 | set(DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/doxygen/Doxyfile.in) 29 | set(DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/doxygen/Doxyfile) 30 | 31 | # request to configure the file 32 | configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY) 33 | message(STATUS "Doxygen configuration generated") 34 | 35 | # Note: do not put "ALL" - this builds docs together with application EVERY TIME! 
36 | add_custom_target(doxygen-docs 37 | COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT} 38 | WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} 39 | COMMENT "Generating API documentation with Doxygen" 40 | VERBATIM ) 41 | else (DOXYGEN_FOUND) 42 | message(WARNING "Doxygen need to be installed to generate the doxygen documentation") 43 | endif (DOXYGEN_FOUND) 44 | 45 | # Set up directories for Sphinx documentation 46 | set(SPHINX_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/sphinx) 47 | set(SPHINX_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx) 48 | 49 | # Use breathe-apidoc from the virtual environment 50 | add_custom_target(breathe-apidocs 51 | COMMAND ${DOCS_VENV_DIR}/bin/breathe-apidoc -o ${SPHINX_SOURCE_DIR}/api 52 | ${CMAKE_CURRENT_BINARY_DIR}/doxygen/xml 53 | -g class,struct -f 54 | COMMAND ${CMAKE_COMMAND} -E remove -f ${SPHINX_SOURCE_DIR}/api/struct/structanira_1_1InferenceConfig_1_1Defaults.rst 55 | COMMAND ${CMAKE_COMMAND} -E remove -f ${SPHINX_SOURCE_DIR}/api/struct/structanira_1_1SessionElement_1_1ThreadSafeStruct.rst 56 | # Add :allow-dot-graphs: directive to all class and struct files 57 | COMMAND bash -c "find ${SPHINX_SOURCE_DIR}/api/class -name '*.rst' -exec sed -i '/^\\.\\.\\s\\+doxygenclass::/a\\ :allow-dot-graphs:' {} +" 58 | COMMAND bash -c "find ${SPHINX_SOURCE_DIR}/api/struct -name '*.rst' -exec sed -i '/^\\.\\.\\s\\+doxygenstruct::/a\\ :allow-dot-graphs:' {} +" 59 | WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} 60 | 61 | COMMENT "Generating API documentation with Breathe using virtual environment" 62 | DEPENDS create_docs_venv doxygen-docs 63 | VERBATIM 64 | ) 65 | 66 | # Configure the Sphinx configuration file 67 | configure_file(${SPHINX_SOURCE_DIR}/conf.py.in ${SPHINX_BUILD_DIR}/conf.py @ONLY) 68 | 69 | # Collect all files in SPHINX_SOURCE_DIR as dependencies 70 | file(GLOB_RECURSE SPHINX_SOURCE_FILES "${SPHINX_SOURCE_DIR}/*") 71 | 72 | add_custom_target(sphinx-docs 73 | COMMAND ${DOCS_VENV_DIR}/bin/sphinx-build -b html 74 | -c ${SPHINX_BUILD_DIR} 75 | ${SPHINX_SOURCE_DIR} 76 | ${SPHINX_BUILD_DIR}/html 77 | WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} 78 | COMMENT "Generating HTML documentation with Sphinx using virtual environment" 79 | DEPENDS create_docs_venv doxygen-docs breathe-apidocs ${SPHINX_SOURCE_FILES} 80 | VERBATIM 81 | ) 82 | 83 | if (ANIRA_WITH_INSTALL) 84 | # Install documentation 85 | install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/sphinx/html/ 86 | DESTINATION share/doc/anira 87 | COMPONENT documentation 88 | OPTIONAL 89 | ) 90 | endif() -------------------------------------------------------------------------------- /docs/sphinx/contributing.rst: -------------------------------------------------------------------------------- 1 | Contributing to anira 2 | ===================== 3 | 4 | We welcome contributions to anira! This document provides guidelines and instructions for contributing to the project. 
5 | 6 | Ways to Contribute 7 | ------------------ 8 | 9 | There are many ways to contribute to anira: 10 | 11 | - **Bug reports**: Report issues you encounter 12 | - **Feature requests**: Suggest new features or improvements 13 | - **Documentation**: Help improve the documentation 14 | - **Code contributions**: Fix bugs or implement new features 15 | - **Examples**: Create example projects that use anira 16 | - **Testing**: Help test on different platforms and configurations 17 | 18 | Development Setup 19 | ----------------- 20 | 21 | Prerequisites 22 | ~~~~~~~~~~~~~ 23 | 24 | - C++17 compatible compiler 25 | - CMake 3.14 or higher 26 | - Git 27 | 28 | Getting the Code 29 | ~~~~~~~~~~~~~~~~ 30 | 31 | 1. Fork the anira repository on GitHub 32 | 2. Clone your fork locally: 33 | 34 | .. code-block:: bash 35 | 36 | git clone https://github.com/YOUR-USERNAME/anira.git 37 | cd anira 38 | 39 | 3. Add the original repository as an upstream remote: 40 | 41 | .. code-block:: bash 42 | 43 | git remote add upstream https://github.com/anira-project/anira.git 44 | 45 | Building for Development 46 | ~~~~~~~~~~~~~~~~~~~~~~~~ 47 | 48 | Build with all features enabled: 49 | 50 | .. code-block:: bash 51 | 52 | cmake . -B build -DCMAKE_BUILD_TYPE=Debug -DANIRA_WITH_TESTS=ON -DANIRA_WITH_BENCHMARK=ON -DANIRA_WITH_EXAMPLES=ON -DANIRA_BUILD_DOCS=ON 53 | cmake --build build 54 | 55 | Run tests to verify your setup: 56 | 57 | .. code-block:: bash 58 | 59 | cd build 60 | ctest 61 | 62 | Coding Guidelines 63 | ----------------- 64 | 65 | General 66 | ~~~~~~~ 67 | 68 | - Follow the existing code style 69 | - Write clear, readable, and maintainable code 70 | - Include appropriate documentation for public API 71 | - Add tests for new functionality 72 | 73 | Documentation 74 | ~~~~~~~~~~~~~ 75 | 76 | - Document all public APIs with Doxygen-compatible comments 77 | - Keep the documentation in sync with the code 78 | - Add examples to illustrate usage 79 | 80 | Testing 81 | ~~~~~~~ 82 | 83 | - Write unit tests for new functionality 84 | - Ensure all tests pass before submitting 85 | - If fixing a bug, add a test that reproduces the bug 86 | 87 | Submitting Changes 88 | ------------------ 89 | 90 | Pull Request Process 91 | ~~~~~~~~~~~~~~~~~~~~ 92 | 93 | 1. Create a new branch for your changes: 94 | 95 | .. code-block:: bash 96 | 97 | git checkout -b feature/your-feature-name 98 | 99 | 2. Make your changes and commit them: 100 | 101 | .. code-block:: bash 102 | 103 | git commit -m "Description of your changes" 104 | 105 | 3. Keep your branch updated with upstream: 106 | 107 | .. code-block:: bash 108 | 109 | git fetch upstream 110 | git rebase upstream/main 111 | 112 | 4. Push your branch to your fork: 113 | 114 | .. code-block:: bash 115 | 116 | git push origin feature/your-feature-name 117 | 118 | 5. Create a pull request from your branch to the main repository 119 | 120 | 6. Address any feedback from code reviews 121 | 122 | Code Review 123 | ~~~~~~~~~~~ 124 | 125 | All submissions require review before being merged. We use GitHub pull requests for this purpose. Consult GitHub Help for more information on using pull requests. 126 | 127 | Building Documentation 128 | ---------------------- 129 | 130 | To build the documentation locally: 131 | 132 | .. code-block:: bash 133 | 134 | cmake . -B build -DCMAKE_BUILD_TYPE=Release -DANIRA_BUILD_DOCS=ON 135 | cmake --build build --target sphinx-docs 136 | 137 | The documentation will be built in `build/docs/sphinx/html/`. 
138 | 139 | Getting Help 140 | ------------ 141 | 142 | If you have questions or need help with contributing: 143 | 144 | - Open an issue on GitHub 145 | - Reach out to the maintainers 146 | - Check the troubleshooting guide 147 | 148 | Thank you for contributing to anira! 149 | -------------------------------------------------------------------------------- /.github/actions/install/action.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | description: "Build the project with cmake" 3 | 4 | inputs: 5 | BUILD_TYPE: 6 | required: true 7 | description: "The build type" 8 | PROJECT_NAME: 9 | required: true 10 | description: "The project name" 11 | DEV_ID_APP_CERT: 12 | required: true 13 | description: "The developer id application certificate" 14 | DEV_ID_APP_PWD: 15 | required: true 16 | description: "The developer id application password" 17 | DEV_ID_APP: 18 | required: true 19 | description: "The developer id application" 20 | 21 | outputs: 22 | PROJECT_VERSION: 23 | value: ${{ steps.get_project_version.outputs.PROJECT_VERSION }} 24 | description: "The version of the project" 25 | PACKAGE_DIR: 26 | value: ${{ steps.declare_artefact_variables.outputs.PACKAGE_DIR }} 27 | description: "The packaging directory" 28 | PRODUCT_NAME: 29 | value: ${{ steps.declare_artefact_variables.outputs.PRODUCT_NAME }} 30 | description: "The product name" 31 | 32 | runs: 33 | using: "composite" 34 | steps: 35 | # Build the install target 36 | - name: install target 37 | shell: bash 38 | run: cmake --install build --config ${{ inputs.BUILD_TYPE }} 39 | 40 | - name: get project version 41 | id: get_project_version 42 | shell: bash 43 | run: | 44 | version=$(grep 'CMAKE_PROJECT_VERSION:STATIC' build/CMakeCache.txt | cut -d'=' -f2) 45 | echo "PROJECT_VERSION=${version}" >> $GITHUB_OUTPUT 46 | echo "current project version: ${version}" 47 | 48 | # Declaring the product name and the packaging directory 49 | - name: declare artefact variables 50 | id: declare_artefact_variables 51 | shell: bash 52 | run: | 53 | echo "PACKAGE_DIR=artefacts/${{ inputs.PROJECT_NAME }}-${{ steps.get_project_version.outputs.PROJECT_VERSION }}-${{ matrix.name }}" >> $GITHUB_OUTPUT 54 | echo "PRODUCT_NAME=${{ inputs.PROJECT_NAME }}-${{ steps.get_project_version.outputs.PROJECT_VERSION }}-${{ matrix.name }}" >> $GITHUB_OUTPUT 55 | 56 | # Moving the artefacts to a packaging directory 57 | - name: move artefacts 58 | shell: bash 59 | run: | 60 | mkdir -p artefacts; 61 | mv "build/${{ inputs.PROJECT_NAME }}-${{ steps.get_project_version.outputs.PROJECT_VERSION }}" ${{ steps.declare_artefact_variables.outputs.PACKAGE_DIR }}; 62 | 63 | # We need to import the apple developer certificate so that we can codesign our binaries 64 | - name: import certificates (macOS) 65 | uses: apple-actions/import-codesign-certs@v3 66 | if: ${{ matrix.os == 'macOS-latest' }} 67 | with: 68 | # GitHub encrypted secrets 69 | p12-file-base64: ${{ inputs.DEV_ID_APP_CERT }} 70 | p12-password: ${{ inputs.DEV_ID_APP_PWD }} 71 | 72 | # Codesigning all the libraries 73 | - name: codesign (macOS) 74 | shell: bash 75 | if: ${{ matrix.os == 'macOS-latest' }} 76 | run: | 77 | # codesign all libs 78 | codesign --force -s "${{ inputs.DEV_ID_APP }}" -v ${{ steps.declare_artefact_variables.outputs.PACKAGE_DIR }}/lib/*.dylib --deep --strict --options=runtime --timestamp; 79 | 80 | # Zip the artefact 81 | - name: zip artefacts 82 | working-directory: ${{github.workspace}}/artefacts 83 | shell: bash 84 | run: | 85 | if [ "${{ 
matrix.name }}" == "Linux-x86_64" ]; then 86 | zip -r ${{ steps.declare_artefact_variables.outputs.PRODUCT_NAME }}.zip ${{ steps.declare_artefact_variables.outputs.PRODUCT_NAME }}/ 87 | elif [ "${{ matrix.os }}" == "macOS-latest" ]; then 88 | zip -vr ${{ steps.declare_artefact_variables.outputs.PRODUCT_NAME }}.zip ${{ steps.declare_artefact_variables.outputs.PRODUCT_NAME }}/ -x "*.DS_Store" 89 | elif [ "${{ matrix.name }}" == "Windows-x86_64" ]; then 90 | pwsh -command "Compress-Archive -Path '${{ steps.declare_artefact_variables.outputs.PRODUCT_NAME }}/' -DestinationPath '${{ steps.declare_artefact_variables.outputs.PRODUCT_NAME }}.zip'" 91 | else 92 | echo "Unknown OS"; 93 | fi; 94 | 95 | - name: upload artifact 96 | uses: actions/upload-artifact@v4 97 | with: 98 | name: ${{ steps.declare_artefact_variables.outputs.PRODUCT_NAME }}.zip 99 | path: ${{ steps.declare_artefact_variables.outputs.PACKAGE_DIR }}.zip -------------------------------------------------------------------------------- /test/utils/test_Buffer.cpp: -------------------------------------------------------------------------------- 1 | #include "gtest/gtest.h" 2 | #include 3 | 4 | using namespace anira; 5 | TEST(Buffer, SimpleWrite){ 6 | BufferF buffer = BufferF(1,10); 7 | for (size_t i = 0; i < buffer.get_num_samples(); i++){ 8 | EXPECT_FLOAT_EQ(0.f, buffer.get_sample(0,i)); 9 | } 10 | 11 | buffer.set_sample(0,5, 0.9f); 12 | 13 | for (size_t i = 0; i < buffer.get_num_samples(); i++){ 14 | float expected = i == 5 ? 0.9f : 0.f; 15 | EXPECT_FLOAT_EQ(expected, buffer.get_sample(0,i)); 16 | } 17 | 18 | buffer.clear(); 19 | for (size_t i = 0; i < buffer.get_num_samples(); i++){ 20 | EXPECT_FLOAT_EQ(0.f, buffer.get_sample(0,i)); 21 | } 22 | } 23 | 24 | TEST(Buffer, BlockSwap){ 25 | int block_size = 10; 26 | 27 | MemoryBlock block; 28 | anira::Buffer buffer(1, block_size); 29 | 30 | // fill blocks 31 | block.resize(block_size); 32 | for (int i = 0; i < block_size; i++){ 33 | block[i] = i; 34 | buffer.set_sample(0, i, i+block_size); 35 | } 36 | 37 | // check that buffers were filled corerctly 38 | for (int i = 0; i < block_size; i++) 39 | { 40 | ASSERT_EQ(block[i], i); 41 | ASSERT_EQ(buffer.get_sample(0,i), i+block_size); 42 | } 43 | 44 | int* block_ptr = block.data(); 45 | int* buffer_ptr = buffer.data(); 46 | 47 | // Do the swap 48 | block.swap_data(buffer.get_memory_block()); 49 | buffer.reset_channel_ptr(); 50 | 51 | // check that the blocks were actually swapped 52 | ASSERT_EQ(block_ptr, buffer.data()); 53 | ASSERT_EQ(buffer_ptr, block.data()); 54 | 55 | // check that buffer values were correctly swapped 56 | for (int i = 0; i < block_size; i++) 57 | { 58 | ASSERT_EQ(block[i], i+block_size); 59 | ASSERT_EQ(buffer.get_sample(0,i), i); 60 | } 61 | } 62 | 63 | TEST(Buffer, BufferSwap){ 64 | int block_size = 10; 65 | 66 | anira::Buffer buffer1(1, block_size); 67 | anira::Buffer buffer2(1, block_size); 68 | 69 | // fill buffers 70 | for (int i = 0; i < block_size; i++){ 71 | buffer1.set_sample(0, i, i); 72 | buffer2.set_sample(0, i, i+block_size); 73 | } 74 | 75 | // check that buffers were filled corerctly 76 | for (int i = 0; i < block_size; i++) 77 | { 78 | ASSERT_EQ(buffer1.get_sample(0,i), i); 79 | ASSERT_EQ(buffer2.get_sample(0,i), i+block_size); 80 | } 81 | 82 | int* buffer1_ptr = buffer1.data(); 83 | int* buffer2_ptr = buffer2.data(); 84 | 85 | // Do the swap 86 | buffer1.swap_data(buffer2); 87 | 88 | // check that the blocks were actually swapped 89 | ASSERT_EQ(buffer1_ptr, buffer2.data()); 90 | ASSERT_EQ(buffer2_ptr, 
buffer1.data()); 91 | 92 | // check that buffer values were correctly swapped 93 | for (int i = 0; i < block_size; i++) 94 | { 95 | ASSERT_EQ(buffer1.get_sample(0,i), i+block_size); 96 | ASSERT_EQ(buffer2.get_sample(0,i), i); 97 | } 98 | } 99 | TEST(Buffer, InvalidSizeSwap){ 100 | anira::Buffer buffer1(1, 5); 101 | anira::Buffer buffer2(1, 6); 102 | int* buffer1_ptr = buffer1.data(); 103 | int* buffer2_ptr = buffer2.data(); 104 | 105 | testing::internal::CaptureStderr(); 106 | buffer1.swap_data(buffer2); 107 | 108 | std::string output = testing::internal::GetCapturedStderr(); 109 | 110 | // check that the blocks were actually swapped 111 | ASSERT_EQ(buffer1_ptr, buffer1.data()); 112 | ASSERT_EQ(buffer2_ptr, buffer2.data()); 113 | ASSERT_EQ(output, std::string("Cannot swap data, buffers have different number of channels or sizes!\n")); 114 | } 115 | 116 | TEST(Buffer, InvalidChannelsSwap){ 117 | anira::Buffer buffer1(2, 5); 118 | anira::Buffer buffer2(1, 5); 119 | int* buffer1_ptr = buffer1.data(); 120 | int* buffer2_ptr = buffer2.data(); 121 | 122 | testing::internal::CaptureStderr(); 123 | buffer1.swap_data(buffer2); 124 | 125 | std::string output = testing::internal::GetCapturedStderr(); 126 | 127 | // check that the blocks were actually swapped 128 | ASSERT_EQ(buffer1_ptr, buffer1.data()); 129 | ASSERT_EQ(buffer2_ptr, buffer2.data()); 130 | ASSERT_EQ(output, std::string("Cannot swap data, buffers have different number of channels or sizes!\n")); 131 | } 132 | -------------------------------------------------------------------------------- /docs/sphinx/examples.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ======== 3 | 4 | This section provides examples and demonstrations of how to use anira in various contexts, from simple audio processing to complex real-time plugins. 5 | 6 | Built-in Examples 7 | ----------------- 8 | 9 | anira comes with several built-in examples that demonstrate different use cases and integration patterns. These examples are available when building with ``-DANIRA_WITH_EXAMPLES=ON``. 10 | 11 | JUCE Audio Plugin 12 | ~~~~~~~~~~~~~~~~~ 13 | 14 | **Location**: ``examples/juce-audio-plugin/`` 15 | 16 | This example demonstrates how to integrate anira into a JUCE-based VST3 plugin for real-time audio processing. It shows: 17 | 18 | - Setting up anira within a JUCE plugin architecture 19 | - Managing real-time constraints in an audio plugin context 20 | - Handling parameter changes and state management 21 | - Building and deploying a VST3 plugin with neural network inference 22 | 23 | Key files: 24 | 25 | - ``PluginProcessor.h/cpp``: Main plugin processor with anira integration 26 | - ``PluginParameters.h/cpp``: Parameter management 27 | - ``CMakeLists.txt``: Build configuration for JUCE plugin 28 | 29 | **Building**: 30 | 31 | .. code-block:: bash 32 | 33 | cmake . -B build -DANIRA_WITH_EXAMPLES=ON 34 | cmake --build build --target anira-juce-plugin-example_VST3 35 | 36 | .. note:: 37 | In the JUCE plugin example, there are several different models available. You can select the model by changing the ``MODEL_TO_USE`` variable in the ``CMakeLists.txt`` file. 
38 | 39 | CLAP Plugin Example 40 | ~~~~~~~~~~~~~~~~~~~ 41 | 42 | **Location**: ``examples/clap-audio-plugin/`` 43 | 44 | Demonstrates anira integration with the CLAP (CLever Audio Plugin) format: 45 | 46 | - CLAP plugin architecture with anira 47 | - Real-time audio processing with neural networks 48 | - Modern plugin format implementation 49 | 50 | Key files: 51 | 52 | - ``anira-clap-demo.h/cpp``: Main CLAP plugin implementation 53 | - ``anira-clap-demo-pluginentry.cpp``: Plugin entry point 54 | 55 | **Building**: 56 | 57 | .. code-block:: bash 58 | 59 | cmake . -B build -DANIRA_WITH_EXAMPLES=ON 60 | cmake --build build --target anira-clap-demo 61 | 62 | Benchmark Examples 63 | ~~~~~~~~~~~~~~~~~~ 64 | 65 | **Location**: ``examples/benchmark/`` 66 | 67 | Three different benchmark examples showing various benchmarking scenarios: 68 | 69 | Simple Benchmark 70 | ^^^^^^^^^^^^^^^^ 71 | 72 | **Location**: ``examples/benchmark/simple-benchmark/`` 73 | 74 | Basic benchmarking setup demonstrating: 75 | 76 | - Single configuration benchmarking 77 | - Basic performance measurement 78 | - Simple benchmark fixture usage 79 | 80 | CNN Size Benchmark 81 | ^^^^^^^^^^^^^^^^^^ 82 | 83 | **Location**: ``examples/benchmark/cnn-size-benchmark/`` 84 | 85 | Benchmarks different CNN model sizes to evaluate: 86 | 87 | - Performance scaling with model complexity 88 | - Memory usage patterns 89 | - Optimal model size selection for real-time constraints 90 | 91 | Advanced Benchmark 92 | ^^^^^^^^^^^^^^^^^^ 93 | 94 | **Location**: ``examples/benchmark/advanced-benchmark/`` 95 | 96 | Comprehensive benchmarking suite featuring: 97 | 98 | - Multiple configuration testing 99 | - Parameterized benchmarks 100 | - Statistical analysis 101 | - Performance comparison across backends 102 | 103 | Minimal Inference Examples 104 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 105 | 106 | **Location**: ``examples/minimal-inference/`` 107 | 108 | These examples show the minimal code required to perform inference with each backend supported by anira. They do not use the anira library, but show how to use the underlying libraries directly. 109 | 110 | External Examples 111 | ----------------- 112 | 113 | Neural Network Inference Template 114 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 115 | 116 | **Repository**: `nn-inference-template `_ 117 | 118 | A more comprehensive JUCE/VST3 plugin template that uses anira for real-time safe neural network inference. This plugin is more complex than the simple JUCE Audio Plugin example and features: 119 | 120 | - Professional GUI implementation 121 | - Advanced parameter management 122 | - State saving and loading 123 | - Real-world plugin architecture patterns 124 | - Production-ready code structure 125 | 126 | This template serves as an excellent starting point for developing commercial audio plugins with neural network processing. 
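Whichever example you start from, the anira-specific setup is the same: create an :cpp:struct:`anira::InferenceConfig` and a pre-/post-processor, hand both to an :cpp:class:`anira::InferenceHandler`, prepare it with the host's buffer size and sample rate, and select a backend. The sketch below condenses this pattern; the hybrid-nn model configuration, the include paths, and the 512-sample / 48 kHz host settings are placeholders borrowed from the bundled examples:

.. code-block:: cpp

    #include <anira/anira.h>
    #include "extras/models/hybrid-nn/HybridNNConfig.h"           // defines hybridnn_config
    #include "extras/models/hybrid-nn/HybridNNPrePostProcessor.h" // model-specific pre/post processing

    anira::InferenceConfig inference_config = hybridnn_config;
    HybridNNPrePostProcessor pp_processor(inference_config);
    anira::InferenceHandler inference_handler(pp_processor, inference_config);

    // Called once, e.g. from prepareToPlay() / activate()
    anira::HostConfig host_config = {512.0f, 48000.0f};
    inference_handler.prepare(host_config);
    inference_handler.set_inference_backend(anira::InferenceBackend::ONNX);

    // Called per audio block (in-place processing):
    // inference_handler.process(channel_pointers, num_samples);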
127 | -------------------------------------------------------------------------------- /cmake/SetupOnnxRuntime.cmake: -------------------------------------------------------------------------------- 1 | set(LIBONNXRUNTIME_VERSION 1.19.2) 2 | 3 | option(ONNXRUNTIME_ROOTDIR "onnxruntime root dir") 4 | set(ONNXRUNTIME_DIR_NAME "onnxruntime-${LIBONNXRUNTIME_VERSION}-${ANIRA_OPERATING_SYSTEM}-${CMAKE_SYSTEM_PROCESSOR}") 5 | set(ONNXRUNTIME_ROOTDIR ${CMAKE_CURRENT_SOURCE_DIR}/modules/${ONNXRUNTIME_DIR_NAME}) 6 | 7 | if(EXISTS ${ONNXRUNTIME_ROOTDIR}/) 8 | message(STATUS "ONNX-Runtime library found at ${ONNXRUNTIME_ROOTDIR}") 9 | else() 10 | file(MAKE_DIRECTORY ${ONNXRUNTIME_ROOTDIR}/) 11 | message(STATUS "ONNX-Runtime library not found - downloading pre-built library.") 12 | 13 | if(WIN32) 14 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME "onnxruntime-win-x64-${LIBONNXRUNTIME_VERSION}") 15 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_TYPE "zip") 16 | endif() 17 | 18 | if(UNIX AND NOT APPLE) 19 | if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64") 20 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME "onnxruntime-linux-aarch64-${LIBONNXRUNTIME_VERSION}") 21 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_TYPE "tgz") 22 | elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") 23 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME "onnxruntime-linux-x64-${LIBONNXRUNTIME_VERSION}") 24 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_TYPE "tgz") 25 | elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l") 26 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME "onnxruntime-${LIBONNXRUNTIME_VERSION}-Linux-armv7l") 27 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_TYPE "tar.gz") 28 | endif() 29 | endif() 30 | 31 | if(UNIX AND APPLE) 32 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME "onnxruntime-osx-universal2-${LIBONNXRUNTIME_VERSION}") 33 | set(LIB_ONNXRUNTIME_PRE_BUILD_LIB_TYPE "tgz") 34 | endif() 35 | 36 | if(UNIX AND NOT APPLE AND CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l") 37 | set(LIBONNXRUNTIME_URL https://github.com/faressc/onnxruntime-cpp-lib/releases/download/v${LIBONNXRUNTIME_VERSION}/${LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME}.${LIB_ONNXRUNTIME_PRE_BUILD_LIB_TYPE}) 38 | else() 39 | set(LIBONNXRUNTIME_URL https://github.com/microsoft/onnxruntime/releases/download/v${LIBONNXRUNTIME_VERSION}/${LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME}.${LIB_ONNXRUNTIME_PRE_BUILD_LIB_TYPE}) 40 | endif() 41 | 42 | message(STATUS "Downloading: ${LIBONNXRUNTIME_URL}") 43 | 44 | set(LIBONNXRUNTIME_PATH ${CMAKE_BINARY_DIR}/import/${LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME}.${LIB_ONNXRUNTIME_PRE_BUILD_LIB_TYPE}) 45 | 46 | file(DOWNLOAD ${LIBONNXRUNTIME_URL} ${LIBONNXRUNTIME_PATH} STATUS LIBONNXRUNTIME_DOWNLOAD_STATUS SHOW_PROGRESS) 47 | list(GET LIBONNXRUNTIME_DOWNLOAD_STATUS 0 LIBONNXRUNTIME_DOWNLOAD_STATUS_NO) 48 | 49 | if(UNIX AND NOT APPLE AND CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l") 50 | execute_process( 51 | COMMAND tar -xzf ${LIBONNXRUNTIME_PATH} -C ${ONNXRUNTIME_ROOTDIR}/ 52 | WORKING_DIRECTORY ${ONNXRUNTIME_ROOTDIR}/) 53 | else() 54 | file(ARCHIVE_EXTRACT 55 | INPUT ${LIBONNXRUNTIME_PATH} 56 | DESTINATION ${ONNXRUNTIME_ROOTDIR}/) 57 | endif() 58 | 59 | if(EXISTS ${ONNXRUNTIME_ROOTDIR}/${LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME}/) 60 | file(COPY ${ONNXRUNTIME_ROOTDIR}/${LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME}/ DESTINATION ${ONNXRUNTIME_ROOTDIR}/) 61 | file(REMOVE_RECURSE ${ONNXRUNTIME_ROOTDIR}/${LIB_ONNXRUNTIME_PRE_BUILD_LIB_NAME}) 62 | endif() 63 | 64 | if(LIBONNXRUNTIME_DOWNLOAD_STATUS_NO) 65 | message(STATUS "Pre-built library not downloaded. 
Error occurred, try again and check cmake/SetupOnnxRuntime.cmake") 66 | file(REMOVE_RECURSE ${ONNXRUNTIME_ROOTDIR}) 67 | file(REMOVE ${LIBONNXRUNTIME_PATH}) 68 | else() 69 | message(STATUS "Linking downloaded ONNX-Runtime pre-built library.") 70 | endif() 71 | endif() 72 | 73 | set(ANIRA_ONNXRUNTIME_SHARED_LIB_PATH "${ONNXRUNTIME_ROOTDIR}") 74 | 75 | # Set the variable in the parent scope as well 76 | get_directory_property(hasParent PARENT_DIRECTORY) 77 | if(hasParent) 78 | set(ANIRA_ONNXRUNTIME_SHARED_LIB_PATH "${ANIRA_ONNXRUNTIME_SHARED_LIB_PATH}" PARENT_SCOPE) 79 | endif() 80 | 81 | if(UNIX AND NOT APPLE AND CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l") 82 | list(APPEND BACKEND_BUILD_HEADER_DIRS "${ONNXRUNTIME_ROOTDIR}/include/onnxruntime") 83 | else() 84 | list(APPEND BACKEND_BUILD_HEADER_DIRS "${ONNXRUNTIME_ROOTDIR}/include") 85 | endif() 86 | 87 | list(APPEND BACKEND_BUILD_LIBRARY_DIRS "${ONNXRUNTIME_ROOTDIR}/lib") -------------------------------------------------------------------------------- /test/utils/test_JsonConfigLoader.cpp: -------------------------------------------------------------------------------- 1 | #include "gtest/gtest.h" 2 | #include 3 | 4 | #ifdef USE_LIBTORCH 5 | #ifdef USE_TFLITE 6 | #ifdef USE_ONNXRUNTIME 7 | 8 | #include "../../extras/models/third-party/ircam-acids/RaveFunkDrumConfig.h" 9 | #include "../../extras/models/third-party/ircam-acids/RaveFunkDrumConfigEncoder.h" 10 | #include "../../extras/models/third-party/ircam-acids/RaveFunkDrumConfigDecoder.h" 11 | #include "../../extras/models/model-pool/SimpleGainConfig.h" 12 | 13 | using namespace anira; 14 | 15 | void expect_inference_config_eq(const InferenceConfig& a, const InferenceConfig& b) { 16 | // High level comparison 17 | EXPECT_EQ(a.m_model_data.size(), b.m_model_data.size()); 18 | EXPECT_EQ(a.m_tensor_shape.size(), b.m_tensor_shape.size()); 19 | EXPECT_FLOAT_EQ(a.m_max_inference_time, b.m_max_inference_time); 20 | EXPECT_EQ(a.m_warm_up, b.m_warm_up); 21 | EXPECT_EQ(a.m_session_exclusive_processor, b.m_session_exclusive_processor); 22 | EXPECT_FLOAT_EQ(a.m_blocking_ratio, b.m_blocking_ratio); 23 | EXPECT_EQ(a.m_num_parallel_processors, b.m_num_parallel_processors); 24 | 25 | // Model data comparison 26 | for (size_t i = 0; i < a.m_model_data.size(); ++i) { 27 | const auto& model_data_a = a.m_model_data[i]; 28 | const auto& model_data_b = b.m_model_data[i]; 29 | 30 | EXPECT_EQ(model_data_a.m_size, model_data_b.m_size) << "Mismatch in m_size for model_data[" << i << "]"; 31 | EXPECT_EQ(model_data_a.m_backend, model_data_b.m_backend) << "Mismatch in m_backend for model_data[" << i << "]"; 32 | EXPECT_EQ(model_data_a.m_model_function, model_data_b.m_model_function) << "Mismatch in m_model_function for model_data[" << i << "]"; 33 | EXPECT_EQ(model_data_a.m_is_binary, model_data_b.m_is_binary) << "Mismatch in m_is_binary for model_data[" << i << "]"; 34 | 35 | ASSERT_NE(model_data_a.m_data, nullptr); 36 | ASSERT_NE(model_data_b.m_data, nullptr); 37 | EXPECT_EQ(std::memcmp(model_data_a.m_data, model_data_b.m_data, model_data_a.m_size), 0) << "Mismatch in model data bytes for model_data[" << i << "]"; 38 | } 39 | 40 | // Tensor shape comparison 41 | for (size_t i = 0; i < a.m_tensor_shape.size(); ++i) { 42 | const auto& tensor_shape_a = a.m_tensor_shape[i]; 43 | const auto& tensor_shape_b = b.m_tensor_shape[i]; 44 | 45 | EXPECT_EQ(tensor_shape_a.m_universal, tensor_shape_b.m_universal) << "Mismatch in m_universal for tensor_shape[" << i << "]"; 46 | 47 | if (!tensor_shape_a.m_universal && 
!tensor_shape_b.m_universal) { 48 | EXPECT_EQ(tensor_shape_a.m_backend, tensor_shape_b.m_backend) << "Mismatch in m_backend for tensor_shape[" << i << "]"; 49 | } 50 | 51 | EXPECT_EQ(tensor_shape_a.m_tensor_input_shape, tensor_shape_b.m_tensor_input_shape) << "Mismatch in m_tensor_input_shape for tensor_shape[" << i << "]"; 52 | EXPECT_EQ(tensor_shape_a.m_tensor_output_shape, tensor_shape_b.m_tensor_output_shape) << "Mismatch in m_tensor_output_shape for tensor_shape[" << i << "]"; 53 | } 54 | 55 | // ProcessingSpec comparison 56 | const auto& processing_spec_a = a.m_processing_spec; 57 | const auto& processing_spec_b = b.m_processing_spec; 58 | 59 | EXPECT_EQ(processing_spec_a.m_preprocess_input_channels, processing_spec_b.m_preprocess_input_channels); 60 | EXPECT_EQ(processing_spec_a.m_postprocess_output_channels, processing_spec_b.m_postprocess_output_channels); 61 | EXPECT_EQ(processing_spec_a.m_preprocess_input_size, processing_spec_b.m_preprocess_input_size); 62 | EXPECT_EQ(processing_spec_a.m_postprocess_output_size, processing_spec_b.m_postprocess_output_size); 63 | EXPECT_EQ(processing_spec_a.m_internal_model_latency, processing_spec_b.m_internal_model_latency); 64 | 65 | // Final check using the equality operator 66 | EXPECT_EQ(a, b); 67 | } 68 | 69 | // Test basic initialization 70 | TEST(JsonConfigLoader, EqualInferenceConfig) { 71 | std::vector> test_configs; 72 | 73 | JsonConfigLoader funk_drum_json_loader(RAVE_MODEL_FUNK_DRUM_JSON_CONFIG_PATH); 74 | test_configs.push_back({*funk_drum_json_loader.get_inference_config(), rave_funk_drum_config}); 75 | 76 | JsonConfigLoader funk_drum_encode_json_loader(RAVE_MODEL_FUNK_DRUM_ENCODER_JSON_CONFIG_PATH); 77 | test_configs.push_back({*funk_drum_encode_json_loader.get_inference_config(), rave_funk_drum_encoder_config}); 78 | 79 | JsonConfigLoader funk_drum_decode_json_loader(RAVE_MODEL_FUNK_DRUM_DECODER_JSON_CONFIG_PATH); 80 | test_configs.push_back({*funk_drum_decode_json_loader.get_inference_config(), rave_funk_drum_decoder_config}); 81 | 82 | JsonConfigLoader gain_json_loader(SIMPLE_GAIN_JSON_CONFIG_PATH); 83 | test_configs.push_back({*gain_json_loader.get_inference_config(), gain_config}); 84 | 85 | for (const auto& config_pair : test_configs) { 86 | expect_inference_config_eq(config_pair[0], config_pair[1]); 87 | } 88 | } 89 | 90 | #endif // USE_ONNXRUNTIME 91 | #endif // USE_TFLITE 92 | #endif // USE_LIBTORCH -------------------------------------------------------------------------------- /src/backends/TFLiteProcessor.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #ifdef _WIN32 4 | #include 5 | #endif 6 | 7 | namespace anira { 8 | 9 | TFLiteProcessor::TFLiteProcessor(InferenceConfig& inference_config) : BackendBase(inference_config) 10 | { 11 | for (unsigned int i = 0; i < m_inference_config.m_num_parallel_processors; ++i) { 12 | m_instances.emplace_back(std::make_shared(m_inference_config)); 13 | } 14 | } 15 | 16 | TFLiteProcessor::~TFLiteProcessor() { 17 | } 18 | 19 | void TFLiteProcessor::prepare() { 20 | for(auto& instance : m_instances) { 21 | instance->prepare(); 22 | } 23 | } 24 | 25 | void TFLiteProcessor::process(std::vector& input, std::vector& output, std::shared_ptr session) { 26 | while (true) { 27 | for(auto& instance : m_instances) { 28 | if (!(instance->m_processing.exchange(true))) { 29 | instance->process(input, output, session); 30 | instance->m_processing.exchange(false); 31 | return; 32 | } 33 | } 34 | } 35 | } 36 | 37 | 
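// Note on process() above: each Instance owns an atomic m_processing flag.
// exchange(true) atomically tests and sets the flag, so the first caller that finds an
// idle instance claims it without locking; if all instances are busy, the caller spins
// over the pool until one is released (flag set back to false). The pool size is
// m_num_parallel_processors, configured in the InferenceConfig.
//
// The Instance constructor below loads the model (from an in-memory binary or from a
// file path), resizes the input tensors to the configured shapes, allocates all tensors
// once up front, and runs the configured number of warm-up inferences.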
TFLiteProcessor::Instance::Instance(InferenceConfig& inference_config) : m_inference_config(inference_config) 38 | { 39 | if (inference_config.is_model_binary(anira::InferenceBackend::TFLITE)) { 40 | const anira::ModelData* model_data = m_inference_config.get_model_data(anira::InferenceBackend::TFLITE); 41 | assert(model_data && "Model data not found for binary model!"); 42 | m_model = TfLiteModelCreate(model_data->m_data, model_data->m_size); 43 | } else { 44 | std::string modelpath = m_inference_config.get_model_path(anira::InferenceBackend::TFLITE); 45 | m_model = TfLiteModelCreateFromFile(modelpath.c_str()); 46 | } 47 | 48 | 49 | m_options = TfLiteInterpreterOptionsCreate(); 50 | TfLiteInterpreterOptionsSetNumThreads(m_options, 1); 51 | m_interpreter = TfLiteInterpreterCreate(m_model, m_options); 52 | 53 | // This is necessary when we have dynamic input shapes, it should be done before allocating tensors obviously 54 | for (size_t i = 0; i < m_inference_config.get_tensor_input_shape().size(); i++) { 55 | std::vector input_shape; 56 | std::vector input_shape64 = m_inference_config.get_tensor_input_shape(anira::InferenceBackend::TFLITE)[i]; 57 | for (size_t j = 0; j < input_shape64.size(); j++) { 58 | input_shape.push_back((int) input_shape64[j]); 59 | } 60 | TfLiteInterpreterResizeInputTensor(m_interpreter, i, input_shape.data(), static_cast(input_shape.size())); 61 | } 62 | 63 | TfLiteInterpreterAllocateTensors(m_interpreter); 64 | 65 | m_inputs.resize(m_inference_config.get_tensor_input_shape().size()); 66 | m_input_data.resize(m_inference_config.get_tensor_input_shape().size()); 67 | for (size_t i = 0; i < m_inference_config.get_tensor_input_shape().size(); i++) { 68 | m_input_data[i].resize(m_inference_config.get_tensor_input_size()[i]); 69 | m_inputs[i] = TfLiteInterpreterGetInputTensor(m_interpreter, i); 70 | } 71 | 72 | m_outputs.resize(m_inference_config.get_tensor_output_shape().size()); 73 | for (size_t i = 0; i < m_inference_config.get_tensor_output_shape().size(); i++) { 74 | m_outputs[i] = TfLiteInterpreterGetOutputTensor(m_interpreter, i); 75 | } 76 | 77 | for (size_t i = 0; i < m_inference_config.m_warm_up; i++) { 78 | TfLiteInterpreterInvoke(m_interpreter); 79 | } 80 | } 81 | 82 | TFLiteProcessor::Instance::~Instance() { 83 | TfLiteInterpreterDelete(m_interpreter); 84 | TfLiteInterpreterOptionsDelete(m_options); 85 | TfLiteModelDelete(m_model); 86 | } 87 | 88 | void TFLiteProcessor::Instance::prepare() { 89 | for (size_t i = 0; i < m_inference_config.get_tensor_input_shape().size(); i++) { 90 | m_input_data[i].clear(); 91 | } 92 | } 93 | 94 | void TFLiteProcessor::Instance::process(std::vector& input, std::vector& output, std::shared_ptr session) { 95 | for (size_t i = 0; i < m_inference_config.get_tensor_input_shape().size(); i++) { 96 | m_input_data[i].swap_data(input[i].get_memory_block()); 97 | input[i].reset_channel_ptr(); 98 | // TODO: Check if we can find a solution to avoid copying the data 99 | TfLiteTensorCopyFromBuffer(m_inputs[i], m_input_data[i].data(), m_inference_config.get_tensor_input_size()[i] * sizeof(float)); 100 | } 101 | 102 | // Run inference 103 | TfLiteInterpreterInvoke(m_interpreter); 104 | 105 | // We need to copy the data because we cannot access the data pointer ref of the tensor directly 106 | for (size_t i = 0; i < m_inference_config.get_tensor_output_shape().size(); i++) { 107 | float* output_read_ptr = (float*) TfLiteTensorData(m_outputs[i]); 108 | for (size_t j = 0; j < m_inference_config.get_tensor_output_size()[i]; j++) { 109 | 
output[i].get_memory_block()[j] = output_read_ptr[j]; 110 | } 111 | } 112 | } 113 | 114 | } // namespace anira -------------------------------------------------------------------------------- /src/utils/RingBuffer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | namespace anira { 5 | 6 | RingBuffer::RingBuffer() = default; 7 | 8 | void RingBuffer::initialize_with_positions(size_t num_channels, size_t num_samples) { 9 | resize(num_channels, num_samples); 10 | clear(); 11 | m_read_pos.resize(get_num_channels()); 12 | m_write_pos.resize(get_num_channels()); 13 | m_is_full.resize(get_num_channels()); 14 | 15 | for (size_t i = 0; i < m_read_pos.size(); i++) { 16 | m_read_pos[i] = 0; 17 | m_write_pos[i] = 0; 18 | m_is_full[i] = false; 19 | } 20 | } 21 | 22 | void RingBuffer::clear_with_positions() { 23 | clear(); 24 | for (size_t i = 0; i < m_read_pos.size(); i++) { 25 | m_read_pos[i] = 0; 26 | m_write_pos[i] = 0; 27 | m_is_full[i] = false; 28 | } 29 | } 30 | 31 | void RingBuffer::push_sample(size_t channel, float sample) { 32 | // Check if we're about to overwrite unread data (buffer overflow) 33 | if (m_is_full[channel]) { 34 | LOG_ERROR << "RingBuffer: Buffer overflow detected for channel " << channel << ". Overwriting oldest sample." << std::endl; 35 | // Advance read position to make room (overwrite oldest sample) 36 | ++m_read_pos[channel]; 37 | if (m_read_pos[channel] >= get_num_samples()) { 38 | m_read_pos[channel] = 0; 39 | } 40 | } 41 | 42 | // Write the sample at the current write position 43 | set_sample(channel, m_write_pos[channel], sample); 44 | 45 | // Advance write position 46 | ++m_write_pos[channel]; 47 | if (m_write_pos[channel] >= get_num_samples()) { 48 | m_write_pos[channel] = 0; 49 | } 50 | 51 | // Update full flag - buffer is full when write position catches up to read position 52 | m_is_full[channel] = (m_write_pos[channel] == m_read_pos[channel]); 53 | } 54 | 55 | float RingBuffer::pop_sample(size_t channel) { 56 | // Check if buffer is empty 57 | if (!m_is_full[channel] && m_read_pos[channel] == m_write_pos[channel]) { 58 | LOG_ERROR << "RingBuffer: Attempted to pop sample from empty buffer for channel " << channel << ". Returning silence (0.0f)." << std::endl; 59 | return 0.0f; 60 | } 61 | 62 | auto sample = get_sample(channel, m_read_pos[channel]); 63 | 64 | ++m_read_pos[channel]; 65 | if (m_read_pos[channel] >= get_num_samples()) { 66 | m_read_pos[channel] = 0; 67 | } 68 | 69 | // Buffer is no longer full after reading 70 | m_is_full[channel] = false; 71 | 72 | return sample; 73 | } 74 | 75 | float RingBuffer::get_future_sample(size_t channel, size_t offset) { 76 | if (offset >= get_available_samples(channel)) { 77 | LOG_ERROR << "RingBuffer: Attempted to get sample with offset " << offset << " for channel " << channel << ", but only " << get_available_samples(channel) << " samples are available. Returning silence (0.0f)." << std::endl; 78 | return 0.0f; 79 | } 80 | 81 | // Calculate the actual position in the buffer 82 | size_t sample_pos = (m_read_pos[channel] + offset) % get_num_samples(); 83 | return get_sample(channel, sample_pos); 84 | } 85 | 86 | float RingBuffer::get_past_sample(size_t channel, size_t offset) { 87 | // offset 0 = the most recently read sample, offset 1 = the sample before that, etc. 
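// Example of the arithmetic below: with m_read_pos == 5 and offset == 2, the sample at index 3 is returned;
// if the offset reaches further back than index 0, the position wraps around to the end of the buffer.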
88 | if (offset > get_available_past_samples(channel)) { 89 | LOG_ERROR << "RingBuffer: Attempted to get past sample with offset " << offset << " for channel " << channel << ", but only " << get_available_past_samples(channel) << " past samples are available. Returning silence (0.0f)." << std::endl; 90 | return 0.0f; 91 | } 92 | 93 | // Calculate the position of the sample at the given offset behind the read position 94 | size_t sample_pos; 95 | if (offset <= m_read_pos[channel]) { 96 | sample_pos = m_read_pos[channel] - offset; 97 | } else { 98 | sample_pos = get_num_samples() + m_read_pos[channel] - offset; 99 | } 100 | 101 | return get_sample(channel, sample_pos); 102 | } 103 | 104 | size_t RingBuffer::get_available_samples(size_t channel) { 105 | if (m_is_full[channel]) { 106 | return get_num_samples(); // Buffer is completely full 107 | } else if (m_write_pos[channel] >= m_read_pos[channel]) { 108 | return m_write_pos[channel] - m_read_pos[channel]; 109 | } else { 110 | return m_write_pos[channel] + get_num_samples() - m_read_pos[channel]; 111 | } 112 | } 113 | 114 | size_t RingBuffer::get_available_past_samples(size_t channel) { 115 | // Calculate how many samples are available behind the read position 116 | // This represents the "empty" space that could contain past samples 117 | if (m_is_full[channel]) { 118 | return 0; // No past samples available when buffer is full 119 | } else if (m_write_pos[channel] >= m_read_pos[channel]) { 120 | return m_read_pos[channel] + get_num_samples() - m_write_pos[channel]; 121 | } else { 122 | return m_read_pos[channel] - m_write_pos[channel]; 123 | } 124 | } 125 | 126 | } // namespace anira -------------------------------------------------------------------------------- /extras/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(MODEL_REPOSITORIES 2 | "https://github.com/faressc/GuitarLSTM.git hybrid-nn/GuitarLSTM" 3 | "https://github.com/faressc/steerable-nafx.git cnn/steerable-nafx" 4 | "https://github.com/vackva/stateful-lstm.git stateful-rnn/stateful-lstm" 5 | "https://github.com/anira-project/example-models.git model-pool/example-models" 6 | ) 7 | 8 | find_package(Git QUIET) 9 | if(NOT GIT_FOUND) 10 | message(FATAL_ERROR "Git not found") 11 | endif() 12 | 13 | foreach(repo IN LISTS MODEL_REPOSITORIES) 14 | string(REPLACE " " ";" SPLIT_REPO_DETAILS ${repo}) 15 | list(GET SPLIT_REPO_DETAILS 0 REPO_URL) 16 | list(GET SPLIT_REPO_DETAILS 1 INSTALL_PATH) 17 | 18 | set(GIT_CLONE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/models/${INSTALL_PATH}") 19 | 20 | if(NOT EXISTS "${GIT_CLONE_DIR}") 21 | message(STATUS "Cloning ${REPO_URL} (main branch, latest state) into ${GIT_CLONE_DIR}") 22 | execute_process( 23 | COMMAND ${GIT_EXECUTABLE} clone --branch main --single-branch --depth 1 ${REPO_URL} ${GIT_CLONE_DIR} 24 | RESULT_VARIABLE GIT_CLONE_RESULT 25 | ) 26 | if(NOT GIT_CLONE_RESULT EQUAL "0") 27 | message(FATAL_ERROR "Git clone of ${REPO_URL} failed with ${GIT_CLONE_RESULT}") 28 | endif() 29 | endif() 30 | endforeach() 31 | 32 | # Using CACHE PATH to set the default path for the models, since PARENT_SCOPE is only propagated one level up. Cache variables are global. 
33 | set(GUITARLSTM_MODELS_PATH_TENSORFLOW "${CMAKE_CURRENT_LIST_DIR}/models/hybrid-nn/GuitarLSTM/tensorflow-version/models/" CACHE PATH "Path to the GuitarLSTM TensorFlow models") 34 | set(GUITARLSTM_MODELS_PATH_PYTORCH "${CMAKE_CURRENT_LIST_DIR}/models/hybrid-nn/GuitarLSTM/pytorch-version/models/" CACHE PATH "Path to the GuitarLSTM PyTorch models") 35 | 36 | set(STEERABLENAFX_MODELS_PATH_TENSORFLOW "${CMAKE_CURRENT_LIST_DIR}/models/cnn/steerable-nafx/models/" CACHE PATH "Path to the SteerableNAFX TensorFlow models") 37 | set(STEERABLENAFX_MODELS_PATH_PYTORCH "${CMAKE_CURRENT_LIST_DIR}/models/cnn/steerable-nafx/models/" CACHE PATH "Path to the SteerableNAFX PyTorch models") 38 | 39 | set(STATEFULLSTM_MODELS_PATH_TENSORFLOW "${CMAKE_CURRENT_LIST_DIR}/models/stateful-rnn/stateful-lstm/models/" CACHE PATH "Path to the StatefulLSTM TensorFlow models") 40 | set(STATEFULLSTM_MODELS_PATH_PYTORCH "${CMAKE_CURRENT_LIST_DIR}/models/stateful-rnn/stateful-lstm/models/" CACHE PATH "Path to the StatefulLSTM PyTorch models") 41 | 42 | set(SIMPLEGAIN_MODEL_PATH "${CMAKE_CURRENT_LIST_DIR}/models/model-pool/example-models/SimpleGainNetwork/models/" CACHE PATH "Path to the SimpleGainNetwork models") 43 | 44 | set(RAVE_FUNK_DRUM_MODEL_URL "https://github.com/anira-project/example-models/raw/refs/heads/third-party/third-party/ircam-acids/RAVE/rave_funk_drum.ts") 45 | set(RAVE_MODEL_DIR "${CMAKE_CURRENT_LIST_DIR}/models/third-party/ircam-acids/RAVE" CACHE PATH "Path to the RAVE model") 46 | set(RAVE_FUNK_DRUM_MODEL_PATH "${RAVE_MODEL_DIR}/rave_funk_drum.ts") 47 | 48 | file(MAKE_DIRECTORY "${RAVE_MODEL_DIR}") 49 | 50 | if(NOT EXISTS "${RAVE_FUNK_DRUM_MODEL_PATH}") 51 | message(STATUS "Downloading RAVE model from ${RAVE_FUNK_DRUM_MODEL_URL} to ${RAVE_FUNK_DRUM_MODEL_PATH}") 52 | file(DOWNLOAD 53 | "${RAVE_FUNK_DRUM_MODEL_URL}" 54 | "${RAVE_FUNK_DRUM_MODEL_PATH}" 55 | SHOW_PROGRESS 56 | STATUS DOWNLOAD_STATUS 57 | LOG DOWNLOAD_LOG 58 | ) 59 | list(GET DOWNLOAD_STATUS 0 DOWNLOAD_RESULT) 60 | if(NOT DOWNLOAD_RESULT EQUAL 0) 61 | message(FATAL_ERROR "Failed to download RAVE model: ${DOWNLOAD_LOG}") 62 | endif() 63 | endif() 64 | 65 | configure_file( 66 | "${CMAKE_CURRENT_SOURCE_DIR}/models/third-party/ircam-acids/RaveFunkDrumConfig.json.in" 67 | "${CMAKE_CURRENT_SOURCE_DIR}/models/third-party/ircam-acids/RaveFunkDrumConfig.json" 68 | ) 69 | 70 | configure_file( 71 | "${CMAKE_CURRENT_SOURCE_DIR}/models/third-party/ircam-acids/RaveFunkDrumConfigDecoder.json.in" 72 | "${CMAKE_CURRENT_SOURCE_DIR}/models/third-party/ircam-acids/RaveFunkDrumConfigDecoder.json" 73 | ) 74 | 75 | configure_file( 76 | "${CMAKE_CURRENT_SOURCE_DIR}/models/third-party/ircam-acids/RaveFunkDrumConfigEncoder.json.in" 77 | "${CMAKE_CURRENT_SOURCE_DIR}/models/third-party/ircam-acids/RaveFunkDrumConfigEncoder.json" 78 | ) 79 | 80 | configure_file( 81 | "${CMAKE_CURRENT_SOURCE_DIR}/models/model-pool/SimpleGainConfig.json.in" 82 | "${CMAKE_CURRENT_SOURCE_DIR}/models/model-pool/SimpleGainConfig.json" 83 | ) 84 | 85 | set(RAVE_MODEL_FUNK_DRUM_JSON_CONFIG_PATH "${CMAKE_CURRENT_SOURCE_DIR}/models/third-party/ircam-acids/RaveFunkDrumConfig.json" CACHE PATH "Path to the RAVE json model config") 86 | set(RAVE_MODEL_FUNK_DRUM_DECODER_JSON_CONFIG_PATH "${CMAKE_CURRENT_SOURCE_DIR}/models/third-party/ircam-acids/RaveFunkDrumConfigDecoder.json" CACHE PATH "Path to the RAVE json model config") 87 | set(RAVE_MODEL_FUNK_DRUM_ENCODER_JSON_CONFIG_PATH "${CMAKE_CURRENT_SOURCE_DIR}/models/third-party/ircam-acids/RaveFunkDrumConfigEncoder.json" CACHE PATH "Path to the RAVE 
json model config") 88 | set(SIMPLE_GAIN_JSON_CONFIG_PATH "${CMAKE_CURRENT_SOURCE_DIR}/models/model-pool/SimpleGainConfig.json" CACHE PATH "Path to the RAVE json model config") 89 | -------------------------------------------------------------------------------- /src/system/HighPriorityThread.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | namespace anira { 5 | 6 | HighPriorityThread::HighPriorityThread() : m_should_exit(false){ 7 | } 8 | 9 | HighPriorityThread::~HighPriorityThread() { 10 | stop(); 11 | } 12 | 13 | void HighPriorityThread::start() { 14 | if (!m_thread.joinable()) { 15 | m_should_exit = false; 16 | #if __linux__ 17 | pthread_attr_t thread_attr; 18 | pthread_attr_init(&thread_attr); 19 | pthread_attr_setinheritsched(&thread_attr, PTHREAD_EXPLICIT_SCHED); 20 | pthread_setattr_default_np(&thread_attr); 21 | #endif 22 | 23 | 24 | m_thread = std::thread(&HighPriorityThread::run, this); 25 | 26 | #if __linux__ 27 | pthread_attr_destroy(&thread_attr); 28 | #endif 29 | 30 | elevate_priority(m_thread.native_handle()); 31 | m_is_running = true; 32 | } 33 | } 34 | 35 | void HighPriorityThread::stop() { 36 | m_should_exit = true; 37 | if (m_thread.joinable()) { 38 | m_thread.join(); 39 | m_is_running = false; 40 | } 41 | } 42 | 43 | void HighPriorityThread::elevate_priority(std::thread::native_handle_type thread_native_handle, bool is_main_process) { 44 | #if WIN32 45 | if (is_main_process) { 46 | if (!SetPriorityClass(GetCurrentProcess(), REALTIME_PRIORITY_CLASS)) { 47 | LOG_ERROR << "[ERROR] Failed to set real-time priority for process. Error: " << GetLastError() << std::endl; 48 | } 49 | } 50 | 51 | int priorities[] = {THREAD_PRIORITY_TIME_CRITICAL, THREAD_PRIORITY_HIGHEST, THREAD_PRIORITY_ABOVE_NORMAL}; 52 | for (int priority : priorities) { 53 | if (SetThreadPriority(thread_native_handle, priority)) { 54 | return; 55 | } else { 56 | LOG_ERROR << "[ERROR] Failed to set thread priority for Thread. Current priority: " << priority << std::endl; 57 | } 58 | } 59 | #elif __linux__ 60 | int ret; 61 | 62 | if (!is_main_process) { 63 | int attr_inheritsched; 64 | pthread_attr_t thread_attr; 65 | ret = pthread_getattr_np(thread_native_handle, &thread_attr); 66 | ret = pthread_attr_getinheritsched(&thread_attr, &attr_inheritsched); 67 | if(ret != 0) { 68 | LOG_ERROR << "[ERROR] Failed to get Thread scheduling policy and params : " << errno << std::endl;\ 69 | } 70 | if (attr_inheritsched != PTHREAD_EXPLICIT_SCHED) { 71 | LOG_ERROR << "[ERROR] Thread scheduling policy is not PTHREAD_EXPLICIT_SCHED. Possibly thread attributes get inherited from the main process." << std::endl; 72 | } 73 | pthread_attr_destroy(&thread_attr); 74 | } 75 | 76 | int sch_policy; 77 | struct sched_param sch_params; 78 | 79 | ret = pthread_getschedparam(thread_native_handle, &sch_policy, &sch_params); 80 | if(ret != 0) { 81 | LOG_ERROR << "[ERROR] Failed to get Thread scheduling policy and params : " << errno << std::endl; 82 | } 83 | 84 | // Pipewire uses SCHED_FIFO 60 and juce plugin host uses SCHED_FIFO 55 better stay below 85 | sch_params.sched_priority = 50; 86 | 87 | ret = pthread_setschedparam(thread_native_handle, SCHED_FIFO, &sch_params); 88 | if(ret != 0) { 89 | LOG_ERROR << "[ERROR] Failed to set Thread scheduling policy to SCHED_FIFO and increase the sched_priority to " << sch_params.sched_priority << ". 
Error : " << errno << std::endl; 90 | LOG_INFO << "[WARNING] Give rtprio privileges to the user by adding the user to the realtime/audio group. Or run the application as root." << std::endl; 91 | LOG_INFO << "[WARNING] Instead, trying to set increased nice value for SCHED_OTHER..." << std::endl; 92 | 93 | ret = setpriority(PRIO_PROCESS, 0, -10); 94 | if(ret != 0) { 95 | LOG_ERROR << "[ERROR] Failed to set increased nice value. Error : " << errno << std::endl; 96 | LOG_INFO << "[WARNING] Using default nice value: " << getpriority(PRIO_PROCESS, 0) << std::endl; 97 | } 98 | } 99 | 100 | return; 101 | #elif __APPLE__ 102 | int ret; 103 | 104 | ret = pthread_set_qos_class_self_np(QOS_CLASS_USER_INTERACTIVE, 0); 105 | if (ret != 0) { 106 | LOG_ERROR << "[ERROR] Failed to set Thread QOS class to QOS_CLASS_USER_INTERACTIVE. Error : " << ret << std::endl; 107 | } else { 108 | return; 109 | } 110 | 111 | LOG_ERROR << "[ERROR] Failed to set Thread QOS class and relative priority. Error: " << ret << std::endl; 112 | 113 | qos_class_t qos_class; 114 | int relative_priority; 115 | pthread_get_qos_class_np(pthread_self(), &qos_class, &relative_priority); 116 | 117 | LOG_INFO << "[WARNING] Fallback to default QOS class and relative priority: " << qos_class << " " << relative_priority << std::endl; 118 | return; 119 | #endif 120 | } 121 | 122 | bool HighPriorityThread::should_exit() { 123 | return m_should_exit.load(); 124 | } 125 | 126 | bool HighPriorityThread::is_running() { 127 | return m_thread.joinable() && m_is_running.load(); 128 | } 129 | 130 | } // namespace anira -------------------------------------------------------------------------------- /src/scheduler/InferenceThread.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | namespace anira { 5 | 6 | InferenceThread::InferenceThread(moodycamel::ConcurrentQueue& next_inference) : 7 | m_next_inference(next_inference) 8 | { 9 | } 10 | 11 | InferenceThread::~InferenceThread() { 12 | stop(); 13 | } 14 | 15 | void InferenceThread::run() { 16 | while (!should_exit()) { 17 | constexpr std::array iterations = {4, 32}; 18 | // The times for the exponential backoff. The first loop is insteadly trying to acquire the atomic counter. The second loop is waiting for approximately 100ns. Beyond that, the thread will yield and sleep for 100us. 
19 | exponential_backoff(iterations); 20 | } 21 | } 22 | 23 | void InferenceThread::exponential_backoff(std::array iterations) { 24 | for (int i = 0; i < iterations[0]; i++) { 25 | if (should_exit()) return; 26 | if (execute()) return; 27 | } 28 | for (int i = 0; i < iterations[1]; i++) { 29 | if (should_exit()) return; 30 | if (execute()) return; 31 | #if defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) 32 | _mm_pause(); 33 | _mm_pause(); 34 | #elif __aarch64__ 35 | // ISB instruction is better than WFE https://stackoverflow.com/questions/70810121/why-does-hintspin-loop-use-isb-on-aarch64 36 | // Still on linux it maxes out the CPU, so we need to sleep for a while in the next phase 37 | asm volatile("isb sy"); 38 | asm volatile("isb sy"); 39 | asm volatile("isb sy"); 40 | asm volatile("isb sy"); 41 | asm volatile("isb sy"); 42 | asm volatile("isb sy"); 43 | asm volatile("isb sy"); 44 | asm volatile("isb sy"); 45 | #elif __arm__ 46 | asm volatile("yield"); 47 | asm volatile("yield"); 48 | asm volatile("yield"); 49 | asm volatile("yield"); 50 | #endif 51 | } 52 | while (true) { 53 | // The sleep_for function is important - without it, the thread will consume 100% of the CPU. This also applies when we use the ISB or WFE instruction. Also on linux we will get missing samples, because the thread gets suspended by the OS for a certain period once in a while?!? 54 | if (should_exit()) return; 55 | if (execute()) return; 56 | std::this_thread::yield(); 57 | std::this_thread::sleep_for(std::chrono::microseconds(100)); 58 | } 59 | } 60 | 61 | 62 | bool InferenceThread::execute() { 63 | if (m_next_inference.try_dequeue(m_inference_data)) { 64 | if (m_inference_data.m_session->m_initialized.load(std::memory_order::acquire)) { 65 | do_inference(m_inference_data.m_session, m_inference_data.m_thread_safe_struct); 66 | } 67 | return true; 68 | } 69 | return false; 70 | } 71 | 72 | void InferenceThread::do_inference(std::shared_ptr session, std::shared_ptr thread_safe_struct) { 73 | session->m_active_inferences.fetch_add(1, std::memory_order::release); 74 | inference(session, thread_safe_struct->m_tensor_input_data, thread_safe_struct->m_tensor_output_data); 75 | if (session->m_inference_config.m_blocking_ratio > 0.f) { 76 | thread_safe_struct->m_done_semaphore.release(); 77 | } else { 78 | thread_safe_struct->m_done_atomic.store(true, std::memory_order::release); 79 | } 80 | session->m_active_inferences.fetch_sub(1, std::memory_order::release); 81 | } 82 | 83 | void InferenceThread::inference(std::shared_ptr session, std::vector& input, std::vector& output) { 84 | #ifdef USE_LIBTORCH 85 | if (session->m_current_backend.load(std::memory_order_relaxed) == LIBTORCH) { 86 | if (session->m_libtorch_processor != nullptr) { 87 | session->m_libtorch_processor->process(input, output, session); 88 | } 89 | else { 90 | session->m_default_processor.process(input, output, session); 91 | LOG_ERROR << "[ERROR] LibTorch model has not been provided. Using default processor." << std::endl; 92 | } 93 | } 94 | #endif 95 | #ifdef USE_ONNXRUNTIME 96 | if (session->m_current_backend.load(std::memory_order_relaxed) == ONNX) { 97 | if (session->m_onnx_processor != nullptr) { 98 | session->m_onnx_processor->process(input, output, session); 99 | } 100 | else { 101 | session->m_default_processor.process(input, output, session); 102 | LOG_ERROR << "[ERROR] OnnxRuntime model has not been provided. Using default processor." 
<< std::endl; 103 | } 104 | } 105 | #endif 106 | #ifdef USE_TFLITE 107 | if (session->m_current_backend.load(std::memory_order_relaxed) == TFLITE) { 108 | if (session->m_tflite_processor != nullptr) { 109 | session->m_tflite_processor->process(input, output, session); 110 | } 111 | else { 112 | session->m_default_processor.process(input, output, session); 113 | LOG_ERROR << "[ERROR] TFLite model has not been provided. Using default processor." << std::endl; 114 | } 115 | } 116 | #endif 117 | if (session->m_current_backend.load(std::memory_order_relaxed) == CUSTOM) { 118 | session->m_custom_processor->process(input, output, session); 119 | } 120 | } 121 | 122 | } // namespace anira -------------------------------------------------------------------------------- /docs/sphinx/architecture.rst: -------------------------------------------------------------------------------- 1 | Architecture 2 | ============ 3 | 4 | This page describes the high-level architecture of the anira library, its core components, and how they interact with each other. 5 | 6 | System Overview 7 | --------------- 8 | 9 | anira is designed with real-time audio applications in mind, focusing on deterministic performance and thread safety. The architecture consists of several key components working together to provide neural network inference capabilities that can be safely used within audio processing contexts. 10 | 11 | .. code-block:: text 12 | 13 | +-----------------------------------+ 14 | | InferenceHandler | 15 | | (Main user-facing interface) | 16 | +----------------+------------------+ 17 | | 18 | v 19 | +----------------+------------------+ 20 | | InferenceConfig | 21 | | (Model paths, shapes, settings) | 22 | +----------------+------------------+ 23 | | 24 | v 25 | +----------------+------------------+ 26 | | PrePostProcessor | 27 | | (Format conversion, buffering) | 28 | +----------------+------------------+ 29 | | 30 | v 31 | +----------------+------------------+ 32 | | InferenceManager | 33 | | (Thread pool coordination) | 34 | +----------------+------------------+ 35 | | 36 | v 37 | +----------------+------------------+ +----------------------+ 38 | | Backend Processors | <---> | Inference Engines | 39 | | (LibTorch, ONNX, TensorFlow Lite) | | (External libraries) | 40 | +-----------------------------------+ +----------------------+ 41 | 42 | Key Design Principles 43 | --------------------- 44 | 45 | 1. **Real-time Safety** 46 | 47 | * No dynamic memory allocation during audio processing 48 | * Deterministic performance guarantees 49 | * Thread-safety with atomic operations 50 | * Pre-allocated buffers and resources 51 | 52 | 2. **Flexibility** 53 | 54 | * Support for multiple inference backends 55 | * Configurable thread management 56 | * Customizable pre/post-processing 57 | * Support for both stateful and stateless models 58 | 59 | 3. **Performance** 60 | 61 | * Efficient buffer management with zero-copy where possible 62 | * Thread pool to prevent oversubscription 63 | * Optimized tensor operations 64 | * Benchmarking tools for performance analysis 65 | 66 | Component Responsibilities 67 | -------------------------- 68 | 69 | :cpp:class:`anira::InferenceHandler` 70 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | 72 | The primary interface for users, handling the overall integration of neural network inference into audio processing workflows. 
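The snippet below sketches a typical integration; the construction mirrors the example shown in :cpp:struct:`anira::ContextConfig`, while ``prepare``, ``process`` and ``get_latency`` are assumed method names used here for illustration only.

.. code-block:: cpp

   // Illustrative sketch: inference_config, host_config, audio_data and num_samples are
   // placeholders, and the method names are assumptions rather than verbatim API.
   anira::PrePostProcessor pp_processor(inference_config);
   anira::InferenceHandler handler(pp_processor, inference_config);

   handler.prepare(host_config);             // non-real-time setup (buffer size, sample rate)
   handler.process(audio_data, num_samples); // real-time safe call from the audio callback
   int latency = handler.get_latency();      // added latency in samples, for host compensation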
73 | 74 | * Manages the audio processing lifecycle 75 | * Provides real-time safe process methods 76 | * Handles buffer management 77 | * Reports latency information 78 | 79 | :cpp:class:`anira::InferenceConfig` 80 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 81 | 82 | Stores configuration data for models and processing parameters. 83 | 84 | * Model paths and backend selection 85 | * Input and output tensor shapes 86 | * Maximum inference time limits 87 | * Memory management settings 88 | 89 | :cpp:class:`anira::PrePostProcessor` 90 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 91 | 92 | Handles data formatting between audio buffers and neural network tensors. 93 | 94 | * Converts audio data to model input format 95 | * Converts model outputs back to audio format 96 | * Manages intermediate buffers 97 | 98 | :cpp:class:`anira::InferenceManager` 99 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 100 | 101 | Coordinates the thread pool and inference scheduling. 102 | 103 | * Manages worker threads 104 | * Schedules inference tasks 105 | * Handles synchronization between audio and inference threads 106 | 107 | Backend Processors 108 | ~~~~~~~~~~~~~~~~~~ 109 | 110 | Backend-specific implementations for different inference engines. 111 | 112 | * :cpp:class:`anira::LibtorchProcessor` - PyTorch C++ API integration 113 | * :cpp:class:`anira::OnnxRuntimeProcessor` - ONNX Runtime integration 114 | * :cpp:class:`anira::TFLiteProcessor` - TensorFlow Lite integration 115 | * :cpp:class:`anira::BackendBase` - For inheritance for custom inference engines 116 | 117 | Data Flow 118 | --------- 119 | 120 | 1. **Audio Input:** The host application provides audio data to the InferenceHandler 121 | 2. **Pre-processing:** The PrePostProcessor converts audio data to tensors 122 | 3. **Scheduling:** The InferenceManager schedules the inference task 123 | 4. **Inference:** A backend processor executes the neural network model 124 | 5. **Post-processing:** The PrePostProcessor converts results back to audio 125 | 6. **Audio Output:** The processed audio is returned to the host application 126 | 127 | Threading Model 128 | --------------- 129 | 130 | anira employs a multi-threaded architecture with careful synchronization: 131 | 132 | * **Audio Thread:** Real-time thread from the host application, never blocked 133 | * **Inference Threads:** Worker threads performing the actual model inference 134 | * **Synchronization:** Lock-free communication with atomic operations and ring buffers 135 | 136 | The system avoids blocking operations in the audio thread and uses a carefully designed thread pool to prevent CPU oversubscription. 137 | -------------------------------------------------------------------------------- /docs/sphinx/troubleshooting.rst: -------------------------------------------------------------------------------- 1 | Troubleshooting & FAQ 2 | ===================== 3 | 4 | This section addresses common issues and questions that may arise when using anira. 5 | 6 | Frequently Asked Questions 7 | -------------------------- 8 | 9 | General 10 | ~~~~~~~ 11 | 12 | What is anira? 13 | ^^^^^^^^^^^^^^ 14 | 15 | Anira is a high-performance library designed for real-time neural network inference in audio applications. It provides a consistent API across multiple inference backends with a focus on deterministic performance suitable for audio processing. 16 | 17 | Which platforms are supported? 18 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 19 | 20 | Anira supports macOS, Linux, and Windows platforms. 
It has been tested on x86_64, ARM64, and ARM7 architectures. 21 | 22 | Which neural network frameworks are supported? 23 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 24 | 25 | Anira currently supports three inference backends: 26 | - LibTorch 27 | - ONNX Runtime 28 | - TensorFlow Lite 29 | 30 | .. note:: 31 | Custom backends can be integrated as needed. 32 | 33 | Is anira free and open source? 34 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 35 | 36 | Yes, anira is open source and available under the Apache-2.0 license. 37 | 38 | Technical Questions 39 | ~~~~~~~~~~~~~~~~~~~ 40 | 41 | How does anira ensure real-time safety? 42 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 43 | 44 | Anira ensures real-time safety through several mechanisms: 45 | - No dynamic memory allocation during audio processing 46 | - Static thread pool to avoid oversubscription 47 | - Lock-free communication between audio and inference threads 48 | - Pre-allocation of all required resources 49 | - Consistent timing checks and fallback mechanisms 50 | 51 | What's the minimum latency I can achieve? 52 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 53 | 54 | The minimum achievable latency depends on several factors, including model complexity, hardware performance, and audio buffer size. Anira is optimized for low-latency operation and, in ideal conditions, can return inference results within the same audio processing cycle—effectively achieving zero added latency. 55 | 56 | Can I use multiple models simultaneously? 57 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 58 | 59 | Yes, you can use multiple models simultaneously by creating separate :cpp:class:`anira::InferenceHandler` instances, each with its own model configuration. All handlers can share the same thread pool, enabling efficient parallel processing of multiple models. 60 | 61 | Troubleshooting 62 | --------------- 63 | 64 | Compilation Issues 65 | ~~~~~~~~~~~~~~~~~~ 66 | 67 | Missing Backend Dependencies 68 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 69 | 70 | **Issue**: CMake fails to find LibTorch, ONNX Runtime, or TensorFlow Lite. 71 | 72 | **Solution**: You can disable specific backends using CMake options: 73 | - `-DANIRA_WITH_LIBTORCH=OFF` 74 | - `-DANIRA_WITH_ONNXRUNTIME=OFF` 75 | - `-DANIRA_WITH_TFLITE=OFF` 76 | 77 | Alternatively, you can specify custom paths to these dependencies if they are installed in non-standard locations. 78 | 79 | Compilation Errors with C++ Standard 80 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 81 | 82 | **Issue**: Compiler errors related to C++ standard compatibility. 83 | 84 | **Solution**: Anira requires C++17 or later. Ensure your compiler supports C++17. 85 | 86 | Runtime Issues 87 | ~~~~~~~~~~~~~~ 88 | 89 | Audio Glitches or Dropouts 90 | ^^^^^^^^^^^^^^^^^^^^^^^^^^ 91 | 92 | **Issue**: Audio processing experiences dropouts or glitches during inference. 93 | 94 | **Solutions**: 95 | 1. Increase the maximum inference time in your :cpp:struct:`anira::InferenceConfig` to allow more time for model processing. 96 | 2. Reduce the complexity of your neural network model 97 | 3. Increase audio buffer size (though this increases latency) 98 | 4. Check if other processes are consuming CPU resources 99 | 5. Use `anira::benchmark` tools to identify performance bottlenecks 100 | 101 | Model Loading Failures 102 | ^^^^^^^^^^^^^^^^^^^^^^ 103 | 104 | **Issue**: "Failed to load model" or similar errors. 105 | 106 | **Solutions**: 107 | 1. Verify the model file exists at the specified path 108 | 2. Check that the model format is compatible with the selected backend 109 | 3. 
Ensure tensor shapes in your :cpp:struct:`anira::InferenceConfig` match the model's expected shapes 110 | 4. Try a different backend if available 111 | 112 | Thread Priority Issues 113 | ^^^^^^^^^^^^^^^^^^^^^^ 114 | 115 | **Issue**: Thread priority settings fail, particularly on Linux. 116 | 117 | **Solution**: On Linux, you may need to set the `rtprio` limit for your user. Add the following to `/etc/security/limits.conf`: 118 | 119 | .. code-block:: 120 | 121 | your_username - rtprio 99 122 | 123 | Log out and back in for the changes to take effect. 124 | 125 | Unexpected Results or Crashes 126 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 127 | 128 | **Issue**: Inference produces incorrect outputs or crashes. 129 | 130 | **Solutions**: 131 | 1. Validate that the tensor shapes in your :cpp:struct:`anira::InferenceConfig` match your model's expectations 132 | 2. Ensure your pre/post-processing logic correctly handles the data format 133 | 3. Try using a different backend to rule out backend-specific issues 134 | 4. Check that your model works correctly outside of anira, using the minimal inference example provided in the :doc:`examples` section. 135 | 136 | .. note:: 137 | If you continue to experience issues, feel free to file an issue on the `GitHub repository <https://github.com/anira-project/anira/issues>`_. 138 | 139 | -------------------------------------------------------------------------------- /src/backends/OnnxRuntimeProcessor.cpp: -------------------------------------------------------------------------------- 1 | #include <anira/backends/OnnxRuntimeProcessor.h> 2 | #include <anira/utils/Logger.h> 3 | 4 | namespace anira { 5 | 6 | OnnxRuntimeProcessor::OnnxRuntimeProcessor(InferenceConfig& inference_config) : BackendBase(inference_config) 7 | { 8 | for (unsigned int i = 0; i < m_inference_config.m_num_parallel_processors; ++i) { 9 | m_instances.emplace_back(std::make_shared<Instance>(m_inference_config)); 10 | } 11 | } 12 | 13 | OnnxRuntimeProcessor::~OnnxRuntimeProcessor() { 14 | } 15 | 16 | void OnnxRuntimeProcessor::prepare() { 17 | for(auto& instance : m_instances) { 18 | instance->prepare(); 19 | } 20 | } 21 | 22 | void OnnxRuntimeProcessor::process(std::vector<BufferF>& input, std::vector<BufferF>& output, std::shared_ptr<SessionElement> session) { 23 | while (true) { 24 | for(auto& instance : m_instances) { 25 | if (!(instance->m_processing.exchange(true))) { 26 | instance->process(input, output, session); 27 | instance->m_processing.exchange(false); 28 | return; 29 | } 30 | } 31 | } 32 | } 33 | 34 | OnnxRuntimeProcessor::Instance::Instance(InferenceConfig& inference_config) : m_memory_info(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU)), 35 | m_inference_config(inference_config) 36 | { 37 | m_session_options.SetIntraOpNumThreads(1); 38 | 39 | // Check if the model is binary 40 | if (m_inference_config.is_model_binary(anira::InferenceBackend::ONNX)) { 41 | const anira::ModelData* model_data = m_inference_config.get_model_data(anira::InferenceBackend::ONNX); 42 | assert(model_data && "Model data not found for binary model!"); 43 | 44 | // Load model from binary data 45 | m_session = std::make_unique<Ort::Session>(m_env, model_data->m_data, model_data->m_size, m_session_options); 46 | } else { 47 | // Load model from file path 48 | #ifdef _WIN32 49 | std::string modelpath_str = m_inference_config.get_model_path(anira::InferenceBackend::ONNX); 50 | std::wstring modelpath = std::wstring(modelpath_str.begin(), modelpath_str.end()); 51 | #else 52 | std::string modelpath = m_inference_config.get_model_path(anira::InferenceBackend::ONNX); 53 | #endif 54 | m_session = std::make_unique<Ort::Session>(m_env, modelpath.c_str(), 
m_session_options); 55 | } 56 | 57 | m_input_names.resize(m_session->GetInputCount()); 58 | m_output_names.resize(m_session->GetOutputCount()); 59 | m_input_name.clear(); 60 | m_output_name.clear(); 61 | 62 | for (size_t i = 0; i < m_session->GetInputCount(); ++i) { 63 | m_input_name.emplace_back(m_session->GetInputNameAllocated(i, m_ort_alloc)); 64 | m_input_names[i] = m_input_name[i].get(); 65 | } 66 | for (size_t i = 0; i < m_session->GetOutputCount(); ++i) { 67 | m_output_name.emplace_back(m_session->GetOutputNameAllocated(i, m_ort_alloc)); 68 | m_output_names[i] = m_output_name[i].get(); 69 | } 70 | 71 | m_input_data.resize(m_inference_config.get_tensor_input_shape().size()); 72 | m_inputs.clear(); 73 | for (size_t i = 0; i < m_inference_config.get_tensor_input_shape().size(); i++) { 74 | m_input_data[i].resize(m_inference_config.get_tensor_input_size()[i]); 75 | m_inputs.emplace_back(Ort::Value::CreateTensor<float>( 76 | m_memory_info, 77 | m_input_data[i].data(), 78 | m_input_data[i].size(), 79 | m_inference_config.get_tensor_input_shape(anira::InferenceBackend::ONNX)[i].data(), 80 | m_inference_config.get_tensor_input_shape(anira::InferenceBackend::ONNX)[i].size() 81 | )); 82 | } 83 | 84 | for (size_t i = 0; i < m_inference_config.m_warm_up; i++) { 85 | try { 86 | m_outputs = m_session->Run(Ort::RunOptions{nullptr}, m_input_names.data(), m_inputs.data(), m_input_names.size(), m_output_names.data(), m_output_names.size()); 87 | } catch (Ort::Exception &e) { 88 | LOG_ERROR << e.what() << std::endl; 89 | } 90 | } 91 | } 92 | 93 | OnnxRuntimeProcessor::Instance::~Instance() { 94 | // Resetting the session here is very important, otherwise new models might not be loaded correctly 95 | m_session.reset(); 96 | } 97 | 98 | void OnnxRuntimeProcessor::Instance::prepare() { 99 | for (auto & i : m_input_data) { 100 | i.clear(); 101 | } 102 | } 103 | 104 | void OnnxRuntimeProcessor::Instance::process(std::vector<BufferF>& input, std::vector<BufferF>& output, std::shared_ptr<SessionElement> session) { 105 | for (size_t i = 0; i < m_inference_config.get_tensor_input_shape().size(); i++) { 106 | m_inputs[i] = Ort::Value::CreateTensor<float>( 107 | m_memory_info, 108 | input[i].data(), 109 | input[i].get_num_samples() * input[i].get_num_channels(), 110 | m_inference_config.get_tensor_input_shape(anira::InferenceBackend::ONNX)[i].data(), 111 | m_inference_config.get_tensor_input_shape(anira::InferenceBackend::ONNX)[i].size() 112 | ); 113 | } 114 | 115 | try { 116 | m_outputs = m_session->Run(Ort::RunOptions{nullptr}, m_input_names.data(), m_inputs.data(), m_input_names.size(), m_output_names.data(), m_output_names.size()); 117 | } catch (Ort::Exception &e) { 118 | LOG_ERROR << e.what() << std::endl; 119 | } 120 | 121 | for (size_t i = 0; i < m_outputs.size(); i++) { 122 | const auto output_read_ptr = m_outputs[i].GetTensorMutableData<float>(); 123 | for (size_t j = 0; j < m_inference_config.get_tensor_output_size()[i]; j++) { 124 | output[i].get_memory_block()[j] = output_read_ptr[j]; 125 | } 126 | } 127 | } 128 | 129 | } // namespace anira 130 | -------------------------------------------------------------------------------- /include/anira/ContextConfig.h: -------------------------------------------------------------------------------- 1 | #ifndef ANIRA_CONTEXTCONFIG_H 2 | #define ANIRA_CONTEXTCONFIG_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "anira/utils/InferenceBackend.h" 10 | #include "anira/system/AniraWinExports.h" 11 | 12 | namespace anira { 13 | 14 | /** 15 | * @brief Configuration structure for the 
inference context and threading behavior 16 | * 17 | * The ContextConfig struct controls global settings for the anira inference system, 18 | * including thread pool management and available inference backends. This configuration 19 | * is shared across all inference sessions within a single context instance. 20 | * 21 | * @par Usage Examples: 22 | * @code 23 | * // Use default configuration (half of available CPU cores) 24 | * anira::ContextConfig default_config; 25 | * 26 | * // Specify custom thread count 27 | * anira::ContextConfig custom_config(4); 28 | * 29 | * // Use with InferenceHandler 30 | * anira::InferenceHandler handler(pp_processor, inference_config, custom_config); 31 | * @endcode 32 | * 33 | * @note This configuration affects global behavior and should be set once during 34 | * application initialization. Changing context configuration during runtime 35 | * requires recreating the context and all associated sessions. 36 | * 37 | * @see Context, InferenceHandler, InferenceBackend 38 | */ 39 | struct ANIRA_API ContextConfig { 40 | /** 41 | * @brief Constructs a ContextConfig with specified thread count 42 | * 43 | * Initializes the context configuration with the given number of inference threads 44 | * and automatically populates the list of available backends based on compile-time 45 | * feature flags. 46 | * 47 | * @param num_threads Number of background inference threads to create 48 | * Default: Half of available CPU cores (minimum 1) 49 | * 50 | * @note The constructor automatically detects and registers available inference 51 | * backends based on compile-time definitions (USE_LIBTORCH, USE_ONNXRUNTIME, USE_TFLITE) 52 | */ 53 | ContextConfig( 54 | unsigned int num_threads = (std::thread::hardware_concurrency() / 2 > 0) ? std::thread::hardware_concurrency() / 2 : 1) : 55 | m_num_threads(num_threads) 56 | { 57 | #ifdef USE_LIBTORCH 58 | m_enabled_backends.push_back(InferenceBackend::LIBTORCH); 59 | #endif 60 | #ifdef USE_ONNXRUNTIME 61 | m_enabled_backends.push_back(InferenceBackend::ONNX); 62 | #endif 63 | #ifdef USE_TFLITE 64 | m_enabled_backends.push_back(InferenceBackend::TFLITE); 65 | #endif 66 | } 67 | 68 | /** 69 | * @brief Number of background inference threads 70 | * 71 | * Controls the size of the thread pool used for neural network inference. 72 | * These threads run at high priority to minimize inference latency and are 73 | * shared across all inference sessions within the context. 74 | * 75 | * @note This value is set during construction and cannot be changed without 76 | * recreating the context. All inference sessions using this context will 77 | * share the same thread pool. 78 | */ 79 | unsigned int m_num_threads; 80 | 81 | /** 82 | * @brief Version string of the anira library 83 | * 84 | * Contains the version of the anira library that was used to create this 85 | * configuration. This is useful for debugging, logging, and ensuring 86 | * compatibility when serializing/deserializing configurations. 87 | * 88 | * @note This field is automatically populated with the ANIRA_VERSION 89 | * macro during construction and should not be modified manually. 90 | */ 91 | std::string m_anira_version = ANIRA_VERSION; 92 | 93 | /** 94 | * @brief List of available inference backends 95 | * 96 | * Contains all inference backends that were detected as available during 97 | * compilation. 
This list is automatically populated in the constructor 98 | * based on compile-time feature flags: 99 | * 100 | * - InferenceBackend::LIBTORCH (if USE_LIBTORCH is defined) 101 | * - InferenceBackend::ONNX (if USE_ONNXRUNTIME is defined) 102 | * - InferenceBackend::TFLITE (if USE_TFLITE is defined) 103 | * - InferenceBackend::CUSTOM (always available) 104 | * 105 | * @note The CUSTOM backend is not automatically added to this list but is 106 | * always available for use with custom backend implementations. 107 | */ 108 | std::vector m_enabled_backends; 109 | 110 | private: 111 | /** 112 | * @brief Equality comparison operator 113 | * 114 | * Compares two ContextConfig instances for equality by checking all member 115 | * variables. This is used internally for configuration validation and 116 | * context management. 117 | * 118 | * @param other The ContextConfig to compare against 119 | * @return true if all configuration parameters match, false otherwise 120 | **/ 121 | bool operator==(const ContextConfig& other) const { 122 | return 123 | m_num_threads == other.m_num_threads && 124 | m_anira_version == other.m_anira_version && 125 | m_enabled_backends == other.m_enabled_backends; 126 | } 127 | 128 | /** 129 | * @brief Inequality comparison operator 130 | * 131 | * Compares two ContextConfig instances for inequality. This is the logical 132 | * inverse of the equality operator. 133 | * 134 | * @param other The ContextConfig to compare against 135 | * @return true if any configuration parameters differ, false otherwise 136 | * 137 | **/ 138 | bool operator!=(const ContextConfig& other) const { 139 | return !(*this == other); 140 | } 141 | 142 | }; 143 | 144 | } // namespace anira 145 | 146 | #endif //ANIRA_CONTEXTCONFIG_H -------------------------------------------------------------------------------- /src/PrePostProcessor.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace anira { 4 | 5 | PrePostProcessor::PrePostProcessor(InferenceConfig& inference_config) : m_inference_config(inference_config) { 6 | m_inputs.resize(m_inference_config.get_tensor_input_shape().size()); 7 | for (size_t i = 0; i < m_inference_config.get_tensor_input_shape().size(); ++i) { 8 | if(m_inference_config.get_preprocess_input_size()[i] <= 0) { 9 | m_inputs[i].resize(m_inference_config.get_tensor_input_size()[i]); 10 | } 11 | } 12 | m_outputs.resize(m_inference_config.get_tensor_output_shape().size()); 13 | for (size_t i = 0; i < m_inference_config.get_tensor_output_shape().size(); ++i) { 14 | if(m_inference_config.get_postprocess_output_size()[i] <= 0) { 15 | m_outputs[i].resize(m_inference_config.get_tensor_output_size()[i]); 16 | } 17 | } 18 | } 19 | 20 | void PrePostProcessor::pre_process(std::vector& input, std::vector& output, [[maybe_unused]] InferenceBackend current_inference_backend) { 21 | for (size_t tensor_index = 0; tensor_index < m_inference_config.get_tensor_input_shape().size(); tensor_index++) { 22 | if (m_inference_config.get_preprocess_input_size()[tensor_index] > 0) { 23 | pop_samples_from_buffer(input[tensor_index], output[tensor_index], m_inference_config.get_preprocess_input_size()[tensor_index]); 24 | } else { 25 | for (size_t sample = 0; sample < m_inference_config.get_tensor_input_size()[tensor_index]; sample++) { 26 | output[tensor_index].set_sample(0, sample, get_input(tensor_index, sample)); // Non-streamble tensors have no channel count 27 | } 28 | } 29 | } 30 | } 31 | 32 | void PrePostProcessor::post_process(std::vector& 
input, std::vector& output, [[maybe_unused]] InferenceBackend current_inference_backend) { 33 | for (size_t tensor_index = 0; tensor_index < m_inference_config.get_tensor_output_shape().size(); tensor_index++) { 34 | if (m_inference_config.get_postprocess_output_size()[tensor_index] > 0) { 35 | push_samples_to_buffer(input[tensor_index], output[tensor_index], m_inference_config.get_postprocess_output_size()[tensor_index]); 36 | } else { 37 | for (size_t sample = 0; sample < m_inference_config.get_tensor_output_size()[tensor_index]; sample++) { 38 | set_output(input[tensor_index].get_sample(0, sample), tensor_index, sample); // Non-streamble tensors have no channel count 39 | } 40 | } 41 | } 42 | } 43 | 44 | void PrePostProcessor::pop_samples_from_buffer(RingBuffer& input, BufferF& output, size_t num_samples) { 45 | for (size_t i = 0; i < input.get_num_channels(); i++) { 46 | for (size_t j = 0; j < num_samples; j++) { 47 | output.set_sample(0, j+(i*num_samples), input.pop_sample(i)); // The output buffer is always a single channel buffer 48 | } 49 | } 50 | } 51 | 52 | void PrePostProcessor::pop_samples_from_buffer(RingBuffer& input, BufferF& output, size_t num_new_samples, size_t num_old_samples) { 53 | pop_samples_from_buffer(input, output, num_new_samples, num_old_samples, 0); 54 | } 55 | 56 | void PrePostProcessor::pop_samples_from_buffer(RingBuffer& input, BufferF& output, size_t num_new_samples, size_t num_old_samples, size_t offset) { 57 | int num_total_samples = num_new_samples + num_old_samples; 58 | for (size_t i = 0; i < input.get_num_channels(); i++) { 59 | // int j is important to be signed, because it is used in the condition j >= 0 60 | for (int j = num_total_samples - 1; j >= 0; j--) { 61 | if (j >= num_old_samples) { 62 | output.set_sample(0, (size_t) (num_total_samples - j + num_old_samples - 1) + offset, input.pop_sample(i)); 63 | } else { 64 | output.set_sample(0, (size_t) j + offset, input.get_past_sample(i, num_total_samples - (size_t) j)); 65 | } 66 | } 67 | } 68 | } 69 | 70 | void PrePostProcessor::push_samples_to_buffer(const BufferF& input, RingBuffer& output, size_t num_samples) { 71 | for (size_t i = 0; i < output.get_num_channels(); i++) { 72 | for (size_t j = 0; j < num_samples; j++) { 73 | output.push_sample(i, input.get_sample(0, j+(i*num_samples))); 74 | } 75 | } 76 | } 77 | 78 | void PrePostProcessor::set_input(const float& input, size_t i, size_t j) { 79 | assert(("Index i out of bounds" && i < m_inputs.size())); 80 | assert(("Index j out of bounds" && j < m_inputs[i].size())); 81 | // assert(("Index is streamable, data should be passed via the process method." && this->m_inference_config.get_preprocess_input_size()[i] > 0)); TODO: Why does this not work? 82 | m_inputs[i][j].store(input); 83 | } 84 | 85 | void PrePostProcessor::set_output(const float& output, size_t i, size_t j) { 86 | assert(("Index i out of bounds" && i < m_outputs.size())); 87 | assert(("Index j out of bounds" && j < m_outputs[i].size())); 88 | // assert(("Index is streamable, data should be passed via the process method." && m_inference_config.get_postprocess_output_size()[i] > 0)); 89 | m_outputs[i][j].store(output); 90 | } 91 | 92 | float PrePostProcessor::get_input(size_t i, size_t j) { 93 | assert(("Index i out of bounds" && i < m_inputs.size())); 94 | assert(("Index j out of bounds" && j < m_inputs[i].size())); 95 | // assert(("Index is streamable, data should be retrieved via the process method." 
&& m_inference_config.get_preprocess_input_size()[i] > 0)); 96 | return m_inputs[i][j].load(); 97 | } 98 | 99 | float PrePostProcessor::get_output(size_t i, size_t j) { 100 | assert(("Index i out of bounds" && i < m_outputs.size())); 101 | assert(("Index j out of bounds" && j < m_outputs[i].size())); 102 | // assert(("Index is streamable, data should be retrieved via the process method." && m_inference_config.get_postprocess_output_size()[i] > 0)); 103 | return m_outputs[i][j].load(); 104 | } 105 | 106 | } // namespace anira --------------------------------------------------------------------------------