├── src ├── .nojekyll ├── icon.png ├── misc │ ├── rave.png │ ├── discord.png │ ├── github.png │ ├── vschaos.png │ ├── after_white.png │ └── logo-3107175833.png ├── models │ ├── demo_mc.ts │ ├── effects.ts │ ├── features.ts │ ├── wavetable.ts │ ├── demo_buffers.ts │ └── demo_attributes.ts ├── backend │ ├── parsing_utils.h │ ├── parsing_utils.cpp │ ├── CMakeLists.txt │ └── backend.h ├── extras │ ├── nn~ Overview.maxpat │ └── patch_with_vst.sh ├── patchers │ ├── after_help.maxpat │ └── latent_remote │ │ ├── ierf.gendsp │ │ ├── frand.maxpat │ │ ├── latent_remote.js │ │ ├── M4L.latent_remote.js │ │ └── M4L.latent_remote.maxpat ├── frontend │ ├── maxmsp │ │ ├── nn_tilde │ │ │ ├── nn_tilde_test.cpp │ │ │ ├── CMakeLists.txt │ │ │ └── nn_tilde.cpp │ │ ├── mc.nn_tilde │ │ │ └── CMakeLists.txt │ │ ├── shared │ │ │ ├── max_model_download.h │ │ │ ├── array_tools.h │ │ │ └── dict_utils.h │ │ ├── mcs.nn_tilde │ │ │ └── CMakeLists.txt │ │ └── nn.info │ │ │ └── CMakeLists.txt │ └── puredata │ │ ├── shared │ │ ├── pd_model_download.h │ │ └── pd_buffer_manager.h │ │ └── nn_tilde │ │ ├── nn~-help.pd │ │ └── CMakeLists.txt ├── shared │ ├── static_buffer.h │ └── circular_buffer.h ├── source │ ├── attributes.py │ ├── buffers.py │ ├── features.py │ ├── effects.py │ └── unmix.py ├── CMakeLists.txt └── cmake │ └── add_torch.cmake ├── MANIFEST.in ├── requirements.txt ├── requirements_darwin_x64.txt ├── python_tools ├── templates │ ├── __init__.py │ ├── attributes.py │ └── buffers.py ├── __init__.py ├── test │ ├── utils.py │ ├── test_buffers.py │ ├── test_attributes.py │ └── test_attributes.maxpat ├── codegen.py └── buffer.py ├── assets ├── banner.png ├── max_mc.png ├── max_mcs.png ├── pd_attr.png ├── max_attr.png ├── max_buffer.png ├── max_method.png ├── max_nninfo.png ├── max_void.png ├── pd_buffer.png ├── pd_method.png ├── max_instance.png ├── max_regular.png ├── pd_instance.png └── quarantine_warning.png ├── install ├── MaxAPI.lib ├── mkl.cmake ├── macos_pd_makeub.sh ├── 
macos_max_makeub.sh └── patch_with_vst.sh ├── scripting ├── effects.ts ├── features.ts ├── README.md ├── features.py ├── effects.py └── unmix.py ├── .gitignore ├── .gitmodules ├── package-info.json.in ├── .github └── workflows │ └── python-publish.yaml ├── setup.py └── extras └── generate_test_model.py /src/.nojekyll: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements.txt -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch==2.5 2 | cached_conv>=2.5.0 -------------------------------------------------------------------------------- /requirements_darwin_x64.txt: -------------------------------------------------------------------------------- 1 | torch1 2 | cached_conv>=2.5.0 -------------------------------------------------------------------------------- /src/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/icon.png -------------------------------------------------------------------------------- /python_tools/templates/__init__.py: -------------------------------------------------------------------------------- 1 | from .buffers import * 2 | from .attributes import * -------------------------------------------------------------------------------- /assets/banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/banner.png -------------------------------------------------------------------------------- /assets/max_mc.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/max_mc.png -------------------------------------------------------------------------------- /assets/max_mcs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/max_mcs.png -------------------------------------------------------------------------------- /assets/pd_attr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/pd_attr.png -------------------------------------------------------------------------------- /install/MaxAPI.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/install/MaxAPI.lib -------------------------------------------------------------------------------- /src/misc/rave.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/misc/rave.png -------------------------------------------------------------------------------- /assets/max_attr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/max_attr.png -------------------------------------------------------------------------------- /assets/max_buffer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/max_buffer.png -------------------------------------------------------------------------------- /assets/max_method.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/max_method.png -------------------------------------------------------------------------------- /assets/max_nninfo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/max_nninfo.png -------------------------------------------------------------------------------- /assets/max_void.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/max_void.png -------------------------------------------------------------------------------- /assets/pd_buffer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/pd_buffer.png -------------------------------------------------------------------------------- /assets/pd_method.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/pd_method.png -------------------------------------------------------------------------------- /scripting/effects.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/scripting/effects.ts -------------------------------------------------------------------------------- /scripting/features.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/scripting/features.ts -------------------------------------------------------------------------------- /src/misc/discord.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/misc/discord.png 
-------------------------------------------------------------------------------- /src/misc/github.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/misc/github.png -------------------------------------------------------------------------------- /src/misc/vschaos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/misc/vschaos.png -------------------------------------------------------------------------------- /src/models/demo_mc.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/models/demo_mc.ts -------------------------------------------------------------------------------- /src/models/effects.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/models/effects.ts -------------------------------------------------------------------------------- /assets/max_instance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/max_instance.png -------------------------------------------------------------------------------- /assets/max_regular.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/max_regular.png -------------------------------------------------------------------------------- /assets/pd_instance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/pd_instance.png -------------------------------------------------------------------------------- /src/models/features.ts: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/models/features.ts -------------------------------------------------------------------------------- /src/models/wavetable.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/models/wavetable.ts -------------------------------------------------------------------------------- /src/misc/after_white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/misc/after_white.png -------------------------------------------------------------------------------- /src/models/demo_buffers.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/models/demo_buffers.ts -------------------------------------------------------------------------------- /src/misc/logo-3107175833.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/misc/logo-3107175833.png -------------------------------------------------------------------------------- /assets/quarantine_warning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/assets/quarantine_warning.png -------------------------------------------------------------------------------- /src/models/demo_attributes.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/acids-ircam/nn_tilde/HEAD/src/models/demo_attributes.ts -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | *build 2 | src/externals 3 | src/frontend/tests 4 | src/tests 5 | .vscode 6 | *libtorch 7 | *DS_Store* 8 | src/docs 9 | dist/ 10 | *.egg-info* -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "src/frontend/maxmsp/min-api"] 2 | path = src/frontend/maxmsp/min-api 3 | url = https://github.com/Cycling74/min-api.git 4 | [submodule "src/json"] 5 | path = src/json 6 | url = https://github.com/nlohmann/json.git 7 | -------------------------------------------------------------------------------- /src/backend/parsing_utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | bool to_bool(std::string str); 9 | int to_int(std::string str); 10 | float to_float(std::string str); -------------------------------------------------------------------------------- /python_tools/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | import pathlib 3 | import torch 4 | TMP_FILE_OUTPUT = pathlib.Path(__file__).parent / ".tmpfile" 5 | from .buffer import Buffer 6 | TYPE_HASH = {bool: 0, int: 1, float: 2, str: 3, torch.Tensor: 4, Buffer: 5} 7 | 8 | from . 
import templates 9 | from .module import Module -------------------------------------------------------------------------------- /src/backend/parsing_utils.cpp: -------------------------------------------------------------------------------- 1 | #include "parsing_utils.h" 2 | 3 | bool to_bool(std::string str) { 4 | if ((str == "0") || (str == "false")) { 5 | return false; 6 | } else { 7 | return true; 8 | } 9 | } 10 | 11 | int to_int(std::string str) { return stoi(str); } 12 | 13 | float to_float(std::string str) { return stof(str); } -------------------------------------------------------------------------------- /src/extras/nn~ Overview.maxpat: -------------------------------------------------------------------------------- 1 | { 2 | "patcher" : { 3 | "fileversion" : 1, 4 | "appversion" : { 5 | "major" : 9, 6 | "minor" : 0, 7 | "revision" : 0, 8 | "architecture" : "x64", 9 | "modernui" : 1 10 | } 11 | , 12 | "classnamespace" : "box", 13 | "rect" : [ 59.0, 106.0, 1000.0, 780.0 ], 14 | "gridsize" : [ 15.0, 15.0 ], 15 | "boxes" : [ ], 16 | "lines" : [ ], 17 | "originid" : "pat-152", 18 | "dependency_cache" : [ ], 19 | "autosave" : 0 20 | } 21 | 22 | } 23 | -------------------------------------------------------------------------------- /src/backend/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10 FATAL_ERROR) 2 | project(backend) 3 | 4 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") 5 | 6 | add_library(backend STATIC parsing_utils.cpp backend.cpp) 7 | target_link_libraries(backend "${TORCH_LIBRARIES}") 8 | set_property(TARGET backend PROPERTY CXX_STANDARD 20) 9 | 10 | if(MSVC) 11 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") 12 | set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /MT") 13 | set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /MT") 14 | set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MT") 15 | 
endif() 16 | 17 | 18 | -------------------------------------------------------------------------------- /scripting/README.md: -------------------------------------------------------------------------------- 1 | # Scripting examples in nn~ 2 | 3 | These examples demonstrate how to write simple scripts to incorporate any type of deep models from PyTorch into MaxMSP (and potentially running on GPU). The examples show a variety of different use cases that also help to understand the input/output shapes relationships. 4 | - `effects.py` : apply simple effects to the input (identical input and output shapes) 5 | - `features.py` : compute spectral descriptors from the PyTorch audio library (each input audio buffer produces a single float as output) 6 | - `unmix.py` : apply the unmix deep source separation model (input is split into 4 different audio streams containing « drums », « vocals », « bass » and « others ») 7 | -------------------------------------------------------------------------------- /src/patchers/after_help.maxpat: -------------------------------------------------------------------------------- 1 | { 2 | "patcher" : { 3 | "fileversion" : 1, 4 | "appversion" : { 5 | "major" : 9, 6 | "minor" : 0, 7 | "revision" : 0, 8 | "architecture" : "x64", 9 | "modernui" : 1 10 | } 11 | , 12 | "classnamespace" : "box", 13 | "rect" : [ 607.0, 302.0, 630.0, 418.0 ], 14 | "gridsize" : [ 15.0, 15.0 ], 15 | "toolbars_unpinned_last_save" : 2, 16 | "boxes" : [ { 17 | "box" : { 18 | "fontsize" : 32.0, 19 | "id" : "obj-2", 20 | "maxclass" : "comment", 21 | "numinlets" : 1, 22 | "numoutlets" : 0, 23 | "patching_rect" : [ 121.0, 181.0, 402.0, 42.0 ], 24 | "text" : "Coming soon, we promise!" 
25 | } 26 | 27 | } 28 | ], 29 | "lines" : [ ], 30 | "originid" : "pat-428", 31 | "dependency_cache" : [ ], 32 | "autosave" : 0 33 | } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /package-info.json.in: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "nn_tilde", 3 | "displayname" : "nn~", 4 | "version" : "${VERSION}", 5 | "author" : "ACIDS", 6 | "authors" : [ "Antoine Caillon", "Axel Chemla--Romeu-Santos", "Philippe Esling", "Nils Demerlé"], 7 | "description" : "Max interfaces for deep neural generation", 8 | "tags" : [ "audio", "ai", "neural synthesis"], 9 | "website" : "http://www.github.com/acids-ircam/nn_tilde", 10 | "extends" : "", 11 | "extensible" : 1, 12 | "max_version_min" : "8.0.2", 13 | "max_version_max" : "none", 14 | "os" : { 15 | "macintosh" : { 16 | "min_version" : "10.12.x", 17 | "platform" : [ "x64", "aarch64" ] 18 | } 19 | , 20 | "windows" : { 21 | "min_version" : "7", 22 | "platform" : [ "x64" ] 23 | } 24 | 25 | } 26 | , 27 | "homepatcher" : "help_hub.maxpat", 28 | "package_extra" : { 29 | } 30 | , 31 | "c74install" : 1, 32 | "installdate" : 3745215604 33 | } 34 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yaml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | deploy: 13 | 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v3 18 | - name: Set up Python 19 | uses: actions/setup-python@v3 20 | with: 21 | python-version: '3.10' 22 | - name: Install dependencies 23 | run: | 24 | python -m pip install --upgrade pip setuptools wheel build 25 | python -m pip install -r requirements.txt 26 | - name: Build package 27 | run: NN_TILDE_VERSION=${{ github.ref_name }} python -m build 28 | - name: 
Publish package 29 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 30 | with: 31 | verbose: true 32 | user: __token__ 33 | password: ${{ secrets.PYPI_TOKEN }} -------------------------------------------------------------------------------- /install/mkl.cmake: -------------------------------------------------------------------------------- 1 | find_package(MKL QUIET) 2 | 3 | if(NOT TARGET caffe2::mkl) 4 | add_library(caffe2::mkl INTERFACE IMPORTED) 5 | endif() 6 | 7 | target_include_directories(caffe2::mkl INTERFACE ${MKL_INCLUDE_DIR}) 8 | #target_link_libraries(caffe2::mkl INTERFACE ${MKL_LIBRARIES}) 9 | foreach(MKL_LIB IN LISTS MKL_LIBRARIES) 10 | if(EXISTS "${MKL_LIB}") 11 | get_filename_component(MKL_LINK_DIR "${MKL_LIB}" DIRECTORY) 12 | if(IS_DIRECTORY "${MKL_LINK_DIR}") 13 | target_link_directories(caffe2::mkl INTERFACE "${MKL_LINK_DIR}") 14 | endif() 15 | endif() 16 | endforeach() 17 | 18 | # TODO: This is a hack, it will not pick up architecture dependent 19 | # MKL libraries correctly; see https://github.com/pytorch/pytorch/issues/73008 20 | set_property( 21 | TARGET caffe2::mkl PROPERTY INTERFACE_LINK_DIRECTORIES 22 | ${MKL_ROOT}/lib ${MKL_ROOT}/lib/intel64 ${MKL_ROOT}/lib/intel64_win ${MKL_ROOT}/lib/win-x64) 23 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import setuptools 4 | 5 | VERSION = os.environ["NN_TILDE_VERSION"] 6 | 7 | with open("README.md", "r") as readme: 8 | readme = readme.read() 9 | 10 | with open("requirements.txt", "r") as requirements: 11 | requirements = requirements.read() 12 | 13 | setuptools.setup( 14 | name="nn_tilde", 15 | version=VERSION, 16 | author="Antoine CAILLON & Axel CHEMLA--ROMEU-SANTOS", 17 | author_email="chemla@ircam.fr", 18 | description="Set of tools to create nn_tilde compatible models.", 19 | long_description=readme, 20 | 
long_description_content_type="text/markdown", 21 | packages=['nn_tilde', 'nn_tilde.templates'], 22 | package_dir={'nn_tilde': 'python_tools'}, 23 | classifiers=[ 24 | "Programming Language :: Python :: 3", 25 | "License :: OSI Approved :: MIT License", 26 | "Operating System :: OS Independent", 27 | ], 28 | install_requires=requirements.split("\n"), 29 | python_requires='>=3.11', 30 | ) 31 | -------------------------------------------------------------------------------- /python_tools/test/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib 3 | import time 4 | import sys 5 | import pytest 6 | from pathlib import Path 7 | 8 | @pytest.fixture 9 | def out_dir() -> Path: 10 | out_dir = (Path(__file__).parent / "model_out").resolve() 11 | if not out_dir.exists(): 12 | os.makedirs(out_dir) 13 | return out_dir 14 | 15 | @pytest.fixture 16 | def test_name(request): 17 | return request.node.name 18 | 19 | def import_code(code, glob, loc): 20 | outdir = Path('/tmp') / "nn_tilde" / "code" 21 | os.makedirs(outdir, exist_ok=True) 22 | module_name = f"nntilde_tmp_{int(time.time())}" 23 | outpath = outdir / f"{module_name}.py" 24 | with open(outpath, "w+") as f: 25 | f.write(code) 26 | # Load the module 27 | spec = importlib.util.spec_from_file_location(module_name, outpath) 28 | module = importlib.util.module_from_spec(spec) 29 | exec(spec.loader.get_code(module_name), glob) 30 | loc.update(module.__dict__) 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /python_tools/codegen.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import uuid 4 | from . 
import TMP_FILE_OUTPUT 5 | 6 | 7 | class TmpFileSession(object): 8 | def __init__(self, obj): 9 | self._path = (TMP_FILE_OUTPUT / f"{id(obj)}").resolve() 10 | def get(self): 11 | if not self._path.exists(): 12 | os.makedirs(self._path) 13 | unique_id = str(uuid.uuid4()) 14 | return self._path / f"{unique_id}.py" 15 | def close(self): 16 | 17 | if len(os.listdir(TMP_FILE_OUTPUT)) == 0: 18 | shutil.rmtree(TMP_FILE_OUTPUT, True) 19 | else: 20 | shutil.rmtree(self._path, True) 21 | 22 | 23 | def tmp_file_session(obj): 24 | return TmpFileSession(obj) 25 | 26 | 27 | def method_from_template(file_session: TmpFileSession, template: str, gl = {}, lo = {}): 28 | target_path = file_session.get() 29 | with open(target_path, 'w+') as f: 30 | f.write(template) 31 | code_compiled = compile(template, target_path, 'exec') 32 | exec(code_compiled, gl, lo) 33 | return lo 34 | 35 | -------------------------------------------------------------------------------- /src/frontend/maxmsp/nn_tilde/nn_tilde_test.cpp: -------------------------------------------------------------------------------- 1 | #include "c74_min.h" 2 | #include "c74_min_unittest.h" 3 | #include "nn_tilde.cpp" 4 | #include 5 | 6 | SCENARIO("object produces correct output") { 7 | ext_main(nullptr); 8 | 9 | GIVEN("An instance of nn~ without parameters") { 10 | nn my_object; 11 | WHEN("a buffer is given") { 12 | sample_vector input(4096); 13 | sample_vector output; 14 | 15 | for (int i(0); i < 10; i++) { 16 | for (auto x : input) { 17 | auto y = my_object(x); 18 | output.push_back(y); 19 | } 20 | } 21 | } 22 | } 23 | 24 | GIVEN("An instance of nn~ with parameters") { 25 | atom path("/Users/acaillon/Desktop/nn.ts"), method("forward"); 26 | atoms args = {path, method}; 27 | nn my_object = nn(args); 28 | 29 | WHEN("a buffer is given") { 30 | sample_vector input(4096); 31 | sample_vector output; 32 | 33 | for (int i(0); i < 10; i++) { 34 | for (auto x : input) { 35 | auto y = my_object(x); 36 | output.push_back(y); 37 | } 38 | 
} 39 | } 40 | } 41 | } -------------------------------------------------------------------------------- /install/macos_pd_makeub.sh: -------------------------------------------------------------------------------- 1 | TARGET_DIR=$1 2 | if [ -z $TARGET_DIR ]; then 3 | TARGET_DIR="nn_tilde" 4 | fi 5 | 6 | echo "${TARGET_DIR}" 7 | 8 | if [[ ! -d "${TARGET_DIR}_arm64" ]]; then 9 | echo "[Error] folder ${TARGET_DIR}_arm64 not found" 10 | exit 11 | fi 12 | 13 | if [[ ! -d "${TARGET_DIR}_x64" ]]; then 14 | echo "[Error] folder ${TARGET_DIR}_arm64 not found" 15 | exit 16 | fi 17 | 18 | if [[ ! -d "${TARGET_DIR}" ]]; then 19 | cp -r ${TARGET_DIR}_arm64 ${TARGET_DIR} 20 | fi 21 | 22 | lipo -create "${TARGET_DIR}_arm64/nn~.pd_darwin" "${TARGET_DIR}_x64/nn~.pd_darwin" -output "${TARGET_DIR}/nn~.pd_darwin" 23 | 24 | for i in $(find ${TARGET_DIR}_x64/*.dylib) 25 | do 26 | arch1="" 27 | arch2="" 28 | if [[ ! -f "${TARGET_DIR}_x64$(basename $i)" || ! -f "${TARGET_DIR}_arm64/$(basename $i)" ]] 29 | then 30 | echo "skipping $i" 31 | continue 32 | fi 33 | is_ub=$(file "${TARGET_DIR}_x64/$(basename $i)" | grep -Eo '2 architectures') 34 | if [[ -n "$is_ub" ]]; then 35 | echo "$i / universal binary; skipping" 36 | continue 37 | fi 38 | arch1=$(file -b "${TARGET_DIR}_x64/$(basename $i)" | grep -Eo 'x86_64|arm64') 39 | arch2=$(file -b "${TARGET_DIR}_arm64/$(basename $i)" | grep -Eo 'x86_64|arm64') 40 | if [[ -z "$arch1" || -z "$arch2" ]]; then 41 | echo "skipping $i" 42 | continue 43 | fi 44 | if [[ ! "$arch1" == "$arch2" ]]; then 45 | echo "$i / arch1 : $arch1; arch2 : $arch2" 46 | lipo -create "${TARGET_DIR}_arm64/$(basename $i)" "${TARGET_DIR}_x64/$(basename $i)" -output "${TARGET_DIR}/$(basename $i)" 47 | fi 48 | done 49 | -------------------------------------------------------------------------------- /python_tools/templates/attributes.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | from .. 
import TYPE_HASH, Buffer 4 | 5 | TYPE_HASH_R = {v: k for k, v in TYPE_HASH.items()} 6 | 7 | def _get_sig_type(param): 8 | param = TYPE_HASH_R.get(int(param), None) 9 | if param in [int, float, bool, str]: 10 | return param.__name__ 11 | elif param in [torch.Tensor]: 12 | return "torch.Tensor" 13 | elif param in [Buffer]: 14 | # return "Tuple[torch.Tensor, int]" 15 | return "str" 16 | else: 17 | raise TypeError('type %s not known'%type(param)) 18 | 19 | def get_attribute_setter(attribute_name, attribute_params): 20 | signature_atoms = [f'{attribute_name}{i}: {_get_sig_type(attribute_params[i])}' for i in range(len(attribute_params))] 21 | signature = ", ".join(signature_atoms) 22 | setter_atoms = [] 23 | for i in range(len(attribute_params)): 24 | if attribute_params[i] == TYPE_HASH[Buffer]: 25 | setter_atoms.append(f'Buffer.copy(self.{attribute_name}[{i}])') 26 | else: 27 | setter_atoms.append(f"{attribute_name}{i}") 28 | setter = f"self.{attribute_name} = (" + ", ".join(setter_atoms) + ",)" 29 | template = f"@torch.jit.export\ndef set_{attribute_name}(self, {signature}) -> int:\n\tres=0\n" 30 | template += f"\t{setter}\n" 31 | template += "\treturn res" 32 | return template 33 | 34 | def get_attribute_getter(attribute_name, attribute_params): 35 | def _export_arg(attribute_name, attribute_params, i): 36 | if attribute_params[i] == TYPE_HASH[Buffer]: 37 | return 'self.' + attribute_name+'['+str(i)+'].to_str()' 38 | else: 39 | return 'self.' 
+ attribute_name+'['+str(i)+']' 40 | template = f"@torch.jit.export\ndef get_{attribute_name}(self):\n" 41 | template+= f"\treturn {', '.join([_export_arg(attribute_name, attribute_params, i) for i in range(len(attribute_params))])}," 42 | return template 43 | 44 | -------------------------------------------------------------------------------- /install/macos_max_makeub.sh: -------------------------------------------------------------------------------- 1 | TARGET_DIR=$1 2 | if [ -z $TARGET_DIR ]; then 3 | TARGET_DIR="nn_tilde" 4 | fi 5 | 6 | echo "${TARGET_DIR}" 7 | 8 | if [[ ! -d "${TARGET_DIR}_arm64" ]]; then 9 | echo "[Error] folder ${TARGET_DIR}_arm64 not found" 10 | exit 11 | fi 12 | 13 | if [[ ! -d "${TARGET_DIR}_x64" ]]; then 14 | echo "[Error] folder ${TARGET_DIR}_arm64 not found" 15 | exit 16 | fi 17 | 18 | if [[ ! -d "${TARGET_DIR}" ]]; then 19 | cp -r ${TARGET_DIR}_arm64 ${TARGET_DIR} 20 | fi 21 | 22 | for i in $(find ${TARGET_DIR}_x64/externals/*/Contents/MacOS -type f -perm -111) 23 | do 24 | echo "UBing file $i..." 25 | lipo -create "${TARGET_DIR}_arm64/externals/$(basename $i).mxo/Contents/MacOS/$(basename $i)" "${TARGET_DIR}_x64/externals/$(basename $i).mxo/Contents/MacOS/$(basename $i)" -output "${TARGET_DIR}/externals/$(basename $i).mxo/Contents/MacOS/$(basename $i)" 26 | done 27 | 28 | for i in $(find ${TARGET_DIR}_x64/support/*.dylib) 29 | do 30 | arch1="" 31 | arch2="" 32 | if [[ ! -f "${TARGET_DIR}_x64/support/$(basename $i)" || ! 
-f "${TARGET_DIR}_arm64/support/$(basename $i)" ]] 33 | then 34 | echo "skipping $i" 35 | continue 36 | fi 37 | is_ub=$(file "${TARGET_DIR}_x64/support/$(basename $i)" | grep -Eo '2 architectures') 38 | if [[ -n "$is_ub" ]]; then 39 | echo "$i / universal binary; skipping" 40 | continue 41 | fi 42 | arch1=$(file -b "${TARGET_DIR}_x64/support/$(basename $i)" | grep -Eo 'x86_64|arm64') 43 | arch2=$(file -b "${TARGET_DIR}_arm64/support/$(basename $i)" | grep -Eo 'x86_64|arm64') 44 | if [[ -z "$arch1" || -z "$arch2" ]]; then 45 | echo "skipping $i" 46 | continue 47 | fi 48 | if [[ ! "$arch1" == "$arch2" ]]; then 49 | echo "$i / arch1 : $arch1; arch2 : $arch2" 50 | lipo -create "${TARGET_DIR}_arm64/support/$(basename $i)" "${TARGET_DIR}_x64/support/$(basename $i)" -output "${TARGET_DIR}/support/$(basename $i)" 51 | fi 52 | done 53 | -------------------------------------------------------------------------------- /python_tools/templates/buffers.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | def set_buffer_attribute_template(buffer_names): 4 | template = "@torch.jit.export\ndef set_buffer_attribute(self, buffer_name: str, buffer: torch.Tensor, sr: int) -> int:\n" 5 | if len(buffer_names.value) == 0: 6 | template += "\treturn -1" 7 | return template 8 | for b in buffer_names.value: 9 | buffer_names_parts = b.split('#') 10 | if len(buffer_names_parts) != 2: 11 | raise ValueError('Invalid buffer name : '%b) 12 | attribute_name, buffer_idx = buffer_names_parts 13 | template += f"\tif (buffer_name == \"{b}\"): return self.{attribute_name}[{buffer_idx}].set_value(buffer, sr)\n" 14 | template += "\treturn -1" 15 | return template 16 | 17 | def clear_buffer_attribute_template(buffer_names): 18 | template = "@torch.jit.export\ndef clear_buffer_attribute(self, buffer_name: str) -> None:\n" 19 | if len(buffer_names.value) == 0: 20 | template += "\treturn" 21 | return template 22 | for b in buffer_names.value: 23 | buffer_names_parts 
= b.split('#') 24 | if len(buffer_names_parts) != 2: 25 | raise ValueError('Invalid buffer name : '%b) 26 | attribute_name, buffer_idx = buffer_names_parts 27 | template += f"\tif (buffer_name == \"{b}\"): return self.{attribute_name}[{buffer_idx}].init_value()\n" 28 | return template 29 | 30 | def is_buffer_empty_template(buffer_names): 31 | template = "@torch.jit.export\ndef is_buffer_empty(self, buffer_name: str) -> bool:\n" 32 | if len(buffer_names.value) == 0: 33 | template += "\treturn True" 34 | return template 35 | for b in buffer_names.value: 36 | buffer_names_parts = b.split('#') 37 | if len(buffer_names_parts) != 2: 38 | raise ValueError('Invalid buffer name : '%b) 39 | attribute_name, buffer_idx = buffer_names_parts 40 | template += f"\tif (buffer_name == \"{b}\"): return not self.{attribute_name}[{buffer_idx}].has_value\n" 41 | template += "\treturn True" 42 | return template 43 | -------------------------------------------------------------------------------- /src/shared/static_buffer.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | 5 | 6 | template 7 | class StaticBuffer { 8 | 9 | public: 10 | using BufferData = std::vector; 11 | using BufferShape = std::array; 12 | 13 | private: 14 | std::shared_ptr _data; 15 | BufferShape _dims; 16 | double _samplingRate; 17 | 18 | public: 19 | 20 | StaticBuffer() { 21 | _dims = BufferShape({0, 0}); 22 | }; 23 | 24 | StaticBuffer(const size_t dim1, const size_t dim2, const double samplingRate = -1) { 25 | _dims = BufferShape({dim1, dim2}); 26 | _data = std::make_shared(get_empty_buffer(dim1, dim2)); 27 | _samplingRate = samplingRate; 28 | }; 29 | 30 | StaticBuffer(BufferData data, const double samplingRate) { 31 | _dims = BufferShape({data.dims(), data[0].dims()}); 32 | _data = std::make_shared(data); 33 | _samplingRate = samplingRate; 34 | }; 35 | 36 | StaticBuffer(const StaticBuffer &buffer) { 37 | _dims = buffer._dims; 38 | _data = 
buffer._data; 39 | _samplingRate = buffer._samplingRate; 40 | } 41 | 42 | BufferShape dims () { 43 | return _dims; 44 | } 45 | 46 | double sr() { return _samplingRate; } 47 | 48 | static BufferData get_empty_buffer(const size_t dim1, const size_t dim2) { 49 | return BufferData(dim1 * dim2, 0.f); 50 | }; 51 | 52 | void clear() { 53 | _data.get()->clear(); 54 | } 55 | 56 | void reset() { 57 | std::fill(_data.get()->begin(), _data.get()->end(), 0.0f); 58 | }; 59 | 60 | torch::Tensor to_tensor() { 61 | auto obj = torch::from_blob(_data.get()->data(), {(long long)_dims[0], (long long)_dims[1]}, torch::kFloat); 62 | return obj; 63 | }; 64 | 65 | float& at(const size_t dim1, const size_t dim2) { 66 | return _data.get()->at(dim1 * _dims[0] + dim2); 67 | } 68 | 69 | void put(data_type data, const size_t dim1, const size_t dim2) { 70 | at(dim1, dim2) = data; 71 | } 72 | }; -------------------------------------------------------------------------------- /src/source/attributes.py: -------------------------------------------------------------------------------- 1 | try: 2 | import nn_tilde 3 | except ImportError: 4 | import os, sys 5 | sys.path.append(os.path.join(os.path.dirname(__file__) , ".." , "..")) 6 | import python_tools as nn_tilde 7 | 8 | from typing import List 9 | import torch 10 | 11 | 12 | class AttributeFoo(nn_tilde.Module): 13 | def __init__(self): 14 | super().__init__() 15 | self._valid_animals_ = torch.jit.Attribute(["horse", "goose", "chicken", "pig", "dog", "cat"], List[str]) 16 | self.register_attribute("attr_int", 0) 17 | self.register_attribute("attr_float", 0.) 
@torch.jit.export
def forward(self, x: torch.Tensor):
    """Return a 2-channel signal broadcasting the current attribute values.

    Channel 0 carries ``attr_int`` and channel 1 carries ``attr_float``;
    the time axis (last dim) length follows the input ``x``.
    """
    # Replace the channel axis (dim -2) with 2 channels; torch.Size + tuple
    # concatenation yields the output shape.
    # NOTE(review): torch.zeros produces float32 on CPU regardless of x's
    # dtype/device -- confirm that is intended for the scripted export.
    x = torch.zeros(x.shape[:-2] + (2, x.shape[-1]))
    x[..., 0, :] = self.attr_int[0]
    x[..., 1, :] = self.attr_float[0]
    return x
self.min_samples = min_samples 17 | self.max_samples = max_samples 18 | self.sr = -1 if sr is None else int(sr) 19 | 20 | def check_bounds(self, x: torch.Tensor) -> bool: 21 | is_ok = True 22 | if self.min_samples != -1: 23 | is_ok = is_ok and x.shape[-1] >= self.min_samples 24 | if self.max_samples != -1: 25 | is_ok = is_ok and x.shape[-1] <= self.max_samples 26 | return is_ok 27 | 28 | def from_buffer(self, buffer: "Buffer"): 29 | return self.set_value(buffer.value, sr = buffer.sr) 30 | 31 | @staticmethod 32 | def copy(buffer: "Buffer"): 33 | buffer_n = Buffer() 34 | buffer_n.from_buffer(buffer) 35 | return buffer_n 36 | 37 | def set_value(self, x: torch.Tensor, sr: int | float | None = None) -> int: 38 | _has_valid_bounds = self.check_bounds(x) 39 | if not _has_valid_bounds: 40 | return -1 41 | if sr is None: 42 | self.sr = -1 43 | else: 44 | self.sr = int(sr) 45 | self.value = x.clone() 46 | return 0 47 | 48 | @property 49 | def shape(self) -> List[int]: 50 | if self.has_value: 51 | return self.value.shape 52 | else: 53 | return torch.Size([]) 54 | 55 | @property 56 | def has_value(self) -> bool: 57 | return self.value.numel() != 0 58 | 59 | def get_value(self) -> torch.Tensor: 60 | return self.value 61 | 62 | def init_value(self) -> None: 63 | self.value = torch.zeros(0, 0, 0) 64 | 65 | def to_str(self) -> str: 66 | if self.has_value: 67 | out = f"Buffer(min={self.value.min()}, max={self.value.max()}, sr={self.sr}, shape={self.shape})" 68 | else: 69 | out = "Buffer(empty)" 70 | return out 71 | 72 | 73 | BUFFER_ATTRIBUTES_TYPE = Dict[str, Buffer] -------------------------------------------------------------------------------- /src/frontend/puredata/shared/pd_model_download.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "m_pd.h" 3 | #include "../../../shared/model_download.h" 4 | 5 | namespace fs = std::filesystem; 6 | 7 | template 8 | class PdModelDownloader : public ModelDownloader 9 | { 10 | 
const pd_struct *d_parent; 11 | 12 | public: 13 | PdModelDownloader(const pd_struct *parent) : d_parent(parent) 14 | { 15 | d_cert_path = cert_path_from_path(""); 16 | } 17 | PdModelDownloader(const pd_struct *parent, const std::string path) : d_parent(parent) 18 | { 19 | d_path = path; 20 | d_cert_path = cert_path_from_path(path); 21 | } 22 | 23 | void print_to_parent(const std::string &message, const std::string &canal) override 24 | { 25 | if (d_parent != nullptr) 26 | { 27 | if (canal == "cout") 28 | { 29 | post(message.c_str()); 30 | } 31 | else if (canal == "cwarn") 32 | { 33 | post(message.c_str()); 34 | } 35 | else if (canal == "cerr") 36 | { 37 | pd_error(d_parent, "nn~: %s", message.c_str()); 38 | } 39 | } 40 | }; 41 | 42 | fs::path cert_path_from_path(fs::path path) 43 | { 44 | #if defined(_WIN32) || defined(_WIN64) 45 | const char *homeDir = std::getenv("USERPROFILE"); 46 | std::string perm_path_str = std::string(homeDir) + "/Documents/Pd/externals/nn_tilde/cacert.pem"; 47 | find_and_replace_char(perm_path_str, '/', '\\'); 48 | fs::path perm_path = perm_path_str; 49 | if (fs::exists(perm_path)) 50 | { 51 | return perm_path; 52 | } 53 | perm_path = "C:\\Program Files\\Pd\\extra\\cacert.pem"; 54 | if (fs::exists(perm_path)) 55 | { 56 | return perm_path; 57 | } 58 | std::string path_str = path.string(); 59 | find_and_replace_char(path_str, '/', '\\'); 60 | perm_path = path_str + "\\cacert.pem"; 61 | return perm_path; 62 | #elif defined(__APPLE__) || defined(__MACH__) 63 | std::string perm_path = "/etc/ssl/cert.pem"; 64 | #elif defined(__linux__) 65 | std::string perm_path = "/etc/ssl/certs/ca-certificates.crt"; 66 | #else 67 | std::string perm_path = ""; 68 | #endif 69 | return perm_path; 70 | } 71 | 72 | void fill_dict(void *dict_to_fill) override 73 | { 74 | return; 75 | } 76 | }; 77 | -------------------------------------------------------------------------------- /src/source/buffers.py: 
def get_loudness(self, x: torch.Tensor) -> float:
    """Return the RMS (root-mean-square) amplitude of ``x`` as a float."""
    rms = torch.sqrt(torch.mean(torch.square(x)))
    return float(rms.item())
// NOTE(review): fill() is currently a NO-OP -- the body that would write
// `value` across the whole backing buffer is commented out, so callers see no
// effect and the parameter is ignored. Confirm whether this was deliberately
// disabled (e.g. to avoid clobbering head/tail state) before re-enabling the
// commented lines.
template <class in_type, class out_type>
void circular_buffer<in_type, out_type>::fill(out_type value) {
  // if (_max_size > 0) {
  //   std::fill(_buffer.get(), _buffer.get() + _max_size, value);
  // }
}
circular_buffer::get(out_type *output_array, int N) { 72 | if (!_max_size) 73 | return; 74 | 75 | while (N--) { 76 | if (empty()) { 77 | *(output_array++) = out_type(); 78 | } else { 79 | *(output_array++) = _buffer[_tail]; 80 | _tail = (_tail + 1) % _max_size; 81 | _full = false; 82 | } 83 | } 84 | } 85 | 86 | template 87 | void circular_buffer::reset() { 88 | _head = _tail; 89 | _count = 0; 90 | _full = false; 91 | } -------------------------------------------------------------------------------- /python_tools/test/test_buffers.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | import torch 3 | import pytest 4 | from pathlib import Path 5 | 6 | sys.path.append(str(Path(__file__).parent / ".." / "..")) 7 | from python_tools import Module, Buffer 8 | from utils import out_dir, test_name, import_code 9 | from typing import Tuple 10 | 11 | 12 | class BufferFoo(Module): 13 | buffer: Tuple[Buffer] 14 | def __init__(self, test_method: bool = False): 15 | super().__init__() 16 | self.register_attribute("buf", Buffer(None, 64, 2048)) 17 | self.register_method('loudness', 1, 1, 1, 1, test_method=test_method) 18 | self.register_method('shape', 1, 1, 2, 1, test_method=test_method) 19 | self.register_method('get_sr', 1, 1, 1, 1, test_method=test_method) 20 | self.finish() 21 | 22 | def get_loudness(self, x: torch.Tensor) -> float: 23 | return x.pow(2).mean().sqrt().item() 24 | 25 | @torch.jit.export 26 | def loudness(self, x: torch.Tensor): 27 | buffer = self.buf[0] 28 | if buffer.has_value: 29 | loudness = self.get_loudness(buffer.value) 30 | return torch.full_like(x, fill_value=loudness) 31 | else: 32 | return torch.zeros_like(x) 33 | 34 | @torch.jit.export 35 | def shape(self, x: torch.Tensor): 36 | is_batched = x.ndim > 2 37 | if not is_batched: 38 | x = x[None] 39 | buffer = self.buf[0] 40 | if buffer.has_value: 41 | out = torch.zeros(x.shape[0], 2, x.shape[-1]) 42 | out[:, 0, :] = buffer.value.shape[0] 43 | out[:, 
@pytest.mark.parametrize('module_class', [BufferFoo])
def test_buffer_attributes(module_class, out_dir, test_name):
    # Eager-mode smoke test: every registered method and the generated buffer
    # accessors must run without raising.
    module = module_class()
    module.loudness(torch.randn(1, 1, 16))
    module.shape(torch.randn(1, 1, 16))
    module.get_sr(torch.randn(1, 1, 16))
    module.get_buf()
    module.set_buf((torch.zeros(1, 64), 44100))
    # NOTE(review): the attribute is registered as "buf", so the generated
    # buffer key is presumably "buf#0", not "buffer#0" -- confirm against the
    # register_attribute naming convention; this call may be exercising the
    # unknown-name fallback rather than the real buffer.
    module.set_buffer_attribute("buffer#0", torch.zeros(1, 64), 44100)
"default_fontface" : 0, 18 | "default_fontname" : "Arial", 19 | "gridonopen" : 1, 20 | "gridsize" : [ 15.0, 15.0 ], 21 | "gridsnaponopen" : 1, 22 | "objectsnaponopen" : 1, 23 | "statusbarvisible" : 2, 24 | "toolbarvisible" : 1, 25 | "lefttoolbarpinned" : 0, 26 | "toptoolbarpinned" : 0, 27 | "righttoolbarpinned" : 0, 28 | "bottomtoolbarpinned" : 0, 29 | "toolbars_unpinned_last_save" : 2, 30 | "tallnewobj" : 0, 31 | "boxanimatetime" : 200, 32 | "enablehscroll" : 1, 33 | "enablevscroll" : 1, 34 | "devicewidth" : 0.0, 35 | "description" : "", 36 | "digest" : "", 37 | "tags" : "", 38 | "style" : "", 39 | "subpatcher_template" : "", 40 | "assistshowspatchername" : 0, 41 | "boxes" : [ { 42 | "box" : { 43 | "id" : "obj-6", 44 | "maxclass" : "newobj", 45 | "numinlets" : 0, 46 | "numoutlets" : 1, 47 | "outlettype" : [ "" ], 48 | "patching_rect" : [ 302.0, 23.0, 38.0, 22.0 ], 49 | "text" : "in 2 2" 50 | } 51 | 52 | } 53 | , { 54 | "box" : { 55 | "code" : "x = pow(in1, in2);\nout1 = 0.8862269254527579 * x + 0.23201366653465444 * pow(x,3) + 0.12755617530559793 * pow(x,5) + 0.08655212924154752 * pow(x,7) + 0.0649596177453854 * pow(x,9) + 0.051731281984616365 * pow(x,11) + 0.04283672065179733 * pow(x,13) + 0.03646592930853161 * pow(x,15) + 0.03168900502160544 * pow(x,17) + 0.027980632964995214 * pow(x,19);\n", 56 | "fontface" : 0, 57 | "fontname" : "", 58 | "fontsize" : 12.0, 59 | "id" : "obj-5", 60 | "maxclass" : "codebox", 61 | "numinlets" : 2, 62 | "numoutlets" : 1, 63 | "outlettype" : [ "" ], 64 | "patching_rect" : [ 85.0, 93.0, 200.0, 200.0 ] 65 | } 66 | 67 | } 68 | , { 69 | "box" : { 70 | "id" : "obj-1", 71 | "maxclass" : "newobj", 72 | "numinlets" : 0, 73 | "numoutlets" : 1, 74 | "outlettype" : [ "" ], 75 | "patching_rect" : [ 50.0, 14.0, 28.0, 22.0 ], 76 | "text" : "in 1" 77 | } 78 | 79 | } 80 | , { 81 | "box" : { 82 | "id" : "obj-4", 83 | "maxclass" : "newobj", 84 | "numinlets" : 1, 85 | "numoutlets" : 0, 86 | "patching_rect" : [ 176.0, 418.0, 35.0, 22.0 ], 87 | "text" : 
"out 1" 88 | } 89 | 90 | } 91 | ], 92 | "lines" : [ { 93 | "patchline" : { 94 | "destination" : [ "obj-5", 0 ], 95 | "source" : [ "obj-1", 0 ] 96 | } 97 | 98 | } 99 | , { 100 | "patchline" : { 101 | "destination" : [ "obj-4", 0 ], 102 | "source" : [ "obj-5", 0 ] 103 | } 104 | 105 | } 106 | , { 107 | "patchline" : { 108 | "destination" : [ "obj-5", 1 ], 109 | "source" : [ "obj-6", 0 ] 110 | } 111 | 112 | } 113 | ] 114 | } 115 | 116 | } 117 | -------------------------------------------------------------------------------- /src/patchers/latent_remote/frand.maxpat: -------------------------------------------------------------------------------- 1 | { 2 | "patcher" : { 3 | "fileversion" : 1, 4 | "appversion" : { 5 | "major" : 8, 6 | "minor" : 6, 7 | "revision" : 5, 8 | "architecture" : "x64", 9 | "modernui" : 1 10 | } 11 | , 12 | "classnamespace" : "box", 13 | "rect" : [ 59.0, 119.0, 640.0, 480.0 ], 14 | "bglocked" : 0, 15 | "openinpresentation" : 0, 16 | "default_fontsize" : 12.0, 17 | "default_fontface" : 0, 18 | "default_fontname" : "Arial", 19 | "gridonopen" : 1, 20 | "gridsize" : [ 15.0, 15.0 ], 21 | "gridsnaponopen" : 1, 22 | "objectsnaponopen" : 1, 23 | "statusbarvisible" : 2, 24 | "toolbarvisible" : 1, 25 | "lefttoolbarpinned" : 0, 26 | "toptoolbarpinned" : 0, 27 | "righttoolbarpinned" : 0, 28 | "bottomtoolbarpinned" : 0, 29 | "toolbars_unpinned_last_save" : 0, 30 | "tallnewobj" : 0, 31 | "boxanimatetime" : 200, 32 | "enablehscroll" : 1, 33 | "enablevscroll" : 1, 34 | "devicewidth" : 0.0, 35 | "description" : "", 36 | "digest" : "", 37 | "tags" : "", 38 | "style" : "", 39 | "subpatcher_template" : "", 40 | "assistshowspatchername" : 0, 41 | "boxes" : [ { 42 | "box" : { 43 | "comment" : "", 44 | "id" : "obj-1", 45 | "index" : 1, 46 | "maxclass" : "inlet", 47 | "numinlets" : 0, 48 | "numoutlets" : 1, 49 | "outlettype" : [ "" ], 50 | "patching_rect" : [ 231.0, 45.0, 30.0, 30.0 ] 51 | } 52 | 53 | } 54 | , { 55 | "box" : { 56 | "comment" : "", 57 | "id" : "obj-7", 58 
| "index" : 1, 59 | "maxclass" : "outlet", 60 | "numinlets" : 1, 61 | "numoutlets" : 0, 62 | "patching_rect" : [ 231.0, 190.0, 30.0, 30.0 ] 63 | } 64 | 65 | } 66 | , { 67 | "box" : { 68 | "id" : "obj-6", 69 | "maxclass" : "newobj", 70 | "numinlets" : 6, 71 | "numoutlets" : 1, 72 | "outlettype" : [ "" ], 73 | "patching_rect" : [ 231.0, 159.0, 97.0, 22.0 ], 74 | "text" : "scale 0. 1. #1 #2" 75 | } 76 | 77 | } 78 | , { 79 | "box" : { 80 | "id" : "obj-5", 81 | "maxclass" : "newobj", 82 | "numinlets" : 2, 83 | "numoutlets" : 1, 84 | "outlettype" : [ "float" ], 85 | "patching_rect" : [ 231.0, 126.0, 52.0, 22.0 ], 86 | "text" : "/ 10000." 87 | } 88 | 89 | } 90 | , { 91 | "box" : { 92 | "id" : "obj-4", 93 | "maxclass" : "newobj", 94 | "numinlets" : 2, 95 | "numoutlets" : 1, 96 | "outlettype" : [ "" ], 97 | "patching_rect" : [ 231.0, 97.0, 86.0, 22.0 ], 98 | "text" : "random 10000" 99 | } 100 | 101 | } 102 | ], 103 | "lines" : [ { 104 | "patchline" : { 105 | "destination" : [ "obj-4", 0 ], 106 | "source" : [ "obj-1", 0 ] 107 | } 108 | 109 | } 110 | , { 111 | "patchline" : { 112 | "destination" : [ "obj-5", 0 ], 113 | "source" : [ "obj-4", 0 ] 114 | } 115 | 116 | } 117 | , { 118 | "patchline" : { 119 | "destination" : [ "obj-6", 0 ], 120 | "source" : [ "obj-5", 0 ] 121 | } 122 | 123 | } 124 | , { 125 | "patchline" : { 126 | "destination" : [ "obj-7", 0 ], 127 | "source" : [ "obj-6", 0 ] 128 | } 129 | 130 | } 131 | ] 132 | } 133 | 134 | } 135 | -------------------------------------------------------------------------------- /src/frontend/maxmsp/mc.nn_tilde/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The Min-DevKit Authors. All rights reserved. 2 | # Use of this source code is governed by the MIT License found in the License.md file. 
3 | 4 | cmake_minimum_required(VERSION 3.10 FATAL_ERROR) 5 | 6 | set(CMAKE_CXX_STANDARD 20) 7 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 8 | set(CMAKE_CXX_EXTENSIONS OFF) 9 | 10 | set(C74_MIN_API_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../min-api) 11 | include(${C74_MIN_API_DIR}/script/min-pretarget.cmake) 12 | 13 | if (APPLE) 14 | set(CMAKE_OSX_DEPLOYMENT_TARGET "10.12") 15 | endif() 16 | 17 | 18 | ############################################################# 19 | # MAX EXTERNAL 20 | ############################################################# 21 | 22 | execute_process( 23 | COMMAND git describe --tags 24 | WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} 25 | OUTPUT_VARIABLE VERSION 26 | OUTPUT_STRIP_TRAILING_WHITESPACE 27 | ) 28 | message(${VERSION}) 29 | add_definitions(-DVERSION="${VERSION}") 30 | 31 | 32 | 33 | set( 34 | SOURCE_FILES 35 | mc.nn_tilde.cpp 36 | ) 37 | 38 | add_library( 39 | ${PROJECT_NAME} 40 | MODULE 41 | ${SOURCE_FILES} 42 | ) 43 | 44 | 45 | include(${C74_MIN_API_DIR}/script/min-posttarget.cmake) 46 | 47 | if (MSVC) 48 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") 49 | set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20) 50 | set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) 51 | target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20) 52 | endif() 53 | 54 | include_directories( 55 | "${C74_INCLUDES}" 56 | "${CMAKE_CURRENT_SOURCE_DIR}/../shared" 57 | "${CMAKE_CURRENT_SOURCE_DIR}/../../shared" 58 | ) 59 | 60 | if (MSVC) 61 | include_directories(${VCPKG_INCLUDE_DIR}) 62 | link_directories(${VCPKG_LIB_DIR}) 63 | endif() 64 | 65 | 66 | target_link_libraries(${PROJECT_NAME} PRIVATE backend) 67 | 68 | if (UNIX) 69 | set(CONDA_ENV_PATH "${CMAKE_SOURCE_DIR}/../env") 70 | set(CURL_INCLUDE_DIR "${CONDA_ENV_PATH}/include") 71 | set(CURL_LIBRARY "${CONDA_ENV_PATH}/lib/libcurl.dylib") 72 | include_directories(${CURL_INCLUDE_DIR}) 73 | elseif(MSVC) 74 | set(VCPKG_PATH "${CMAKE_SOURCE_DIR}/../vcpkg") 75 | 
set(CURL_INCLUDE_DIR "${VCPKG_PATH}/packages/curl_x64-windows/include") 76 | set(CURL_LIBRARY "${VCPKG_PATH}/packages/curl_x64-windows/lib/libcurl.lib") 77 | endif() 78 | 79 | target_link_libraries(${PROJECT_NAME} PRIVATE nlohmann_json::nlohmann_json) 80 | target_link_libraries(${PROJECT_NAME} PRIVATE ${CURL_LIBRARY}) 81 | 82 | 83 | if (APPLE) # SEARCH FOR TORCH DYLIB IN THE LOADER FOLDER 84 | set_target_properties(${PROJECT_NAME} PROPERTIES 85 | BUILD_WITH_INSTALL_RPATH FALSE 86 | LINK_FLAGS "-Wl,-rpath,@loader_path/" 87 | ) 88 | endif() 89 | 90 | 91 | if (APPLE) # COPY DYLIBS IN THE LOADER FOLDER 92 | add_custom_command( 93 | TARGET ${PROJECT_NAME} 94 | POST_BUILD 95 | COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/../env/ssl/cert.pem" "$" 96 | COMMAND ${CMAKE_SOURCE_DIR}/../env/bin/python ${CMAKE_SOURCE_DIR}/../install/dylib_fix.py -p "$" -o "${CMAKE_SOURCE_DIR}/support" -l "${torch_dir}/libtorch" "${CMAKE_BINARY_DIR}/_deps" "${CMAKE_SOURCE_DIR}/../env" "${HOMEBREW_PREFIX}" --sign_id "${SIGN_ID}" 97 | COMMENT "Fixing libraries, certificates, permissions, codesigning, quarantine" 98 | ) 99 | 100 | endif() 101 | 102 | -------------------------------------------------------------------------------- /src/frontend/maxmsp/shared/max_model_download.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "c74_min.h" 10 | #include 11 | #include "dict_utils.h" 12 | #include "../../../shared/model_download.h" 13 | 14 | 15 | 16 | #ifndef MAX_DOWNLOADS 17 | #define MAX_DOWNLOADS 2 18 | #endif 19 | 20 | 21 | 22 | namespace max = c74::max; 23 | namespace min = c74::min; 24 | 25 | 26 | class MaxModelDownloader: public ModelDownloader { 27 | c74::min::object_base* d_parent; 28 | 29 | public: 30 | MaxModelDownloader(c74::min::object_base* obj); 31 | MaxModelDownloader(c74::min::object_base* obj, std::string external_name); 32 | 
// Locate the TLS certificate bundle relative to the external's own path.
// Windows: support/cacert.pem two levels up from the external; macOS: cert.pem
// inside the external bundle's Contents/MacOS; otherwise the parent directory.
fs::path cert_path_from_path(fs::path path) {
#if defined(_WIN32) || defined(_WIN64)
    std::string perm_path = (path / ".." / ".." / "support" / "cacert.pem").string();
    find_and_replace_char(perm_path, '/', '\\');
#elif defined(__APPLE__) || defined(__MACH__)
    // Relies on fs::path's implicit conversion to std::string on POSIX
    // (path::operator string_type); unlike the Windows branch there is no
    // explicit .string() call.
    std::string perm_path = path / "Contents" / "MacOS" / "cert.pem";
#else
    std::string perm_path = (path / "..").string();
#endif
    return perm_path;
}
/ "nn_tilde" / "models"; 59 | min::path path = min::path("nn~", min::path::filetype::external); 60 | std::string path_str = path; 61 | if (path) { 62 | set_model_directory(path_str); 63 | d_cert_path = cert_path_from_path(fs::path(path_str)); 64 | } 65 | } 66 | 67 | MaxModelDownloader::MaxModelDownloader(c74::min::object_base* obj, std::string external_name): d_parent(obj) { 68 | min::path path = min::path(external_name, min::path::filetype::external); 69 | std::string path_str = path; 70 | fs::path fs_path(path_str); 71 | if (path) { 72 | d_cert_path = cert_path_from_path(fs::path(path_str)); 73 | set_model_directory(path_str); 74 | } 75 | } 76 | 77 | MaxModelDownloader::MaxModelDownloader(c74::min::object_base* obj, fs::path download_location): ModelDownloader(download_location), d_parent(obj) { 78 | d_path = download_location; 79 | } 80 | 81 | 82 | void MaxModelDownloader::print_to_parent(const std::string &message, const std::string &canal) { 83 | std::string method = "print"; 84 | min::atoms args = {string_id(), canal, message}; 85 | d_parent->try_call(method, args); 86 | } 87 | 88 | void MaxModelDownloader::fill_dict(void* dict_to_fill) { 89 | if (dict_to_fill == nullptr) { 90 | throw "dict is empty"; 91 | } 92 | min::dict* max_dict = static_cast(dict_to_fill); 93 | if (!max_dict->valid()) { 94 | throw "dict is invalid"; 95 | } 96 | auto json_models = d_available_models; 97 | nn_tools::fill_dict_with_json(max_dict, json_models); 98 | } -------------------------------------------------------------------------------- /src/frontend/maxmsp/mcs.nn_tilde/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The Min-DevKit Authors. All rights reserved. 2 | # Use of this source code is governed by the MIT License found in the License.md file. 
3 | 4 | cmake_minimum_required(VERSION 3.10 FATAL_ERROR) 5 | 6 | set(CMAKE_CXX_STANDARD 20) 7 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 8 | set(CMAKE_CXX_EXTENSIONS OFF) 9 | 10 | set(C74_MIN_API_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../min-api) 11 | include(${C74_MIN_API_DIR}/script/min-pretarget.cmake) 12 | 13 | if (APPLE) 14 | set(CMAKE_OSX_DEPLOYMENT_TARGET "10.12") 15 | endif() 16 | 17 | 18 | ############################################################# 19 | # MAX EXTERNAL 20 | ############################################################# 21 | 22 | execute_process( 23 | COMMAND git describe --tags 24 | WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} 25 | OUTPUT_VARIABLE VERSION 26 | OUTPUT_STRIP_TRAILING_WHITESPACE 27 | ) 28 | message(${VERSION}) 29 | add_definitions(-DVERSION="${VERSION}") 30 | 31 | 32 | set( 33 | SOURCE_FILES 34 | mcs.nn_tilde.cpp 35 | ) 36 | 37 | add_library( 38 | ${PROJECT_NAME} 39 | MODULE 40 | ${SOURCE_FILES} 41 | ) 42 | 43 | 44 | include(${C74_MIN_API_DIR}/script/min-posttarget.cmake) 45 | 46 | if (MSVC) 47 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") 48 | set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20) 49 | set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) 50 | target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20) 51 | endif() 52 | 53 | include_directories( 54 | "${C74_INCLUDES}" 55 | "${CMAKE_CURRENT_SOURCE_DIR}/../shared" 56 | "${CMAKE_CURRENT_SOURCE_DIR}/../../shared" 57 | ) 58 | 59 | if (MSVC) 60 | include_directories(${VCPKG_INCLUDE_DIR}) 61 | link_directories(${VCPKG_LIB_DIR}) 62 | endif() 63 | 64 | 65 | target_link_libraries(${PROJECT_NAME} PRIVATE backend) 66 | 67 | if (UNIX) 68 | set(CONDA_ENV_PATH "${CMAKE_SOURCE_DIR}/../env") 69 | set(CURL_INCLUDE_DIR "${CONDA_ENV_PATH}/include") 70 | set(CURL_LIBRARY "${CONDA_ENV_PATH}/lib/libcurl.dylib") 71 | include_directories(${CURL_INCLUDE_DIR}) 72 | elseif(MSVC) 73 | set(VCPKG_PATH "${CMAKE_SOURCE_DIR}/../vcpkg") 74 | set(CURL_INCLUDE_DIR 
"${VCPKG_PATH}/packages/curl_x64-windows/include") 75 | set(CURL_LIBRARY "${VCPKG_PATH}/packages/curl_x64-windows/lib/libcurl.lib") 76 | endif() 77 | 78 | target_link_libraries(${PROJECT_NAME} PRIVATE nlohmann_json::nlohmann_json) 79 | target_link_libraries(${PROJECT_NAME} PRIVATE ${CURL_LIBRARY}) 80 | 81 | 82 | if (APPLE) # SEARCH FOR TORCH DYLIB IN THE LOADER FOLDER 83 | set_target_properties(${PROJECT_NAME} PROPERTIES 84 | BUILD_WITH_INSTALL_RPATH FALSE 85 | LINK_FLAGS "-Wl,-rpath,@loader_path/" 86 | ) 87 | endif() 88 | 89 | set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -g") 90 | 91 | if (APPLE) # COPY DYLIBS IN THE LOADER FOLDER 92 | 93 | add_custom_command( 94 | TARGET ${PROJECT_NAME} 95 | POST_BUILD 96 | COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/../env/ssl/cert.pem" "$" 97 | COMMAND ${CMAKE_SOURCE_DIR}/../env/bin/python ${CMAKE_SOURCE_DIR}/../install/dylib_fix.py -p "$" -o "${CMAKE_SOURCE_DIR}/support" -l "${torch_dir}/libtorch" "${CMAKE_BINARY_DIR}/_deps" "${CMAKE_SOURCE_DIR}/../env" "${HOMEBREW_PREFIX}" --sign_id "${SIGN_ID}" 98 | COMMENT "Fixing libraries, certificates, permissions, codesigning, quarantine" 99 | ) 100 | 101 | endif() 102 | 103 | -------------------------------------------------------------------------------- /src/frontend/maxmsp/nn.info/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The Min-DevKit Authors. All rights reserved. 2 | # Use of this source code is governed by the MIT License found in the License.md file. 
# BUGFIX: the original had a trailing comma after "${C74_INCLUDES}". CMake
# arguments are whitespace-separated -- the comma became part of the argument,
# producing a nonexistent include path. The sibling externals (mc.nn_tilde,
# mcs.nn_tilde) use the comma-free form; this makes nn.info consistent.
include_directories(
  "${C74_INCLUDES}"
  "${CMAKE_CURRENT_SOURCE_DIR}/../shared"
  "${CMAKE_CURRENT_SOURCE_DIR}/../../shared"
)
include(${C74_MIN_API_DIR}/script/min-posttarget.cmake) 75 | 76 | if (MSVC) 77 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") 78 | set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20) 79 | set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) 80 | target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20) 81 | endif() 82 | 83 | 84 | if (APPLE) # SEARCH FOR TORCH DYLIB IN THE LOADER FOLDER 85 | set_target_properties(${PROJECT_NAME} PROPERTIES 86 | BUILD_WITH_INSTALL_RPATH FALSE 87 | LINK_FLAGS "-Wl,-rpath,@loader_path/" 88 | ) 89 | endif() 90 | 91 | 92 | 93 | 94 | if (APPLE) # COPY DYLIBS IN THE LOADER FOLDER 95 | 96 | add_custom_command( 97 | TARGET ${PROJECT_NAME} 98 | POST_BUILD 99 | COMMAND echo "signing with ${SIGN_ID}" 100 | COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/../env/ssl/cert.pem" "$" 101 | COMMAND ${CMAKE_SOURCE_DIR}/../env/bin/python ${CMAKE_SOURCE_DIR}/../install/dylib_fix.py -p "$" -o "${CMAKE_SOURCE_DIR}/support" -l "${torch_dir}/libtorch" "${CMAKE_BINARY_DIR}/_deps" "${CMAKE_SOURCE_DIR}/../env" "${HOMEBREW_PREFIX}" --sign_id "${SIGN_ID}" 102 | COMMENT "Fixing libraries, certificates, permissions, codesigning, quarantine" 103 | ) 104 | 105 | endif() 106 | -------------------------------------------------------------------------------- /src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10 FATAL_ERROR) 2 | project(nn_tilde) 3 | 4 | set(CMAKE_POSITION_INDEPENDENT_CODE ON) 5 | 6 | configure_file( 7 | "${CMAKE_SOURCE_DIR}/../install/max-linker-flags.txt" "${CMAKE_SOURCE_DIR}/frontend/maxmsp/min-api/max-sdk-base/script/max-linker-flags.txt" 8 | COPYONLY 9 | ) 10 | 11 | if (NOT DEFINED SIGN_ID) 12 | if (DEFINED ENV{SIGN_ID}) 13 | set(SIGN_ID $ENV{SIGN_ID}) 14 | else() 15 | set(SIGN_ID "-") 16 | endif() 17 | endif() 18 | 19 | message("Copying ${CMAKE_SOURCE_DIR}/../install/MaxAPI.lib" 
"${CMAKE_SOURCE_DIR}/frontend/maxmsp/min-api/max-sdk-base/c74support/max-includes/x64" ) 20 | configure_file( 21 | "${CMAKE_SOURCE_DIR}/../install/MaxAPI.lib" "${CMAKE_SOURCE_DIR}/frontend/maxmsp/min-api/max-sdk-base/c74support/max-includes/x64/MaxAPI.lib" 22 | COPYONLY 23 | ) 24 | 25 | configure_file( 26 | "${CMAKE_SOURCE_DIR}/../install/patch_with_vst.sh" "${CMAKE_SOURCE_DIR}/extras" 27 | COPYONLY 28 | ) 29 | 30 | 31 | include(${CMAKE_SOURCE_DIR}/cmake/add_torch.cmake) 32 | list(PREPEND CMAKE_PREFIX_PATH "${torch_dir}/libtorch") 33 | find_package(Torch REQUIRED PATHS ${torch_dir}/libtorch/lib) 34 | 35 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") 36 | 37 | set(CONDA_ENV_PATH "${CMAKE_SOURCE_DIR}/../env") 38 | if (MSVC) 39 | set(CMAKE_PREFIX_PATH "${CMAKE_PREFIX_PATH};${CONDA_ENV_PATH}") 40 | else() 41 | set(CMAKE_PREFIX_PATH "${CMAKE_PREFIX_PATH};${CONDA_ENV_PATH}") 42 | endif() 43 | 44 | 45 | if (APPLE) 46 | set(CMAKE_OSX_DEPLOYMENT_TARGET "10.12") 47 | endif() 48 | 49 | set(CMAKE_CXX_STANDARD 20) 50 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 51 | cmake_policy(SET CMP0076 NEW) 52 | 53 | 54 | # import json 55 | add_subdirectory(json) 56 | 57 | if (UNIX) 58 | if (APPLE) 59 | add_compile_options(-std=c++20) 60 | set(CMAKE_CXX_FLAGS "-faligned-allocation") 61 | if (CMAKE_OSX_ARCHITECTURES STREQUAL "") 62 | set(CMAKE_OSX_ARCHITECTURES ${CMAKE_HOST_SYSTEM_PROCESSOR}) 63 | endif() 64 | message("Building for architecture : ${CMAKE_OSX_ARCHITECTURES} ") 65 | endif() 66 | endif() 67 | 68 | 69 | 70 | add_subdirectory(backend) # DEEP LEARNING BACKEND 71 | 72 | execute_process( 73 | COMMAND git describe --tags 74 | WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} 75 | OUTPUT_VARIABLE VERSION 76 | OUTPUT_STRIP_TRAILING_WHITESPACE 77 | ) 78 | 79 | if (NOT DEFINED NO_PUREDATA) 80 | set(NO_PUREDATA 0) 81 | endif() 82 | 83 | if (NO_PUREDATA EQUAL 0) 84 | if ("${PUREDATA_INCLUDE_DIR}" STREQUAL "") 85 | set(PUREDATA_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/pd_include") 86 | 
execute_process( 87 | COMMAND cmake -E make_directory "${PUREDATA_INCLUDE_DIR}" 88 | ) 89 | file(DOWNLOAD "https://raw.githubusercontent.com/pure-data/pure-data/master/src/m_pd.h" "${PUREDATA_INCLUDE_DIR}/m_pd.h") 90 | endif() 91 | add_subdirectory(frontend/puredata/nn_tilde) # PURE DATA EXTERNAL 92 | else() 93 | if (UNIX AND NOT APPLE) 94 | message(FATAL_ERROR "NO_PUREDATA needs to be off on Linux, otherwise no task are available") 95 | endif() 96 | endif() 97 | 98 | configure_file(${CMAKE_CURRENT_SOURCE_DIR}/../package-info.json.in "${CMAKE_CURRENT_SOURCE_DIR}/package-info.json") 99 | 100 | if(APPLE OR MSVC) 101 | # MAX MSP EXTERNAL 102 | add_subdirectory(frontend/maxmsp/nn.info) 103 | add_subdirectory(frontend/maxmsp/nn_tilde) 104 | add_subdirectory(frontend/maxmsp/mc.nn_tilde) 105 | add_subdirectory(frontend/maxmsp/mcs.nn_tilde) 106 | endif() 107 | -------------------------------------------------------------------------------- /src/frontend/maxmsp/nn_tilde/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The Min-DevKit Authors. All rights reserved. 2 | # Use of this source code is governed by the MIT License found in the License.md file. 
3 | 4 | cmake_minimum_required(VERSION 3.10 FATAL_ERROR) 5 | 6 | set(CMAKE_CXX_STANDARD 20) 7 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 8 | set(CMAKE_CXX_EXTENSIONS OFF) 9 | 10 | set(C74_MIN_API_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../min-api) 11 | include(${C74_MIN_API_DIR}/script/min-pretarget.cmake) 12 | 13 | if (APPLE) 14 | set(CMAKE_OSX_DEPLOYMENT_TARGET "10.12") 15 | endif() 16 | 17 | 18 | ############################################################# 19 | # MAX EXTERNAL 20 | ############################################################# 21 | 22 | execute_process( 23 | COMMAND git describe --tags 24 | WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} 25 | OUTPUT_VARIABLE VERSION 26 | OUTPUT_STRIP_TRAILING_WHITESPACE 27 | ) 28 | message(${VERSION}) 29 | add_definitions(-DVERSION="${VERSION}") 30 | 31 | 32 | 33 | set( 34 | SOURCE_FILES 35 | nn_tilde.cpp 36 | ) 37 | 38 | add_library( 39 | ${PROJECT_NAME} 40 | MODULE 41 | ${SOURCE_FILES} 42 | ) 43 | 44 | 45 | include(${C74_MIN_API_DIR}/script/min-posttarget.cmake) 46 | 47 | if (MSVC) 48 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") 49 | set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20) 50 | set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) 51 | target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20) 52 | endif() 53 | 54 | include_directories( 55 | "${C74_INCLUDES}" 56 | "${CMAKE_CURRENT_SOURCE_DIR}/../shared" 57 | "${CMAKE_CURRENT_SOURCE_DIR}/../../shared" 58 | ) 59 | 60 | include_directories( 61 | "${MAX_SDK_INCLUDES}" 62 | "${MAX_SDK_MSP_INCLUDES}" 63 | "${MAX_SDK_JIT_INCLUDES}" 64 | ) 65 | 66 | if (MSVC) 67 | include_directories(${VCPKG_INCLUDE_DIR}) 68 | link_directories(${VCPKG_LIB_DIR}) 69 | endif() 70 | 71 | target_include_directories(${PROJECT_NAME} PRIVATE "${CMAKE_SOURCE_DIR}/frontend/maxmsp/min-api/max-sdk-base/c74support/max-includes") 72 | target_link_libraries(${PROJECT_NAME} PRIVATE backend) 73 | 74 | if (UNIX) 75 | set(CONDA_ENV_PATH 
"${CMAKE_SOURCE_DIR}/../env") 76 | set(CURL_INCLUDE_DIR "${CONDA_ENV_PATH}/include") 77 | set(CURL_LIBRARY "${CONDA_ENV_PATH}/lib/libcurl.dylib") 78 | include_directories(${CURL_INCLUDE_DIR}) 79 | elseif(MSVC) 80 | set(VCPKG_PATH "${CMAKE_SOURCE_DIR}/../vcpkg") 81 | set(CURL_INCLUDE_DIR "${VCPKG_PATH}/packages/curl_x64-windows/include") 82 | set(CURL_LIBRARY "${VCPKG_PATH}/packages/curl_x64-windows/lib/libcurl.lib") 83 | endif() 84 | 85 | target_link_libraries(${PROJECT_NAME} PRIVATE nlohmann_json::nlohmann_json) 86 | target_link_libraries(${PROJECT_NAME} PRIVATE ${CURL_LIBRARY}) 87 | 88 | 89 | if (APPLE) # SEARCH FOR TORCH DYLIB IN THE LOADER FOLDER 90 | set_target_properties(${PROJECT_NAME} PROPERTIES 91 | BUILD_WITH_INSTALL_RPATH FALSE 92 | # LINK_FLAGS "-Wl,-rpath,@loader_path/" 93 | LINK_FLAGS "-Wl" 94 | ) 95 | 96 | add_custom_command( 97 | TARGET ${PROJECT_NAME} 98 | POST_BUILD 99 | COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/../env/ssl/cert.pem" "$" 100 | COMMAND ${CMAKE_SOURCE_DIR}/../env/bin/python ${CMAKE_SOURCE_DIR}/../install/dylib_fix.py -p "$" -o "${CMAKE_SOURCE_DIR}/support" -l "${torch_dir}/libtorch" "${CMAKE_BINARY_DIR}/_deps" "${CMAKE_SOURCE_DIR}/../env" "${HOMEBREW_PREFIX}" --sign_id "${SIGN_ID}" 101 | COMMENT "Fixing libraries, certificates, permissions, codesigning, quarantine" 102 | ) 103 | endif() 104 | 105 | 106 | if (MSVC) 107 | set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20) 108 | endif() 109 | -------------------------------------------------------------------------------- /src/frontend/puredata/shared/pd_buffer_manager.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "m_pd.h" 3 | #include "../../../backend/backend.h" 4 | #include "../../../shared/static_buffer.h" 5 | 6 | #ifndef CHECK_BUFFERS_INTERVAL 7 | #define CHECK_BUFFERS_INTERVAL 100 8 | #endif 9 | 10 | 11 | template 12 | class PdBufferManager { 13 | 14 | Backend *m_backend; 15 | pd_class 
*m_obj; 16 | t_clock *m_clock; 17 | std::vector m_buffer_attributes; 18 | std::unordered_map m_array_to_buffer; 19 | double m_sample_rate; 20 | 21 | public: 22 | bool m_monitor_arrays; 23 | 24 | PdBufferManager(Backend *backend, pd_class* nn_obj): 25 | m_backend(backend), m_obj(nn_obj) 26 | { } 27 | 28 | void init_buffer_list(Backend *backend = nullptr) { 29 | if (backend != nullptr) { 30 | m_backend = backend; 31 | } 32 | // clear previous buffers 33 | m_buffer_attributes.clear(); 34 | // init model buffers 35 | std::vector model_buffers; 36 | try { 37 | model_buffers = m_backend->get_buffer_attributes(); 38 | } catch (std::exception &e) { 39 | throw std::string("could not retrieve buffers from model. Caught error : ") + e.what(); 40 | } 41 | // if (!model_buffers.size()) { 42 | // std::cout << "no buffers found" << std::endl; 43 | // } 44 | // create buffer references for each of model buffers 45 | for (auto & element : model_buffers) { 46 | // std::cout << "adding buffer " << element << " to buffer manager."; 47 | m_buffer_attributes.push_back(element); 48 | post("nn~: %s", element.c_str()); 49 | } 50 | } 51 | 52 | void add_array_monitor(const std::string target_pd_buffer, const std::string buffer_name) { 53 | t_garray *garray; 54 | if (!(garray = (t_garray *)pd_findbyclass(gensym(target_pd_buffer.c_str()), garray_class))) { 55 | throw std::string("table " + buffer_name + " not found"); 56 | } 57 | m_array_to_buffer[target_pd_buffer] = buffer_name; 58 | } 59 | 60 | template 61 | StaticBuffer static_buffer_from_name(const std::string buffer_name) { 62 | t_garray *garray; 63 | if (!(garray = (t_garray *)pd_findbyclass(gensym(buffer_name.c_str()), garray_class))) { 64 | throw std::string("table " + buffer_name + " not found"); 65 | } 66 | t_word *table_data; 67 | int table_size; 68 | if (!(garray_getfloatwords(garray, &table_size, &table_data))) { 69 | throw std::string("could not access table ") + std::string(buffer_name); 70 | } 71 | auto buffer = StaticBuffer(1, 
table_size, (double)sys_getsr()); 72 | for (int i(0); i < table_size; ++i) 73 | buffer.put(static_cast((table_data + i)->w_float), 0, i); 74 | return buffer; 75 | } 76 | 77 | template 78 | void append_if_buffer_element(Backend::BufferMap &buffers, std::string target_pd_buffer, std::string attribute_name, int index) { 79 | if (m_backend->is_buffer_element_of_attribute(attribute_name, index)) { 80 | auto buffer_name = m_backend->get_buffer_name(attribute_name, index); 81 | buffers[buffer_name] = static_buffer_from_name(target_pd_buffer); 82 | } else { 83 | std::ostringstream error; 84 | // error << std::to_string(index) << "th element of attribute " << attribute_name << "does not seem to be a buffer"; 85 | throw error.str(); 86 | } 87 | } 88 | 89 | auto get_buffer_attributes() { return m_buffer_attributes; } 90 | 91 | }; -------------------------------------------------------------------------------- /src/backend/backend.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include "../shared/static_buffer.h" 6 | #include 7 | #include 8 | 9 | 10 | 11 | struct MethodProperties { 12 | std::string name = ""; 13 | int channels_in = -1; 14 | int channels_out = -1; 15 | int ratio_in = -1; 16 | int ratio_out = -1; 17 | }; 18 | 19 | struct AttributeProperties { 20 | std::string name = ""; 21 | std::vector attribute_types = {}; 22 | }; 23 | 24 | struct ModelInfo { 25 | using MethodDict = std::unordered_map; 26 | using AttributeDict = std::unordered_map; 27 | MethodDict method_properties = {}; 28 | AttributeDict attribute_properties = {}; 29 | }; 30 | 31 | struct LockedModel { 32 | torch::jit::script::Module* model; 33 | std::mutex mutex; 34 | }; 35 | 36 | 37 | class Backend { 38 | 39 | protected: 40 | torch::jit::script::Module m_model; 41 | int m_loaded, m_in_dim, m_in_ratio, m_out_dim, m_out_ratio; 42 | std::string m_path; 43 | std::mutex m_model_mutex; 44 | std::vector 
m_available_methods; 45 | std::vector m_buffer_attributes; 46 | c10::DeviceType m_device; 47 | bool m_use_gpu; 48 | std::vector retrieve_buffer_attributes(); 49 | std::unique_ptr set_attribute_thread; 50 | double m_sr; 51 | 52 | public: 53 | using DataType = float; 54 | using ArgsType = std::vector; 55 | using KwargsType = std::unordered_map; 56 | using BufferMap = std::map>; 57 | 58 | Backend(); 59 | void perform(std::vector &in_buffer, 60 | std::vector &out_buffer, 61 | std::string method, 62 | int n_batches, int n_out_channels, int n_vec); 63 | bool has_method(std::string method_name); 64 | bool has_settable_attribute(std::string attribute); 65 | std::vector get_available_methods(LockedModel *model = nullptr); 66 | std::vector get_available_attributes(); 67 | std::vector get_settable_attributes(); 68 | std::vector get_attribute(std::string attribute_name); 69 | std::string get_attribute_as_string(std::string attribute_name); 70 | void set_attribute(std::string attribute_name, 71 | std::vector attribute_args, 72 | const Backend::BufferMap &buffer_array); 73 | 74 | // buffer attributes 75 | bool is_buffer_element_of_attribute(std::string attribute_name, int attribute_elt_idx); 76 | bool is_tensor_element_of_attribute(std::string attribute_name, int attribute_elt_idx); 77 | // auto get_buffer_attribtues() { return m_buffer_attributes; } 78 | std::string get_buffer_name(std::string attribute_name, int attribute_elt_idx); 79 | int update_buffer(std::string buffer_id, StaticBuffer &buffer); 80 | int reset_buffer(std::string); 81 | 82 | std::vector get_method_params(std::string method); 83 | int get_higher_ratio(); 84 | int load(std::string path, double sampleRate, const std::string* target_method = nullptr); 85 | int reload(); 86 | void set_sample_rate(double sampleRate); 87 | bool is_loaded(); 88 | torch::jit::script::Module get_model() { return m_model; } 89 | void use_gpu(bool value); 90 | std::vector get_buffer_attributes(); 91 | 92 | ArgsType empty_args() { 
return ArgsType(); } 93 | KwargsType empty_kwargs() { return KwargsType(); } 94 | std::pair empty_inputs() { 95 | return std::make_pair(empty_args(), empty_kwargs()); 96 | } 97 | 98 | ModelInfo get_model_info(); 99 | const std::unordered_map id_to_string_hash = { 100 | {0, "bool"}, 101 | {1, "int"}, 102 | {2, "float"}, 103 | {3, "string"}, 104 | {4, "tensor"}, 105 | {5, "buffer"} 106 | }; 107 | }; 108 | -------------------------------------------------------------------------------- /install/patch_with_vst.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Default values 4 | pd=0 5 | max=0 6 | 7 | function print_help() { 8 | echo "Usage: $0 [--pd_path[=val]] (default: ~/Documents/Pd) [--max[=val]] (by default, look in all ~/Documents/Max X/ ; if specified, look for externals sub-folder)" 9 | } 10 | 11 | # Parse arguments 12 | while [[ $# -gt 0 ]]; do 13 | case "$1" in 14 | --pd=*) 15 | pd=1 16 | pd_path="${1#*=}" 17 | shift 18 | ;; 19 | --pd) 20 | pd=1 21 | shift 22 | ;; 23 | --max=*) 24 | max=1 25 | max_path="${1#*=}" 26 | shift 27 | ;; 28 | --max) 29 | max=1 30 | shift 31 | ;; 32 | --help|-h) 33 | print_help 34 | exit 0 35 | ;; 36 | *) 37 | echo "Unknown option: $1" 38 | shift 39 | ;; 40 | esac 41 | done 42 | 43 | 44 | function patch_max_external() { 45 | find "$1" -name "nn_tilde" -type d -mindepth 2 -print0 | while IFS= read -r -d '' ext_dir; do 46 | if [[ -d "$ext_dir/externals" ]]; then 47 | echo "found nn_tilde at $ext_dir"; 48 | find "$ext_dir/externals" -name "*.mxo" -print0 | while IFS= read -r -d '' ext_path; do 49 | # echo "found external at $ext_path"; 50 | ext_name=$(basename "$ext_path") 51 | find "$ext_dir/support" -name "*.dylib" -print0 | while IFS= read -r -d '' dylib_path; do 52 | dylib_name=$(basename "$dylib_path") 53 | echo "fixing library : $dylib_name" 54 | new_path="/Library/Application Support/ACIDS/RAVE/$dylib_name" 55 | if [[ ! 
-e "$new_path" ]]; then 56 | echo "[WARNING] library not found : $new_path. Patch may not work" 57 | fi 58 | install_name_tool -change "@loader_path/../../../../support/$dylib_name" "/Library/Application Support/ACIDS/RAVE/$dylib_name" "${ext_path}/Contents/MacOS/${ext_name%.*}" 2> /dev/null 59 | done 60 | codesign --deep --force --sign - "${ext_path}/Contents/MacOS/${ext_name%.*}" 61 | done 62 | fi 63 | done 64 | } 65 | 66 | function patch_pd_external() { 67 | ext_dir=$1 68 | if [[ ! "$(basename $ext_dir)" == "nn_tilde" ]]; then 69 | ext_dir="${ext_dir}/externals/nn_tilde" 70 | fi 71 | if [[ -e $(realpath $ext_dir) ]]; then 72 | find "$ext_dir" -name "nn~.pd_*" -print0 | while IFS= read -r -d '' ext_path; do 73 | ext_name=$(basename "$ext_path") 74 | find "$ext_dir" -name "*.dylib" -print0 | while IFS= read -r -d '' dylib_path; do 75 | dylib_name=$(basename "$dylib_path") 76 | echo "fixing library : $dylib_name" 77 | new_path="/Library/Application Support/ACIDS/RAVE/$dylib_name" 78 | if [[ ! -e "$new_path" ]]; then 79 | echo "[WARNING] library not found : $new_path. Patch may not work" 80 | fi 81 | echo install_name_tool -change "@rpath/$dylib_name" "/Library/Application Support/ACIDS/RAVE/$dylib_name" "${ext_path}" 2> /dev/null 82 | done 83 | codesign --deep --force --sign - "${ext_path}" 84 | done 85 | else 86 | echo "folder $ext_dir not found". 
87 | fi 88 | } 89 | 90 | 91 | if [[ "$max" -eq 1 ]]; then 92 | if [[ -n "$max_path" ]]; then 93 | patch_max_external "$max_path" 94 | else 95 | find ~/Documents -maxdepth 1 -name "Max *" -type d -print0 | while IFS= read -r -d '' max_dir; do 96 | if [[ -d $max_dir ]]; then 97 | patch_max_external "$max_dir" 98 | else 99 | echo "$max_dir does not exists" 100 | fi 101 | done 102 | fi 103 | fi 104 | 105 | if [[ "$pd" -eq 1 ]]; then 106 | if [[ -n "$pd_path" ]]; then 107 | patch_pd_external "$pd_path" 108 | else 109 | patch_pd_external "$HOME/Documents/Pd" 110 | fi 111 | fi -------------------------------------------------------------------------------- /src/extras/patch_with_vst.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Default values 4 | pd=0 5 | max=0 6 | 7 | function print_help() { 8 | echo "Usage: $0 [--pd_path[=val]] (default: ~/Documents/Pd) [--max[=val]] (by default, look in all ~/Documents/Max X/ ; if specified, look for externals sub-folder)" 9 | } 10 | 11 | # Parse arguments 12 | while [[ $# -gt 0 ]]; do 13 | case "$1" in 14 | --pd=*) 15 | pd=1 16 | pd_path="${1#*=}" 17 | shift 18 | ;; 19 | --pd) 20 | pd=1 21 | shift 22 | ;; 23 | --max=*) 24 | max=1 25 | max_path="${1#*=}" 26 | shift 27 | ;; 28 | --max) 29 | max=1 30 | shift 31 | ;; 32 | --help|-h) 33 | print_help 34 | exit 0 35 | ;; 36 | *) 37 | echo "Unknown option: $1" 38 | shift 39 | ;; 40 | esac 41 | done 42 | 43 | 44 | function patch_max_external() { 45 | find "$1" -name "nn_tilde" -type d -mindepth 2 -print0 | while IFS= read -r -d '' ext_dir; do 46 | if [[ -d "$ext_dir/externals" ]]; then 47 | echo "found nn_tilde at $ext_dir"; 48 | find "$ext_dir/externals" -name "*.mxo" -print0 | while IFS= read -r -d '' ext_path; do 49 | # echo "found external at $ext_path"; 50 | ext_name=$(basename "$ext_path") 51 | find "$ext_dir/support" -name "*.dylib" -print0 | while IFS= read -r -d '' dylib_path; do 52 | dylib_name=$(basename 
"$dylib_path") 53 | echo "fixing library : $dylib_name" 54 | new_path="/Library/Application Support/ACIDS/RAVE/$dylib_name" 55 | if [[ ! -e "$new_path" ]]; then 56 | echo "[WARNING] library not found : $new_path. Patch may not work" 57 | fi 58 | install_name_tool -change "@loader_path/../../../../support/$dylib_name" "/Library/Application Support/ACIDS/RAVE/$dylib_name" "${ext_path}/Contents/MacOS/${ext_name%.*}" 2> /dev/null 59 | done 60 | codesign --deep --force --sign - "${ext_path}/Contents/MacOS/${ext_name%.*}" 61 | done 62 | fi 63 | done 64 | } 65 | 66 | function patch_pd_external() { 67 | ext_dir=$1 68 | if [[ ! "$(basename $ext_dir)" == "nn_tilde" ]]; then 69 | ext_dir="${ext_dir}/externals/nn_tilde" 70 | fi 71 | if [[ -e $(realpath $ext_dir) ]]; then 72 | find "$ext_dir" -name "nn~.pd_*" -print0 | while IFS= read -r -d '' ext_path; do 73 | ext_name=$(basename "$ext_path") 74 | find "$ext_dir" -name "*.dylib" -print0 | while IFS= read -r -d '' dylib_path; do 75 | dylib_name=$(basename "$dylib_path") 76 | echo "fixing library : $dylib_name" 77 | new_path="/Library/Application Support/ACIDS/RAVE/$dylib_name" 78 | if [[ ! -e "$new_path" ]]; then 79 | echo "[WARNING] library not found : $new_path. Patch may not work" 80 | fi 81 | echo install_name_tool -change "@rpath/$dylib_name" "/Library/Application Support/ACIDS/RAVE/$dylib_name" "${ext_path}" 2> /dev/null 82 | done 83 | codesign --deep --force --sign - "${ext_path}" 84 | done 85 | else 86 | echo "folder $ext_dir not found". 
87 | fi 88 | } 89 | 90 | 91 | if [[ "$max" -eq 1 ]]; then 92 | if [[ -n "$max_path" ]]; then 93 | patch_max_external "$max_path" 94 | else 95 | find ~/Documents -maxdepth 1 -name "Max *" -type d -print0 | while IFS= read -r -d '' max_dir; do 96 | if [[ -d $max_dir ]]; then 97 | patch_max_external "$max_dir" 98 | else 99 | echo "$max_dir does not exists" 100 | fi 101 | done 102 | fi 103 | fi 104 | 105 | if [[ "$pd" -eq 1 ]]; then 106 | if [[ -n "$max_path" ]]; then 107 | patch_pd_external "$pd_path" 108 | else 109 | patch_pd_external "$HOME/Documents/Pd" 110 | fi 111 | fi -------------------------------------------------------------------------------- /src/frontend/maxmsp/shared/array_tools.h: -------------------------------------------------------------------------------- 1 | #include "ext.h" // standard Max include, always required 2 | #include "ext_obex.h" // required for new style Max object 3 | #include "ext_atomarray.h" // atomarrays 4 | 5 | #include "../../../shared/static_buffer.h" 6 | 7 | #include 8 | 9 | 10 | namespace ArrayTools { 11 | 12 | namespace min = c74::min; 13 | namespace max = c74::max; 14 | 15 | extern "C" max::t_atomarray* arrayobj_findregistered_retain(max::t_symbol* name); 16 | extern "C" max::t_max_err arrayobj_release(max::t_atomarray* aa); 17 | // extern void atomarray_dispose(max::t_atomarray* x); 18 | // extern max::t_atomarray* arrayobj_register(max::t_atomarray* aa, max::t_symbol** name); 19 | // extern max::t_max_err arrayobj_unregister(max::t_atomarray* aa); 20 | // extern max::t_atomarray* arrayobj_findregistered_clone(max::t_symbol* name); 21 | // extern max::t_symbol* arrayobj_namefromptr(max::t_atomarray* aa); 22 | // extern void* max::outlet_array(max::t_outlet* x, max::t_symbol* s); 23 | 24 | 25 | bool is_array(const min::atom &atom) { 26 | bool result = false; 27 | auto name_max = max::atom_getsym(&atom); 28 | max::t_atomarray* aa = arrayobj_findregistered_retain(name_max); 29 | if (aa) { 30 | arrayobj_release(aa); 31 | 
return true; 32 | } 33 | return false; 34 | } 35 | 36 | long get_length(const min::atom &atom) { 37 | bool result = false; 38 | auto name_max = max::atom_getsym(&atom); 39 | max::t_atomarray* aa = arrayobj_findregistered_retain(name_max); 40 | if (aa) { 41 | long size = atomarray_getsize(aa); 42 | arrayobj_release(aa); 43 | return size; 44 | } 45 | return -1; 46 | } 47 | 48 | void fill_long_vector(std::vector &array, const min::atom &atom) { 49 | auto name_max = max::atom_getsym(&atom); 50 | max::t_atomarray* aa = arrayobj_findregistered_retain(name_max); 51 | if (aa) { 52 | if (array.size() != 0) { 53 | throw "array not empty"; 54 | } 55 | max::t_atomarray* clone = (max::t_atomarray*)object_clone((max::t_object*)aa); // CLONE, do not potentially modify upstream data 56 | max::t_atom atom_elt; 57 | for (long i = 0; i < max::atomarray_getsize(clone); i++) { 58 | max::atomarray_getindex(clone, i, &atom_elt); 59 | array.emplace_back(max::atom_getlong(&atom_elt)); 60 | } 61 | arrayobj_release(aa); 62 | max::atomarray_clear(clone); 63 | max::object_free(clone); 64 | } else { 65 | throw "could not create array"; 66 | } 67 | } 68 | 69 | void fill_float_vector(std::vector &array, const min::atom &atom) { 70 | auto name_max = max::atom_getsym(&atom); 71 | max::t_atomarray* aa = arrayobj_findregistered_retain(name_max); 72 | if (aa) { 73 | if (array.size() != 0) { 74 | throw "array not empty"; 75 | } 76 | max::t_atomarray* clone = (max::t_atomarray*)object_clone((max::t_object*)aa); // CLONE, do not potentially modify upstream data 77 | max::t_atom atom_elt; 78 | for (long i = 0; i < max::atomarray_getsize(clone); i++) { 79 | max::atomarray_getindex(clone, i, &atom_elt); 80 | array.emplace_back(max::atom_getfloat(&atom_elt)); 81 | } 82 | arrayobj_release(aa); 83 | max::atomarray_clear(clone); 84 | max::object_free(clone); 85 | } else { 86 | throw "could not create array"; 87 | } 88 | } 89 | 90 | StaticBuffer static_buffer_from_array(const min::atom &atom) { 91 | auto 
name_max = max::atom_getsym(&atom); 92 | max::t_atomarray* aa = arrayobj_findregistered_retain(name_max); 93 | if (aa) { 94 | max::t_atomarray* clone = (max::t_atomarray*)object_clone((max::t_object*)aa); // CLONE, do not potentially modify upstream data 95 | long array_size = max::atomarray_getsize(clone); 96 | StaticBuffer out_buffer(1, array_size); 97 | max::t_atom atom_elt; 98 | for (long i = 0; i < max::atomarray_getsize(clone); i++) { 99 | max::atomarray_getindex(clone, i, &atom_elt); 100 | out_buffer.put(max::atom_getfloat(&atom_elt), 0, i); 101 | } 102 | arrayobj_release(aa); 103 | max::atomarray_clear(clone); 104 | max::object_free(clone); 105 | return out_buffer; 106 | } else { 107 | throw std::string("could not create array"); 108 | } 109 | } 110 | 111 | } -------------------------------------------------------------------------------- /src/frontend/maxmsp/shared/dict_utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "c74_min.h" 3 | #include 4 | #include "../../../backend/backend.h" 5 | 6 | namespace nn_tools { 7 | 8 | namespace min = c74::min; 9 | namespace max = c74::max; 10 | 11 | void append_to_dictionary(max::t_dictionary* d, max::t_symbol* key, max::t_dictionary* value) { 12 | auto parsed_value = reinterpret_cast(value); 13 | max::dictionary_appenddictionary(d, key, parsed_value); 14 | } 15 | 16 | min::dict dict_from_model_info(const ModelInfo & info) { 17 | auto str = std::stringstream(); 18 | auto new_dict = max::dictionary_new(); 19 | 20 | // // append methods 21 | std::vector method_names {}; 22 | auto method_dict = max::dictionary_new(); 23 | for (auto method_pair: info.method_properties) { 24 | method_names.push_back(method_pair.first); 25 | auto current_method_dict = max::dictionary_new(); 26 | auto method_props = method_pair.second; 27 | max::dictionary_appendlong(current_method_dict, min::symbol("channels_in"), (long)method_props.channels_in); 28 | 
max::dictionary_appendlong(current_method_dict, min::symbol("channels_out"), (long)method_props.channels_out); 29 | max::dictionary_appendlong(current_method_dict, min::symbol("ratio_in"), (long)method_props.ratio_in); 30 | max::dictionary_appendlong(current_method_dict, min::symbol("ratio_out"), (long)method_props.ratio_out); 31 | append_to_dictionary(method_dict, min::symbol(method_pair.first), current_method_dict); 32 | } 33 | append_to_dictionary(new_dict, min::symbol("methods"), method_dict); 34 | 35 | // // append methods 36 | std::vector attribute_names {}; 37 | auto attribute_dict = max::dictionary_new(); 38 | for (auto attribute_pair: info.attribute_properties) { 39 | attribute_names.push_back(attribute_pair.first); 40 | auto current_attribute_dict = max::dictionary_new(); 41 | auto attr_types = attribute_pair.second.attribute_types; 42 | min::atoms attr_types_atoms(attr_types.size()); 43 | std::transform(attr_types.begin(), attr_types.end(), attr_types_atoms.begin(), 44 | [](const std::string& str) { 45 | return min::atom(str); 46 | }); 47 | max::dictionary_appendatoms(current_attribute_dict, min::symbol("attribute_type"), attr_types_atoms.size(), attr_types_atoms.data()); 48 | append_to_dictionary(attribute_dict, min::symbol(attribute_pair.first), current_attribute_dict); 49 | } 50 | append_to_dictionary(new_dict, min::symbol("attributes"), attribute_dict); 51 | auto out_dict = min::dict(new_dict); 52 | return out_dict; 53 | } 54 | 55 | void json_walk(max::t_dictionary* dict, nlohmann::json json) { 56 | for (auto& el : json.items()) { 57 | // std::cout << "key: " << el.key() << ", value:" << el.value() << '\n'; 58 | min::symbol key = el.key(); 59 | auto val = el.value(); 60 | if (val.is_null()) { 61 | } else if ((json.is_boolean())||(json.is_number_integer())||(json.is_number_unsigned())) { 62 | max::dictionary_appendlong(dict, key, val.get()); 63 | } else if (val.is_number_float()) { 64 | max::dictionary_appendfloat(dict, key, val.get()); 65 | } else if 
(val.is_string()) { 66 | max::dictionary_appendsym(dict, key, min::symbol(val.get())); 67 | } else if (val.is_array()) { 68 | std::vector atoms; 69 | for (const auto& v: val){ 70 | if ((v.is_boolean())||(v.is_number_integer())||(v.is_number_unsigned())) { 71 | atoms.emplace_back(v.get()); 72 | } else if (v.is_number_float()) { 73 | atoms.emplace_back(v.get()); 74 | } else if (v.is_string()) { 75 | atoms.emplace_back(v.get()); 76 | } 77 | } 78 | max::dictionary_appendatoms(dict, key, atoms.size(), atoms.data()); 79 | } else if (val.is_object()) { 80 | auto sub_dict = max::dictionary_new(); 81 | json_walk(sub_dict, val); 82 | append_to_dictionary(dict, key, sub_dict); 83 | } else { 84 | std::cerr << "Unknown type" << std::endl; 85 | } 86 | } 87 | 88 | } 89 | 90 | void fill_dict_with_json(min::dict* dict_to_fill, nlohmann::json json) { 91 | auto global_dict = max::dictionary_new(); 92 | json_walk(global_dict, json); 93 | auto min_dict = min::dict(global_dict); 94 | dict_to_fill->copyunique(min_dict); 95 | } 96 | } 97 | 98 | -------------------------------------------------------------------------------- /python_tools/test/test_attributes.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | import torch 3 | import pytest 4 | from pathlib import Path 5 | 6 | sys.path.append(str(Path(__file__).parent / ".." / "..")) 7 | from python_tools import Module, TYPE_HASH, Buffer 8 | from types import MethodType 9 | from typing import NoReturn 10 | from utils import out_dir, test_name, import_code 11 | 12 | 13 | class AttributeFoo(Module): 14 | def __init__(self): 15 | super().__init__() 16 | self.register_attribute("attr_int", 0) 17 | self.register_attribute("attr_float", 0.) 
18 | self.register_attribute("attr_str", "apple") 19 | self.register_attribute("attr_bool", False) 20 | self.register_method("forward", 1, 1, 2, 1, test_method=False) 21 | self.finish() 22 | 23 | @torch.jit.export 24 | def forward(self, x: torch.Tensor): 25 | x = torch.zeros(x.shape[:-2] + (2, x.shape[-1])) 26 | x[..., 0, :] = self.attr_int[0] 27 | x[..., 1, :] = self.attr_float[0] 28 | return x 29 | 30 | 31 | class ListAttributeFoo(Module): 32 | def __init__(self, n: int, attribute_type: type): 33 | super().__init__() 34 | self.n = n 35 | if attribute_type == torch.Tensor: 36 | attr = tuple([torch.tensor(i) for i in range(n)]) 37 | else: 38 | attr = tuple([attribute_type(i) for i in range(n)]) 39 | self.register_attribute("attr", attr) 40 | self.register_method("forward", 1, 1, n, 1, output_labels=["attribute %d"%i for i in range(len(attr))]) 41 | self.finish() 42 | 43 | def forward(self, x): 44 | out = torch.zeros(x.shape[:1] + (len(self.attr),) + x.shape[2:]) 45 | for i, val in enumerate(self.attr): 46 | out[..., i, :] = float(val) 47 | return out 48 | 49 | def _default(attr_hash: int): 50 | type_hash_r = {v: k for k, v in TYPE_HASH.items()} 51 | attr_hash = type_hash_r[int(attr_hash)] 52 | if attr_hash in [bool, int, float, str]: 53 | return attr_hash(1) 54 | elif attr_hash in [torch.Tensor]: 55 | return torch.tensor(0) 56 | elif attr_hash in [Buffer]: 57 | return (torch.tensor(0), 44100) 58 | else: 59 | raise TypeError('type not known') 60 | 61 | 62 | class TensorAttributeFoo(Module): 63 | def __init__(self): 64 | super().__init__() 65 | self.register_attribute("a", torch.zeros(4)) 66 | self.register_method("forward", 1, 1, 4, 1, test_method=False) 67 | self.finish() 68 | 69 | @torch.jit.export 70 | def forward(self, x: torch.Tensor): 71 | out = torch.zeros(x.shape[0], 4, x.shape[2]) 72 | for i in range(4): 73 | out[:, i] = self.a[0][None, i] 74 | return out 75 | 76 | 77 | @pytest.mark.parametrize('module_class', [AttributeFoo]) 78 | def 
@pytest.mark.parametrize('n', [1, 4])
@pytest.mark.parametrize('attr_type', [str, bool, int, float])
@pytest.mark.parametrize('module_class', [ListAttributeFoo])
def test_list_attributes(n, attr_type, module_class, out_dir, test_name):
    """Round-trip a tuple attribute of each scalar type through its
    generated getter/setter, then script and save the module.

    BUGFIX: this test was previously defined twice, byte-for-byte
    identical; the second definition shadowed the first at import time,
    so pytest only ever collected one copy. The duplicate is removed.
    """
    module = module_class(n, attr_type)
    # Exercise the generated getter.
    module.get_attr()
    # Exercise the generated setter with n default values of the type.
    module.set_attr(*([_default(module.attr_params[0])] * n))
    # Smoke-test the forward pass and the TorchScript export path.
    module(torch.zeros(1, 1, 16))
    scripted = torch.jit.script(module)
    torch.jit.save(scripted, out_dir/f"{test_name}.ts")
# Download a zip archive from `url`, extract it into `out`, and delete the
# temporary archive (torch_cc.zip) afterwards.
function (download_library url out)
  message("download ${url} to ${out}...")
  file(DOWNLOAD
    ${url}
    ${out}/torch_cc.zip
    SHOW_PROGRESS
  )
  # BUGFIX: extraction and deletion were previously two COMMANDs inside a
  # single execute_process(), which CMake runs as a *pipeline* (concurrently,
  # stdout chained) rather than sequentially, and the second used the
  # non-existent executable `remove`, so the archive was never deleted.
  # Run the two steps as separate, sequential processes and use the portable
  # `cmake -E remove` instead.
  execute_process(COMMAND ${CMAKE_COMMAND} -E tar -xf torch_cc.zip
                  WORKING_DIRECTORY ${out})
  execute_process(COMMAND ${CMAKE_COMMAND} -E remove -f ${out}/torch_cc.zip)
endfunction()
59 | execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${torch_dir}) 60 | set(torch_url "https://download.pytorch.org/libtorch/cpu/libtorch-macos-arm64-${torch_version}.zip") 61 | download_library(${torch_url} ${torch_dir}) 62 | endif() 63 | endif() 64 | else() 65 | 66 | execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${torch_dir}) 67 | download_library("https://download.pytorch.org/libtorch/cpu/libtorch-win-shared-with-deps-${torch_version}%2Bcpu.zip" ${torch_dir}) 68 | endif() 69 | # Check if architecutre == ARM64 70 | # if (NOT DEFINED APPLE_ARM64) 71 | # set (APPLE_ARM64 (CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")) 72 | # endif() 73 | ## If ARM, download both libraries and pre-compile UB libraries 74 | # if (APPLE_ARM64) 75 | # download arm64 library 76 | # if (NOT IS_DIRECTORY ${torch_dir}) 77 | # download_library("https://anaconda.org/pytorch/pytorch/${torch_version}/download/osx-arm64/pytorch-${torch_version}-py3.10_0.tar.bz2" ${torch_dir}-arm64) 78 | # execute_process(COMMAND mkdir ${torch_dir}) 79 | # execute_process(COMMAND cp -r ${torch_dir}-arm64/lib/python3.10/site-packages/torch ${torch_dir}/libtorch) 80 | # endif() 81 | # # download x86 library 82 | # if (EXISTS ${CMAKE_SOURCE_DIR}/../install/torch_x86) 83 | # execute_process(COMMAND cp -r ${CMAKE_SOURCE_DIR}/../install/torch_x86 ${torch_dir}-x86) 84 | # else() 85 | # if (NOT DEFINED TORCH_MAC_UB_URL) 86 | # message(FATAL_ERROR "If not provided, please give a valid URL for Apple universal library") 87 | # endif() 88 | # download_library(${TORCH_MAC_X86_URL} ${torch_dir}) 89 | # endif() 90 | # message("found libtorch for x86 at : " ${torch_dir}-x86) 91 | # # export UB libs to main path 92 | # execute_process(COMMAND mkdir ${torch_dir}-x86) 93 | # execute_process(COMMAND cp /opt/homebrew/opt/llvm/lib/libomp.dylib ${torch_dir}/libtorch/lib/) 94 | # execute_process(COMMAND find ${torch_dir}/libtorch/lib -maxdepth 1 -type f -execdir lipo -create ${torch_dir}/libtorch/lib/{} 
# Find the libraries again
# (second search: after a potential download above, torch_lib must now
# resolve inside the freshly extracted libtorch tree)
message("${torch_dir}")
find_library(torch_lib
  NAMES ${torch_lib_name}
  PATHS ${torch_dir}/libtorch/lib
)

# Abort configuration outright if libtorch is still missing.
if (NOT torch_lib)
  message(FATAL_ERROR "torch could not be included")
endif()
    def _compute_spectrogram(self, x: torch.Tensor):
        """Append `x` to the persistent overlap buffer and return the
        magnitude spectrogram of the concatenated signal.

        Side effects: resets `self.audio_buffer` (and recomputes
        `self.freq`) when the batch size changes, and stores the last
        `nfft - hop_size` samples as overlap for the next call.
        """
        # X : B x hop_size
        # NOTE(review): assumes x is (batch, 1, time) — confirm against callers.
        if self.audio_buffer.shape[0] != x.shape[0]:
            print("Resizing and resetting buffer - the batch size has changed")
            self.audio_buffer = torch.zeros((x.shape[0], 1, self.nfft - self.hop_size)).to(x)
            self.freq = torch.fft.rfftfreq(n = self.nfft, d = 1.0 / self.get_sample_rate())
        # Using the previous buffer information
        x = torch.cat([self.audio_buffer, x], dim=-1)
        # Compute the transform
        spec = self.transform(x)[:, 0]
        # Keep the trailing nfft - hop_size samples as next call's overlap.
        self.audio_buffer = x[..., -(self.nfft - self.hop_size):]
        if self.skip_features is not None:
            # Truncates to the first `skip_features` bins.
            # NOTE(review): name suggests *skipping* features — verify intent.
            spec = spec[:, :self.skip_features]
        return spec
torch.sum(self.freq * spectro_normed, dim=-2)[:, None, :] 144 | # Compute the deviation 145 | deviation = torch.abs(self.freq - centroid) 146 | # Compute bandwidth 147 | bandwidth = torch.sum(spectro_normed * deviation**p, dim=-2, keepdim=True) ** (1.0 / p) 148 | return bandwidth 149 | 150 | @torch.jit.export 151 | def flatness(self, x: torch.Tensor, amin: float = 1e-10, power: float = 2.0): 152 | # Compute the current spectrogram 153 | spectro = self._compute_spectrogram(x) 154 | S_thresh = torch.maximum(spectro**power, torch.zeros(1) + amin) 155 | gmean = torch.exp(torch.mean(torch.log(S_thresh), dim=-2, keepdim=True)) 156 | amean = torch.mean(S_thresh, dim=-2, keepdim=True) 157 | flatness = gmean / amean 158 | return flatness 159 | 160 | 161 | if __name__ == '__main__': 162 | # Create your target class 163 | model = AudioFeatures() 164 | # Export it to a torchscript model 165 | model.export_to_ts('src/models/features.ts') -------------------------------------------------------------------------------- /src/frontend/maxmsp/nn_tilde/nn_tilde.cpp: -------------------------------------------------------------------------------- 1 | #include "../shared/nn_base.h" 2 | #include "c74_min.h" 3 | 4 | template 5 | void model_perform(nn_class* nn_instance) { 6 | std::vector in_model, out_model; 7 | for (int c(0); c < nn_instance->m_model_in; c++) 8 | in_model.push_back(nn_instance->m_in_model[c].get()); 9 | for (int c(0); c < nn_instance->m_model_out; c++) 10 | out_model.push_back(nn_instance->m_out_model[c].get()); 11 | 12 | if (nn_instance->had_buffer_reset) { 13 | nn_instance->had_buffer_reset = false; 14 | } 15 | 16 | nn_instance->m_model->perform(in_model, out_model, 17 | nn_instance->m_method, 18 | 1, nn_instance->m_out_model.size(), nn_instance->m_buffer_size); 19 | } 20 | 21 | template 22 | void model_perform_async(nn_class *nn_instance) { 23 | while (!nn_instance->can_perform()){ 24 | std::this_thread::sleep_for(std::chrono::milliseconds(REFRESH_THREAD_INTERVAL)); 
// Max/MSP external "nn~": streams audio through a TorchScript model.
// Thin min-api wrapper around nn_base, which owns the model, the circular
// I/O buffers and the optional background compute thread.
class nn: public nn_base> {   // NOTE(review): template argument list appears lost in transcription (likely nn_base<nn>) — confirm against repository

public:
  MIN_DESCRIPTION{"Interface for deep learning models"};
  MIN_TAGS{"audio, deep learning, ai"};
  MIN_AUTHOR{"Antoine Caillon & Axel Chemla--Romeu-Santos"};
  MIN_RELATED{"nn.info, mc.nn~, mcs.nn~"};

  // Name under which Max registers this external.
  static std::string get_external_name() {
    return std::string("nn~");
  }
  nn(const atoms &args = {}) {
    init_external(args);
  }

  // Host DSP sample rate, queried through the min-api.
  int get_sample_rate() override {
    return samplerate();
  }

  // After base initialization, spawn the asynchronous perform thread when
  // threaded processing is enabled.
  void init_process() override {
    nn_base::init_process();
    if (m_use_thread) {
      m_compute_thread = std::make_unique(model_perform_async, this);   // NOTE(review): template arguments likely lost in transcription (std::thread)
    }
  }

  // Full external bring-up: model and downloader always; I/O configuration
  // and processing only when creation arguments were given.
  void init_external(const atoms &args) override {
    DEBUG_PRINT("initializing model");
    init_model();
    DEBUG_PRINT("initializing downloader");
    init_downloader();
    if (!args.size()) { return; }   // no model argument: stay idle
    DEBUG_PRINT("initializing inputs & outputs");
    init_inputs_and_outputs(args);
    DEBUG_PRINT("initializing inlets & outlets");
    init_inlets_and_outlets();
    // DEBUG_PRINT("initializing buffers");
    // init_buffers();
    DEBUG_PRINT("initializing process");
    init_process();
  }
  // DSP callback; implementation follows the class definition.
  void perform(audio_bundle input, audio_bundle output) override;

  // One-time banner printed when Max loads the external class.
  message<> maxclass_setup{
      this, "maxclass_setup",
      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
        cout << "nn~ " << VERSION << " - torch " << TORCH_VERSION
             << " - 2023-2025 - Antoine Caillon & Axel Chemla--Romeu-Santos" << endl;
        cout << "visit https://www.github.com/acids-ircam" << endl;
        return {};
      }};

};
c(0); c < n_outs; c++) 149 | m_out_buffer[c].put(m_out_model[c].get(), m_buffer_size); 150 | } else { 151 | if (m_result_available_lock.try_acquire()) { 152 | // TRANSFER MEMORY BETWEEN INPUT CIRCULAR BUFFER AND MODEL BUFFER 153 | if (wait_for_buffer_reset) { 154 | init_buffers(); 155 | } 156 | auto n_ins = std::min(n_inlets, m_model_in); 157 | for (int c(0); c < n_ins; c++) 158 | m_in_buffer[c].get(m_in_model[c].get(), m_buffer_size); 159 | 160 | // TRANSFER MEMORY BETWEEN OUTPUT CIRCULAR BUFFER AND MODEL BUFFER 161 | auto n_outs = std::min(n_outlets, m_model_out); 162 | for (int c(0); c < n_outs; c++) 163 | m_out_buffer[c].put(m_out_model[c].get(), m_buffer_size); 164 | 165 | 166 | // SIGNAL PERFORM THREAD THAT DATA IS AVAILABLE 167 | m_data_available_lock.release(); 168 | } 169 | } 170 | } 171 | 172 | // COPY CIRCULAR BUFFER TO OUTPUT 173 | for (int c(0); c < output.channel_count(); c++) { 174 | auto out = output.samples(c); 175 | m_out_buffer[c].get(out, vec_size); 176 | } 177 | } 178 | } 179 | 180 | 181 | MIN_EXTERNAL(nn); -------------------------------------------------------------------------------- /scripting/features.py: -------------------------------------------------------------------------------- 1 | # 2 | # NN~ - Scripting library 3 | # features.py : Simple scripting example for waveform-to-float case. 4 | # 5 | # We demonstrate the basic mecanisms for using the nn~ environment. 6 | # In this case, any function from Python can be used to wrap it inside a nn~ model. 
7 | # 8 | # ACIDS - IRCAM : Philippe Esling, Axel Chemla--Romeu-Santos, Antoine Caillon 9 | # 10 | 11 | from typing import List, Tuple 12 | import librosa 13 | # Pytorch audio operations 14 | import torch 15 | import torchaudio.functional as F 16 | from torchaudio.transforms import Spectrogram 17 | # Import the nn~ library 18 | import nn_tilde 19 | import numpy as np 20 | 21 | class AudioFeatures(nn_tilde.Module): 22 | 23 | def __init__(self, 24 | nfft=1024, 25 | hop_size=256, 26 | skip_features=None): 27 | super().__init__() 28 | self.nfft = nfft 29 | self.hop_size = hop_size 30 | transform = Spectrogram(n_fft=nfft, 31 | win_length=nfft, 32 | hop_length=hop_size, 33 | center=False, 34 | normalized=True) 35 | self.transform = transform 36 | self.skip_features = skip_features 37 | # ----------------- 38 | # Register attributes 39 | # ----------------- 40 | self.register_attribute('sr', 44100) 41 | self.register_buffer('audio_buffer', torch.zeros((1, 1, nfft - hop_size))) 42 | # Pre-compute frequency bins 43 | self.freq = torch.fft.rfftfreq(n = nfft, d = 1.0 / self.sr[0]) 44 | 45 | # ----------------- 46 | # Register methods 47 | # ----------------- 48 | self.register_method( 49 | 'rms', 50 | in_channels=1, 51 | in_ratio=1, 52 | out_channels=1, 53 | out_ratio=1024, 54 | input_labels=['(signal) signal to monitor'], 55 | output_labels=['(signal) rms value'], 56 | ) 57 | 58 | # REGISTER METHODS 59 | self.register_method( 60 | 'centroid', 61 | in_channels=1, 62 | in_ratio=1, 63 | out_channels=1, 64 | out_ratio=self.hop_size, 65 | input_labels=['(signal) signal to monitor'], 66 | output_labels=['(signal) spectral centroid value'], 67 | ) 68 | 69 | # REGISTER METHODS 70 | self.register_method( 71 | 'flatness', 72 | in_channels=1, 73 | in_ratio=1, 74 | out_channels=1, 75 | out_ratio=self.hop_size, 76 | input_labels=['(signal) signal to monitor'], 77 | output_labels=['(signal) flatness value'], 78 | ) 79 | 80 | # REGISTER METHODS 81 | self.register_method( 82 | 'bandwidth', 
    @torch.jit.export
    def centroid(self, x: torch.Tensor):
        """Spectral centroid of each frame, in Hz.

        Returns shape (batch, 1, frames). Lazily builds and caches an
        expanded broadcast copy of the rfft bin frequencies in `self.freq`.
        """
        # Compute the current spectrogram
        spectro = self._compute_spectrogram(x)
        # Compute the center frequencies of each bin
        if self.freq is None:
            self.freq = torch.fft.rfftfreq(n = self.nfft, d = 1.0 / self.sr[0])
        if len(self.freq.shape) == 1:
            # Expand (bins,) -> (1, bins, 1) view matching the spectrogram.
            self.freq = self.freq[None, :, None].expand_as(spectro)
        # Column-normalize S
        centroid = torch.sum(self.freq * torch.nn.functional.normalize(spectro, p=1.0, dim=-2), dim=-2)
        return centroid[:, None, :]
1.0 / self.sr[0]) 132 | if len(self.freq.shape) == 1: 133 | self.freq = self.freq[None, :, None].expand_as(spectro) 134 | # Normalize spectro 135 | spectro_normed = torch.nn.functional.normalize(spectro, p=1.0, dim=-2) 136 | # Compute centroid 137 | centroid = torch.sum(self.freq * spectro_normed, dim=-2)[:, None, :] 138 | # Compute the deviation 139 | deviation = torch.abs(self.freq - centroid) 140 | # Compute bandwidth 141 | bandwidth = torch.sum(spectro_normed * deviation**p, dim=-2, keepdim=True) ** (1.0 / p) 142 | return bandwidth 143 | 144 | @torch.jit.export 145 | def flatness(self, x: torch.Tensor, amin: float = 1e-10, power: float = 2.0): 146 | # Compute the current spectrogram 147 | spectro = self._compute_spectrogram(x) 148 | S_thresh = torch.maximum(spectro**power, torch.zeros(1) + amin) 149 | gmean = torch.exp(torch.mean(torch.log(S_thresh), dim=-2, keepdim=True)) 150 | amean = torch.mean(S_thresh, dim=-2, keepdim=True) 151 | flatness = gmean / amean 152 | print(flatness.shape) 153 | return flatness 154 | 155 | # defining attribute getters 156 | # WARNING : typing the function's ouptut is mandatory 157 | @torch.jit.export 158 | def get_sr(self) -> int: 159 | return int(self.sr[0]) 160 | 161 | # defining attribute setter 162 | # setters must return an error code : 163 | # return 0 if the attribute has been adequately set, 164 | # return -1 if the attribute was wrong. 
165 | @torch.jit.export 166 | def set_sr(self, x: int) -> int: 167 | self.sr = (x, ) 168 | return 0 169 | 170 | if __name__ == '__main__': 171 | # Create your target class 172 | model = AudioFeatures() 173 | # Export it to a torchscript model 174 | model.export_to_ts('features.ts') -------------------------------------------------------------------------------- /extras/generate_test_model.py: -------------------------------------------------------------------------------- 1 | from typing import List, Tuple 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | import nn_tilde 7 | 8 | 9 | class AudioUtils(nn_tilde.Module): 10 | 11 | def __init__(self): 12 | super().__init__() 13 | # REGISTER ATTRIBUTES 14 | self.register_attribute('gain_factor', 1.) 15 | self.register_attribute('polynomial_factors', (1., 0., 0., 0.)) 16 | self.register_attribute('saturate_mode', 'tanh') 17 | self.register_attribute('invert_signal', False) 18 | self.register_attribute('fractal', (2, 0.)) 19 | 20 | # REGISTER METHODS 21 | self.register_method( 22 | 'thru', 23 | in_channels=1, 24 | in_ratio=1, 25 | out_channels=1, 26 | out_ratio=1, 27 | input_labels=['(signal) input signal'], 28 | output_labels=['(signal) output signal'], 29 | ) 30 | self.register_method( 31 | 'invert', 32 | in_channels=1, 33 | in_ratio=1, 34 | out_channels=1, 35 | out_ratio=1, 36 | input_labels=['(signal) input signal'], 37 | output_labels=['(signal) output signal'], 38 | ) 39 | self.register_method( 40 | 'add', 41 | in_channels=2, 42 | in_ratio=1, 43 | out_channels=1, 44 | out_ratio=1, 45 | input_labels=['(signal) first signal', '(signal) second signal'], 46 | output_labels=['(signal) output signal'], 47 | ) 48 | self.register_method( 49 | 'saturate', 50 | in_channels=1, 51 | in_ratio=1, 52 | out_channels=1, 53 | out_ratio=1, 54 | input_labels=['(signal) signal to saturate'], 55 | output_labels=['(signal) saturated signal'], 56 | ) 57 | self.register_method( 58 | 'midside', 59 | in_channels=2, 60 | in_ratio=1, 61 | 
out_channels=2, 62 | out_ratio=1, 63 | input_labels=['(signal) L channel', '(signal) R channel'], 64 | output_labels=['(signal) Mid channel', '(signal) Side channel'], 65 | ) 66 | self.register_method( 67 | 'rms', 68 | in_channels=1, 69 | in_ratio=1, 70 | out_channels=1, 71 | out_ratio=1024, 72 | input_labels=['(signal) signal to monitor'], 73 | output_labels=['(signal) rms value'], 74 | ) 75 | self.register_method( 76 | 'polynomial', 77 | in_channels=1, 78 | in_ratio=1, 79 | out_channels=1, 80 | out_ratio=1, 81 | input_labels=['(signal) signal to distort'], 82 | output_labels=['(signal) distorted signal'], 83 | ) 84 | 85 | self.register_method( 86 | 'fractalize', 87 | in_channels=1, 88 | in_ratio=512, 89 | out_channels=1, 90 | out_ratio=512, 91 | input_labels=['(signal) signal to replicate'], 92 | output_labels=['(signal) fractalized signal'], 93 | ) 94 | 95 | @torch.jit.export 96 | def thru(self, x: torch.Tensor): 97 | return x 98 | 99 | # defining main methods 100 | @torch.jit.export 101 | def invert(self, x: torch.Tensor): 102 | if self.invert_signal[0]: 103 | return x 104 | else: 105 | return -x 106 | 107 | @torch.jit.export 108 | def add(self, x: torch.Tensor): 109 | return x.sum(-2, keepdim=True) / 2 110 | 111 | @torch.jit.export 112 | def fractalize(self, x: torch.Tensor): 113 | fractal_order = int(self.fractal[0]) 114 | fractal_amount = float(self.fractal[1]) 115 | downsampled_signal = x[..., ::fractal_order] 116 | return x 117 | 118 | @torch.jit.export 119 | def polynomial(self, x: torch.Tensor): 120 | out = torch.zeros_like(x) 121 | for i in range(4): 122 | out += self.polynomial_factors[i] * x.pow(i + 1) 123 | return out 124 | 125 | @torch.jit.export 126 | def saturate(self, x: torch.Tensor): 127 | saturate_mode = self.saturate_mode[0] 128 | if saturate_mode == 'tanh': 129 | return torch.tanh(x * self.gain_factor[0]) 130 | elif saturate_mode == 'clip': 131 | return torch.clamp(x * self.gain_factor[0], -1, 1) 132 | 133 | @torch.jit.export 134 | def 
midside(self, x: torch.Tensor): 135 | l, r = x[..., 0, :], x[..., 1, :] 136 | return torch.stack([(l + r) / 2, (l - r) / 2], dim=-2) 137 | 138 | @torch.jit.export 139 | def rms(self, x: torch.Tensor): 140 | x = x.reshape(x.shape[0], x.shape[1], 1024, -1) 141 | rms = x.pow(2).sum(-2).sqrt() / x.size(-1) 142 | return rms 143 | 144 | # defining attribute getters 145 | # WARNING : typing the function's ouptut is mandatory 146 | @torch.jit.export 147 | def get_gain_factor(self) -> float: 148 | return float(self.gain_factor[0]) 149 | 150 | @torch.jit.export 151 | def get_polynomial_factors(self) -> List[float]: 152 | polynomial_factors: List[float] = [] 153 | for p in self.polynomial_factors: 154 | polynomial_factors.append(float(p)) 155 | return polynomial_factors 156 | 157 | @torch.jit.export 158 | def get_saturate_mode(self) -> str: 159 | return self.saturate_mode[0] 160 | 161 | @torch.jit.export 162 | def get_invert_signal(self) -> bool: 163 | return self.invert_signal[0] 164 | 165 | @torch.jit.export 166 | def get_fractal(self) -> Tuple[int, float]: 167 | return (int(self.fractal[0]), float(self.fractal[1])) 168 | 169 | # defining attribute setter 170 | # setters must return an error code : 171 | # return 0 if the attribute has been adequately set, 172 | # return -1 if the attribute was wrong. 
173 | @torch.jit.export 174 | def set_gain_factor(self, x: float) -> int: 175 | self.gain_factor = (x, ) 176 | return 0 177 | 178 | @torch.jit.export 179 | def set_polynomial_factors(self, factor1: float, factor2: float, 180 | factor3: float, factor4: float) -> int: 181 | factors = (factor1, factor2, factor3, factor4) 182 | self.polynomial_factors = factors 183 | return 0 184 | 185 | @torch.jit.export 186 | def set_saturate_mode(self, x: str): 187 | if (x == 'tanh') or (x == 'clip'): 188 | self.saturate_mode = (x, ) 189 | return 0 190 | else: 191 | return -1 192 | 193 | @torch.jit.export 194 | def set_invert_signal(self, x: bool): 195 | self.invert_signal = (x, ) 196 | return 0 197 | 198 | @torch.jit.export 199 | def set_fractal(self, factor: int, amount: float): 200 | if factor <= 0: 201 | return -1 202 | elif factor % 2 != 0: 203 | return -1 204 | self.fractal = (factor, float(amount)) 205 | return 0 206 | 207 | 208 | if __name__ == '__main__': 209 | model = AudioUtils() 210 | model.export_to_ts('multieffect.ts') 211 | -------------------------------------------------------------------------------- /src/patchers/latent_remote/latent_remote.js: -------------------------------------------------------------------------------- 1 | inlets = 1; 2 | outlets = 1; 3 | 4 | var MAX_SLIDERS = 64; 5 | var CURRENT_SLIDERS = 1; 6 | var MAX_COLUMNS = 8; 7 | var SLIDER_X_MARGIN = 10; 8 | var SLIDER_Y_MARGIN = 3; 9 | 10 | // Global UI objects 11 | var InputRoute = null; 12 | var LatentSlider = new Array(MAX_SLIDERS); 13 | var PathInlet = this.patcher.getnamed("input1"); 14 | var PathOutlet = this.patcher.getnamed("output1"); 15 | var PathSymbolInlet = this.patcher.getnamed("symbol_in") 16 | 17 | 18 | // Global UI objects position 19 | var InputRoutPos = [10, 80]; 20 | var OutputRoutPos = [10, 400]; 21 | var LatentSliderPos = [10, 120]; 22 | var LatentSliderSize = [65, 220]; 23 | 24 | 25 | var IN_PACK_NAME = "input_unpack" 26 | var OUT_UNPACK_NAME = "output_pack" 27 | 28 | // make 
// Lay the existing sliders out on a presentation-mode grid,
// MAX_COLUMNS sliders per row.
function make_presentation_layout() {
    var col = 0;
    var row = 0;
    for (var i = 0; i < this.CURRENT_SLIDERS; i++) {
        post(i, "\n");
        var sliderX = col * (LatentSliderSize[0] + SLIDER_X_MARGIN);
        var sliderY = row * (LatentSliderSize[1] + SLIDER_Y_MARGIN);
        this.LatentSlider[i].setboxattr("presentation_rect", sliderX, sliderY, LatentSliderSize[0], LatentSliderSize[1]);
        // Advance along the row; wrap to the next row after MAX_COLUMNS.
        col += 1;
        if (col >= MAX_COLUMNS) {
            col = 0;
            row += 1;
        }
    }
}
var x_grid = 0; 79 | var y_grid = 0; 80 | for (var i = 0; i < n_sliders; i++) { 81 | var posX = LatentSliderPos[0] + i * LatentSliderSize[0]; 82 | var posY = LatentSliderPos[1]; 83 | var currentSlider = this.patcher.newdefault(posX, posY, "bpatcher", "latent_slider"); 84 | //post(i, posX, posY, LatentSliderSize[0], LatentSliderSize[1], "\n"); 85 | //currentSlider.setboxattr("patching_rect", posX, posY, LatentSliderSize[0], LatentSliderSize[1]); 86 | 87 | currentSlider.setboxattr("varname", "slider"+String(i+1)); 88 | this.patcher.connect(InputRoute, i, currentSlider, 0); 89 | 90 | // make layout 91 | var posX = x_grid * (LatentSliderSize[0] + SLIDER_X_MARGIN); 92 | var posY = y_grid * (LatentSliderSize[1] + SLIDER_Y_MARGIN); 93 | currentSlider.setboxattr("patching_rect", posX, posY, LatentSliderSize[0], LatentSliderSize[1]); 94 | currentSlider.setboxattr("presentation", 1); 95 | x_grid += 1; 96 | if (x_grid >= this.MAX_COLUMNS) { 97 | x_grid = 0; 98 | y_grid += 1; 99 | } 100 | // connect to output 101 | this.patcher.connect(currentSlider, 0, OutputRoute, i); 102 | this.LatentSlider[i] = currentSlider; 103 | // connect to symout 104 | this.patcher.connect(currentSlider, 1, PakObject, i); 105 | // make receive obj 106 | var receiveObj = this.patcher.newdefault(posX, posY - 20, "receive", jsarguments[1]+"_"+String(i)) 107 | this.patcher.connect(receiveObj, 0, currentSlider, 0) 108 | } 109 | CURRENT_SLIDERS = n_sliders; 110 | 111 | update_patching_rect(); 112 | } 113 | 114 | function max_columns(n_columns) { 115 | if (n_columns == 0) { 116 | return; 117 | } 118 | if (n_columns > this.CURRENT_SLIDERS) error("Cannot set max columns " + String(n_columns) + " with " + String(this.CURRENT_SLIDERS) + " sliders\n"); 119 | this.MAX_COLUMNS = n_columns; 120 | //make_presentation_layout(); 121 | update_patching_rect(); 122 | } 123 | 124 | function update_patching_rect() { 125 | if (this.patcher.box != null) { 126 | var slider_rect = 
this.patcher.getnamed("slider1").getboxattr("patching_rect"); 127 | var patching_rect = this.patcher.box.getboxattr("patching_rect") 128 | var presentation_rect = this.patcher.box.getboxattr("presentation_rect") 129 | var target_width = Math.min(this.CURRENT_SLIDERS, this.MAX_COLUMNS) * (slider_rect[2] + SLIDER_X_MARGIN); 130 | var target_height = Math.ceil(this.CURRENT_SLIDERS / this.MAX_COLUMNS) * (slider_rect[3] + SLIDER_Y_MARGIN); 131 | this.patcher.box.setboxattr("patching_rect", patching_rect[0], patching_rect[1], target_width, target_height); 132 | this.patcher.box.setboxattr("presentation_rect", presentation_rect[0], presentation_rect[1], target_width, target_height); 133 | } 134 | } 135 | 136 | function slider_check_size() { 137 | if (this.patcher.box != null) { 138 | var slider_rect = LatentSliderSize; 139 | var patching_rect = this.patcher.box.getboxattr("patching_rect"); 140 | this.patcher.box.setboxattr("patching_rect", patching_rect[0], patching_rect[1], LatentSliderSize[0], LatentSliderSize[1]) 141 | var presentation_rect = this.patcher.box.getboxattr("presentation_rect"); 142 | this.patcher.box.setboxattr("presentation_rect", patching_rect[0], patching_rect[1], LatentSliderSize[0], LatentSliderSize[1]) 143 | } 144 | } 145 | 146 | function clear() { 147 | for (var i = 0; i < this.CURRENT_SLIDERS; i++) { 148 | this.patcher.remove(LatentSlider[i]); 149 | } 150 | this.patcher.remove(this.patcher.getnamed(IN_PACK_NAME)); 151 | this.patcher.remove(this.patcher.getnamed(OUT_UNPACK_NAME)); 152 | } 153 | 154 | 155 | function faders() { 156 | var args = arrayfromargs(arguments); 157 | var mess = args.shift(); 158 | //post("n sliders", CURRENT_SLIDERS); 159 | for (var i = 0; i < CURRENT_SLIDERS; i++) { 160 | //post('setting slider', i, 'to', args[i], '\n'); 161 | messnamed(jsarguments[1]+"_"+String(i), mess, args[i]) 162 | } 163 | } 164 | 165 | function faders_all() { 166 | var args = arrayfromargs(arguments); 167 | var mess = args.shift(); 168 | //post("n 
sliders", CURRENT_SLIDERS); 169 | for (var i = 0; i < CURRENT_SLIDERS; i++) { 170 | //post('setting slider', i, 'to', args[i], '\n'); 171 | messnamed(jsarguments[1]+"_"+String(i), mess, args) 172 | } 173 | } 174 | 175 | 176 | function fader() { 177 | var args = arrayfromargs(arguments); 178 | var fader_idx = args.shift() - 1; 179 | var mess = args.shift(); 180 | if (fader_idx > CURRENT_SLIDERS) { 181 | error('fader_idx '+String(fader_idx)+' too big') 182 | } 183 | //post("n sliders", CURRENT_SLIDERS); 184 | messnamed(jsarguments[1]+"_"+String(fader_idx), mess, args) 185 | } 186 | 187 | function dump_all() { 188 | post("dump all!!") 189 | } -------------------------------------------------------------------------------- /src/patchers/latent_remote/M4L.latent_remote.js: -------------------------------------------------------------------------------- 1 | inlets = 1; 2 | outlets = 1; 3 | 4 | var MAX_SLIDERS = 64; 5 | var CURRENT_SLIDERS = 1; 6 | var MAX_COLUMNS = 8; 7 | var SLIDER_X_MARGIN = 4; 8 | var SLIDER_Y_MARGIN = 3; 9 | 10 | // Global UI objects 11 | var InputRoute = null; 12 | var LatentSlider = new Array(MAX_SLIDERS); 13 | var PathInlet = this.patcher.getnamed("input1"); 14 | var PathOutlet = this.patcher.getnamed("output1"); 15 | var PathSymbolInlet = this.patcher.getnamed("symbol_in") 16 | 17 | 18 | // Global UI objects position 19 | var InputRoutPos = [10, 80]; 20 | var OutputRoutPos = [10, 400]; 21 | var LatentSliderPos = [10, 120]; 22 | var LatentSliderSize = [51, 168]; 23 | 24 | 25 | var IN_PACK_NAME = "input_unpack" 26 | var OUT_UNPACK_NAME = "output_pack" 27 | 28 | // make presentation layout 29 | function make_presentation_layout() { 30 | var x_grid = 0; 31 | var y_grid = 0; 32 | for (var i = 0; i < this.CURRENT_SLIDERS; i++) { 33 | post(i, "\n"); 34 | var posX = x_grid * (LatentSliderSize[0] + SLIDER_X_MARGIN); 35 | var posY = y_grid * (LatentSliderSize[1] + SLIDER_Y_MARGIN); 36 | this.LatentSlider[i].setboxattr("presentation_rect", posX, posY, 
LatentSliderSize[0], LatentSliderSize[1]); 37 | x_grid += 1; 38 | if (x_grid >= MAX_COLUMNS) { 39 | x_grid = 0; 40 | y_grid += 1; 41 | } 42 | } 43 | // 44 | } 45 | 46 | 47 | // generate sliders 48 | function sliders(n_sliders) { 49 | if (n_sliders > MAX_SLIDERS) error('cannot generate more than ' + String(MAX_SLIDERS) + ' sliders.') 50 | if (InputRoute != null) this.patcher.remove(InputRoute); 51 | // create input routing 52 | this.patcher.remove(this.patcher.getnamed(IN_PACK_NAME)); 53 | InputRoute = this.patcher.newdefault(InputRoutPos[0], InputRoutPos[1], "mc.unpack~", n_sliders); 54 | InputRoute.setattr("varname", IN_PACK_NAME); 55 | this.patcher.connect(PathInlet, 0, InputRoute, 0); 56 | // create output routing 57 | this.patcher.remove(this.patcher.getnamed(OUT_UNPACK_NAME)); 58 | OutputRoute = this.patcher.newdefault(OutputRoutPos[0], OutputRoutPos[1], "mc.pack~", n_sliders); 59 | OutputRoute.setattr("varname", OUT_UNPACK_NAME); 60 | this.patcher.connect(OutputRoute, 0, PathOutlet, 0); 61 | // delete existing sliders 62 | for (var i = 0; i < this.CURRENT_SLIDERS; i++) { 63 | this.patcher.remove(LatentSlider[i]); 64 | } 65 | // create sliders 66 | if (n_sliders < 1) { 67 | return 68 | } 69 | // create pak for symout 70 | var symoutOutlet = this.patcher.getnamed("symout"); 71 | var symoutPos = symoutOutlet.getattr("patching_rect"); 72 | var pakName = "pak" 73 | for (var i = 0; i < n_sliders; i++) { 74 | pakName = pakName + " f"; 75 | } 76 | var PakObject = this.patcher.newdefault(symoutPos[0], symoutPos[1] - 20, pakName); 77 | this.patcher.connect(PakObject, 0, symoutOutlet, 0) 78 | var x_grid = 0; 79 | var y_grid = 0; 80 | for (var i = 0; i < n_sliders; i++) { 81 | var posX = LatentSliderPos[0] + i * LatentSliderSize[0]; 82 | var posY = LatentSliderPos[1]; 83 | var currentSlider = this.patcher.newdefault(posX, posY, "bpatcher", "M4L.latent_slider"); 84 | //post(i, posX, posY, LatentSliderSize[0], LatentSliderSize[1], "\n"); 85 | 
//currentSlider.setboxattr("patching_rect", posX, posY, LatentSliderSize[0], LatentSliderSize[1]); 86 | 87 | currentSlider.setboxattr("varname", "slider"+String(i+1)); 88 | this.patcher.connect(InputRoute, i, currentSlider, 0); 89 | 90 | // make layout 91 | var posX = x_grid * (LatentSliderSize[0] + SLIDER_X_MARGIN); 92 | var posY = y_grid * (LatentSliderSize[1] + SLIDER_Y_MARGIN); 93 | currentSlider.setboxattr("patching_rect", posX, posY, LatentSliderSize[0], LatentSliderSize[1]); 94 | currentSlider.setboxattr("presentation", 1); 95 | x_grid += 1; 96 | if (x_grid >= this.MAX_COLUMNS) { 97 | x_grid = 0; 98 | y_grid += 1; 99 | } 100 | // connect to output 101 | this.patcher.connect(currentSlider, 0, OutputRoute, i); 102 | this.LatentSlider[i] = currentSlider; 103 | // connect to symout 104 | this.patcher.connect(currentSlider, 1, PakObject, i); 105 | // make receive obj 106 | var receiveObj = this.patcher.newdefault(posX, posY - 20, "receive", jsarguments[1]+"_"+String(i)) 107 | this.patcher.connect(receiveObj, 0, currentSlider, 0) 108 | } 109 | CURRENT_SLIDERS = n_sliders; 110 | 111 | update_patching_rect(); 112 | } 113 | 114 | function max_columns(n_columns) { 115 | if (n_columns == 0) { 116 | return; 117 | } 118 | if (n_columns > this.CURRENT_SLIDERS) error("Cannot set max columns " + String(n_columns) + " with " + String(this.CURRENT_SLIDERS) + " sliders\n"); 119 | this.MAX_COLUMNS = n_columns; 120 | //make_presentation_layout(); 121 | update_patching_rect(); 122 | } 123 | 124 | function update_patching_rect() { 125 | if (this.patcher.box != null) { 126 | var slider_rect = this.patcher.getnamed("slider1").getboxattr("patching_rect"); 127 | var patching_rect = this.patcher.box.getboxattr("patching_rect") 128 | var presentation_rect = this.patcher.box.getboxattr("presentation_rect") 129 | var target_width = Math.min(this.CURRENT_SLIDERS, this.MAX_COLUMNS) * (slider_rect[2] + SLIDER_X_MARGIN); 130 | var target_height = Math.ceil(this.CURRENT_SLIDERS / 
this.MAX_COLUMNS) * (slider_rect[3] + SLIDER_Y_MARGIN); 131 | this.patcher.box.setboxattr("patching_rect", patching_rect[0], patching_rect[1], target_width, target_height); 132 | this.patcher.box.setboxattr("presentation_rect", presentation_rect[0], presentation_rect[1], target_width, target_height); 133 | } 134 | } 135 | 136 | function slider_check_size() { 137 | if (this.patcher.box != null) { 138 | var slider_rect = LatentSliderSize; 139 | var patching_rect = this.patcher.box.getboxattr("patching_rect"); 140 | this.patcher.box.setboxattr("patching_rect", patching_rect[0], patching_rect[1], LatentSliderSize[0], LatentSliderSize[1]) 141 | var presentation_rect = this.patcher.box.getboxattr("presentation_rect"); 142 | this.patcher.box.setboxattr("presentation_rect", patching_rect[0], patching_rect[1], LatentSliderSize[0], LatentSliderSize[1]) 143 | } 144 | } 145 | 146 | function clear() { 147 | for (var i = 0; i < this.CURRENT_SLIDERS; i++) { 148 | this.patcher.remove(LatentSlider[i]); 149 | } 150 | this.patcher.remove(this.patcher.getnamed(IN_PACK_NAME)); 151 | this.patcher.remove(this.patcher.getnamed(OUT_UNPACK_NAME)); 152 | } 153 | 154 | 155 | function faders() { 156 | var args = arrayfromargs(arguments); 157 | var mess = args.shift(); 158 | //post("n sliders", CURRENT_SLIDERS); 159 | for (var i = 0; i < CURRENT_SLIDERS; i++) { 160 | //post('setting slider', i, 'to', args[i], '\n'); 161 | messnamed(jsarguments[1]+"_"+String(i), mess, args[i]) 162 | } 163 | } 164 | 165 | function faders_all() { 166 | var args = arrayfromargs(arguments); 167 | var mess = args.shift(); 168 | //post("n sliders", CURRENT_SLIDERS); 169 | for (var i = 0; i < CURRENT_SLIDERS; i++) { 170 | //post('setting slider', i, 'to', args[i], '\n'); 171 | messnamed(jsarguments[1]+"_"+String(i), mess, args) 172 | } 173 | } 174 | 175 | 176 | function fader() { 177 | var args = arrayfromargs(arguments); 178 | var fader_idx = args.shift() - 1; 179 | var mess = args.shift(); 180 | if (fader_idx > 
CURRENT_SLIDERS) { 181 | error('fader_idx '+String(fader_idx)+' too big') 182 | } 183 | //post("n sliders", CURRENT_SLIDERS); 184 | messnamed(jsarguments[1]+"_"+String(fader_idx), mess, args) 185 | } 186 | 187 | function dump_all() { 188 | post('dump all!!!') 189 | } -------------------------------------------------------------------------------- /src/source/effects.py: -------------------------------------------------------------------------------- 1 | try: 2 | import nn_tilde 3 | except ImportError: 4 | import os, sys 5 | sys.path.append(os.path.join(os.path.dirname(__file__) , ".." , "..")) 6 | import python_tools as nn_tilde 7 | 8 | 9 | from typing import List, Tuple 10 | 11 | import torch 12 | import torch.nn as nn 13 | 14 | 15 | class AudioUtils(nn_tilde.Module): 16 | 17 | def __init__(self): 18 | super().__init__() 19 | # REGISTER ATTRIBUTES 20 | self.register_attribute('gain_factor', 1.) 21 | self.register_attribute('polynomial_factors', (1., 0., 0., 0.)) 22 | self.register_attribute('saturate_mode', 'tanh') 23 | self.register_attribute('invert_signal', False) 24 | self.register_attribute('fractal', (2, 0.)) 25 | 26 | # REGISTER METHODS 27 | self.register_method( 28 | 'thru', 29 | in_channels=1, 30 | in_ratio=1, 31 | out_channels=1, 32 | out_ratio=1, 33 | input_labels=['(signal) input signal'], 34 | output_labels=['(signal) output signal'], 35 | ) 36 | self.register_method( 37 | 'invert', 38 | in_channels=1, 39 | in_ratio=1, 40 | out_channels=1, 41 | out_ratio=1, 42 | input_labels=['(signal) input signal'], 43 | output_labels=['(signal) output signal'], 44 | ) 45 | self.register_method( 46 | 'add', 47 | in_channels=2, 48 | in_ratio=1, 49 | out_channels=1, 50 | out_ratio=1, 51 | input_labels=['(signal) first signal', '(signal) second signal'], 52 | output_labels=['(signal) output signal'], 53 | ) 54 | self.register_method( 55 | 'saturate', 56 | in_channels=1, 57 | in_ratio=1, 58 | out_channels=1, 59 | out_ratio=1, 60 | input_labels=['(signal) signal to 
saturate'], 61 | output_labels=['(signal) saturated signal'], 62 | ) 63 | self.register_method( 64 | 'midside', 65 | in_channels=2, 66 | in_ratio=1, 67 | out_channels=2, 68 | out_ratio=1, 69 | input_labels=['(signal) L channel', '(signal) R channel'], 70 | output_labels=['(signal) Mid channel', '(signal) Side channel'], 71 | ) 72 | self.register_method( 73 | 'rms', 74 | in_channels=1, 75 | in_ratio=1, 76 | out_channels=1, 77 | out_ratio=1024, 78 | input_labels=['(signal) signal to monitor'], 79 | output_labels=['(signal) rms value'], 80 | ) 81 | self.register_method( 82 | 'polynomial', 83 | in_channels=1, 84 | in_ratio=1, 85 | out_channels=1, 86 | out_ratio=1, 87 | input_labels=['(signal) signal to distort'], 88 | output_labels=['(signal) distorted signal'], 89 | ) 90 | 91 | self.register_method( 92 | 'fractalize', 93 | in_channels=1, 94 | in_ratio=512, 95 | out_channels=1, 96 | out_ratio=512, 97 | input_labels=['(signal) signal to replicate'], 98 | output_labels=['(signal) fractalized signal'], 99 | ) 100 | 101 | @torch.jit.export 102 | def thru(self, x: torch.Tensor): 103 | return x 104 | 105 | # defining main methods 106 | @torch.jit.export 107 | def invert(self, x: torch.Tensor): 108 | if self.invert_signal[0]: 109 | return x 110 | else: 111 | return -x 112 | 113 | @torch.jit.export 114 | def add(self, x: torch.Tensor): 115 | return x.sum(-2, keepdim=True) / 2 116 | 117 | @torch.jit.export 118 | def fractalize(self, x: torch.Tensor): 119 | fractal_order = int(self.fractal[0]) 120 | fractal_amount = float(self.fractal[1]) 121 | downsampled_signal = x[..., ::fractal_order] 122 | return x 123 | 124 | @torch.jit.export 125 | def polynomial(self, x: torch.Tensor): 126 | out = torch.zeros_like(x) 127 | for i in range(4): 128 | out += self.polynomial_factors[i] * x.pow(i + 1) 129 | return out 130 | 131 | @torch.jit.export 132 | def saturate(self, x: torch.Tensor): 133 | saturate_mode = self.saturate_mode[0] 134 | if saturate_mode == 'tanh': 135 | return torch.tanh(x * 
self.gain_factor[0]) 136 | elif saturate_mode == 'clip': 137 | return torch.clamp(x * self.gain_factor[0], -1, 1) 138 | 139 | @torch.jit.export 140 | def midside(self, x: torch.Tensor): 141 | l, r = x[..., 0, :], x[..., 1, :] 142 | return torch.stack([(l + r) / 2, (l - r) / 2], dim=-2) 143 | 144 | @torch.jit.export 145 | def rms(self, x: torch.Tensor): 146 | x = x.reshape(x.shape[0], x.shape[1], 1024, -1) 147 | rms = x.pow(2).sum(-2).sqrt() / x.size(-1) 148 | return rms 149 | 150 | # defining attribute getters 151 | # WARNING : typing the function's ouptut is mandatory 152 | @torch.jit.export 153 | def get_gain_factor(self) -> float: 154 | return float(self.gain_factor[0]) 155 | 156 | @torch.jit.export 157 | def get_polynomial_factors(self) -> List[float]: 158 | polynomial_factors: List[float] = [] 159 | for p in self.polynomial_factors: 160 | polynomial_factors.append(float(p)) 161 | return polynomial_factors 162 | 163 | @torch.jit.export 164 | def get_saturate_mode(self) -> str: 165 | return self.saturate_mode[0] 166 | 167 | @torch.jit.export 168 | def get_invert_signal(self) -> bool: 169 | return self.invert_signal[0] 170 | 171 | @torch.jit.export 172 | def get_fractal(self) -> Tuple[int, float]: 173 | return (int(self.fractal[0]), float(self.fractal[1])) 174 | 175 | # defining attribute setter 176 | # setters must return an error code : 177 | # return 0 if the attribute has been adequately set, 178 | # return -1 if the attribute was wrong. 
179 | @torch.jit.export 180 | def set_gain_factor(self, x: float) -> int: 181 | self.gain_factor = (x, ) 182 | return 0 183 | 184 | @torch.jit.export 185 | def set_polynomial_factors(self, factor1: float, factor2: float, 186 | factor3: float, factor4: float) -> int: 187 | factors = (factor1, factor2, factor3, factor4) 188 | self.polynomial_factors = factors 189 | return 0 190 | 191 | @torch.jit.export 192 | def set_saturate_mode(self, x: str): 193 | if (x == 'tanh') or (x == 'clip'): 194 | self.saturate_mode = (x, ) 195 | return 0 196 | else: 197 | return -1 198 | 199 | @torch.jit.export 200 | def set_invert_signal(self, x: bool): 201 | self.invert_signal = (x, ) 202 | return 0 203 | 204 | @torch.jit.export 205 | def set_fractal(self, factor: int, amount: float): 206 | if factor <= 0: 207 | return -1 208 | elif factor % 2 != 0: 209 | return -1 210 | self.fractal = (factor, float(amount)) 211 | return 0 212 | 213 | 214 | if __name__ == '__main__': 215 | # Create your target class 216 | model = AudioUtils() 217 | # Export it to a torchscript model 218 | model.export_to_ts('src/models/effects.ts') 219 | -------------------------------------------------------------------------------- /scripting/effects.py: -------------------------------------------------------------------------------- 1 | # 2 | # NN~ - Scripting library 3 | # effects.py : Intermediate scripting example for waveform-to-waveform case. 4 | # 5 | # We provide here a simple example of how to use nn~ in order to transform incoming audio. 6 | # In this example, we do not rely on any ML model, but simply apply effects on input buffers. 7 | # 8 | # ACIDS - IRCAM : Philippe Esling, Axel Chemla--Romeu-Santos, Antoine Caillon 9 | # 10 | 11 | from typing import List, Tuple 12 | import torch 13 | import torch.nn as nn 14 | import nn_tilde 15 | 16 | class AudioUtils(nn_tilde.Module): 17 | 18 | def __init__(self): 19 | super().__init__() 20 | # REGISTER ATTRIBUTES 21 | self.register_attribute('gain_factor', 1.) 
22 | self.register_attribute('polynomial_factors', (1., 0., 0., 0.)) 23 | self.register_attribute('saturate_mode', 'tanh') 24 | self.register_attribute('invert_signal', False) 25 | self.register_attribute('fractal', (2, 0.)) 26 | 27 | # REGISTER METHODS 28 | self.register_method( 29 | 'thru', 30 | in_channels=1, 31 | in_ratio=1, 32 | out_channels=1, 33 | out_ratio=1, 34 | input_labels=['(signal) input signal'], 35 | output_labels=['(signal) output signal'], 36 | ) 37 | self.register_method( 38 | 'invert', 39 | in_channels=1, 40 | in_ratio=1, 41 | out_channels=1, 42 | out_ratio=1, 43 | input_labels=['(signal) input signal'], 44 | output_labels=['(signal) output signal'], 45 | ) 46 | self.register_method( 47 | 'add', 48 | in_channels=2, 49 | in_ratio=1, 50 | out_channels=1, 51 | out_ratio=1, 52 | input_labels=['(signal) first signal', '(signal) second signal'], 53 | output_labels=['(signal) output signal'], 54 | ) 55 | self.register_method( 56 | 'saturate', 57 | in_channels=1, 58 | in_ratio=1, 59 | out_channels=1, 60 | out_ratio=1, 61 | input_labels=['(signal) signal to saturate'], 62 | output_labels=['(signal) saturated signal'], 63 | ) 64 | self.register_method( 65 | 'midside', 66 | in_channels=2, 67 | in_ratio=1, 68 | out_channels=2, 69 | out_ratio=1, 70 | input_labels=['(signal) L channel', '(signal) R channel'], 71 | output_labels=['(signal) Mid channel', '(signal) Side channel'], 72 | ) 73 | self.register_method( 74 | 'rms', 75 | in_channels=1, 76 | in_ratio=1, 77 | out_channels=1, 78 | out_ratio=1024, 79 | input_labels=['(signal) signal to monitor'], 80 | output_labels=['(signal) rms value'], 81 | ) 82 | self.register_method( 83 | 'polynomial', 84 | in_channels=1, 85 | in_ratio=1, 86 | out_channels=1, 87 | out_ratio=1, 88 | input_labels=['(signal) signal to distort'], 89 | output_labels=['(signal) distorted signal'], 90 | ) 91 | 92 | self.register_method( 93 | 'fractalize', 94 | in_channels=1, 95 | in_ratio=512, 96 | out_channels=1, 97 | out_ratio=512, 98 | 
input_labels=['(signal) signal to replicate'], 99 | output_labels=['(signal) fractalized signal'], 100 | ) 101 | 102 | @torch.jit.export 103 | def thru(self, x: torch.Tensor): 104 | return x 105 | 106 | # defining main methods 107 | @torch.jit.export 108 | def invert(self, x: torch.Tensor): 109 | if self.invert_signal[0]: 110 | return x 111 | else: 112 | return -x 113 | 114 | @torch.jit.export 115 | def add(self, x: torch.Tensor): 116 | return x.sum(-2, keepdim=True) / 2 117 | 118 | @torch.jit.export 119 | def fractalize(self, x: torch.Tensor): 120 | fractal_order = int(self.fractal[0]) 121 | fractal_amount = float(self.fractal[1]) 122 | downsampled_signal = x[..., ::fractal_order] 123 | return x 124 | 125 | @torch.jit.export 126 | def polynomial(self, x: torch.Tensor): 127 | out = torch.zeros_like(x) 128 | for i in range(4): 129 | out += self.polynomial_factors[i] * x.pow(i + 1) 130 | return out 131 | 132 | @torch.jit.export 133 | def saturate(self, x: torch.Tensor): 134 | saturate_mode = self.saturate_mode[0] 135 | if saturate_mode == 'tanh': 136 | return torch.tanh(x * self.gain_factor[0]) 137 | elif saturate_mode == 'clip': 138 | return torch.clamp(x * self.gain_factor[0], -1, 1) 139 | 140 | @torch.jit.export 141 | def midside(self, x: torch.Tensor): 142 | l, r = x[..., 0, :], x[..., 1, :] 143 | return torch.stack([(l + r) / 2, (l - r) / 2], dim=-2) 144 | 145 | @torch.jit.export 146 | def rms(self, x: torch.Tensor): 147 | x = x.reshape(x.shape[0], x.shape[1], 1024, -1) 148 | rms = x.pow(2).sum(-2).sqrt() / x.size(-1) 149 | return rms 150 | 151 | # defining attribute getters 152 | # WARNING : typing the function's ouptut is mandatory 153 | @torch.jit.export 154 | def get_gain_factor(self) -> float: 155 | return float(self.gain_factor[0]) 156 | 157 | @torch.jit.export 158 | def get_polynomial_factors(self) -> List[float]: 159 | polynomial_factors: List[float] = [] 160 | for p in self.polynomial_factors: 161 | polynomial_factors.append(float(p)) 162 | return 
polynomial_factors 163 | 164 | @torch.jit.export 165 | def get_saturate_mode(self) -> str: 166 | return self.saturate_mode[0] 167 | 168 | @torch.jit.export 169 | def get_invert_signal(self) -> bool: 170 | return self.invert_signal[0] 171 | 172 | @torch.jit.export 173 | def get_fractal(self) -> Tuple[int, float]: 174 | return (int(self.fractal[0]), float(self.fractal[1])) 175 | 176 | # defining attribute setter 177 | # setters must return an error code : 178 | # return 0 if the attribute has been adequately set, 179 | # return -1 if the attribute was wrong. 180 | @torch.jit.export 181 | def set_gain_factor(self, x: float) -> int: 182 | self.gain_factor = (x, ) 183 | return 0 184 | 185 | @torch.jit.export 186 | def set_polynomial_factors(self, factor1: float, factor2: float, 187 | factor3: float, factor4: float) -> int: 188 | factors = (factor1, factor2, factor3, factor4) 189 | self.polynomial_factors = factors 190 | return 0 191 | 192 | @torch.jit.export 193 | def set_saturate_mode(self, x: str): 194 | if (x == 'tanh') or (x == 'clip'): 195 | self.saturate_mode = (x, ) 196 | return 0 197 | else: 198 | return -1 199 | 200 | @torch.jit.export 201 | def set_invert_signal(self, x: bool): 202 | self.invert_signal = (x, ) 203 | return 0 204 | 205 | @torch.jit.export 206 | def set_fractal(self, factor: int, amount: float): 207 | if factor <= 0: 208 | return -1 209 | elif factor % 2 != 0: 210 | return -1 211 | self.fractal = (factor, float(amount)) 212 | return 0 213 | 214 | if __name__ == '__main__': 215 | model = AudioUtils() 216 | model.export_to_ts('effects.ts') -------------------------------------------------------------------------------- /python_tools/test/test_attributes.maxpat: -------------------------------------------------------------------------------- 1 | { 2 | "patcher" : { 3 | "fileversion" : 1, 4 | "appversion" : { 5 | "major" : 8, 6 | "minor" : 6, 7 | "revision" : 5, 8 | "architecture" : "x64", 9 | "modernui" : 1 10 | } 11 | , 12 | "classnamespace" : 
"box", 13 | "rect" : [ 607.0, 354.0, 767.0, 480.0 ], 14 | "bglocked" : 0, 15 | "openinpresentation" : 0, 16 | "default_fontsize" : 12.0, 17 | "default_fontface" : 0, 18 | "default_fontname" : "Arial", 19 | "gridonopen" : 1, 20 | "gridsize" : [ 15.0, 15.0 ], 21 | "gridsnaponopen" : 1, 22 | "objectsnaponopen" : 1, 23 | "statusbarvisible" : 2, 24 | "toolbarvisible" : 1, 25 | "lefttoolbarpinned" : 0, 26 | "toptoolbarpinned" : 0, 27 | "righttoolbarpinned" : 0, 28 | "bottomtoolbarpinned" : 0, 29 | "toolbars_unpinned_last_save" : 0, 30 | "tallnewobj" : 0, 31 | "boxanimatetime" : 200, 32 | "enablehscroll" : 1, 33 | "enablevscroll" : 1, 34 | "devicewidth" : 0.0, 35 | "description" : "", 36 | "digest" : "", 37 | "tags" : "", 38 | "style" : "", 39 | "subpatcher_template" : "", 40 | "assistshowspatchername" : 0, 41 | "boxes" : [ { 42 | "box" : { 43 | "fontface" : 0, 44 | "fontname" : "Arial", 45 | "fontsize" : 12.0, 46 | "id" : "obj-4", 47 | "maxclass" : "number~", 48 | "mode" : 2, 49 | "numinlets" : 2, 50 | "numoutlets" : 2, 51 | "outlettype" : [ "signal", "float" ], 52 | "patching_rect" : [ 153.666666666666686, 278.0, 56.0, 22.0 ], 53 | "sig" : 0.0 54 | } 55 | 56 | } 57 | , { 58 | "box" : { 59 | "fontface" : 0, 60 | "fontname" : "Arial", 61 | "fontsize" : 12.0, 62 | "id" : "obj-2", 63 | "maxclass" : "number~", 64 | "mode" : 2, 65 | "numinlets" : 2, 66 | "numoutlets" : 2, 67 | "outlettype" : [ "signal", "float" ], 68 | "patching_rect" : [ 69.0, 278.0, 56.0, 22.0 ], 69 | "sig" : 0.0 70 | } 71 | 72 | } 73 | , { 74 | "box" : { 75 | "id" : "obj-19", 76 | "maxclass" : "message", 77 | "numinlets" : 2, 78 | "numoutlets" : 1, 79 | "outlettype" : [ "" ], 80 | "patching_rect" : [ 602.0, 154.0, 75.0, 22.0 ], 81 | "text" : "get attr_bool" 82 | } 83 | 84 | } 85 | , { 86 | "box" : { 87 | "id" : "obj-18", 88 | "maxclass" : "message", 89 | "numinlets" : 2, 90 | "numoutlets" : 1, 91 | "outlettype" : [ "" ], 92 | "patching_rect" : [ 525.0, 154.0, 66.0, 22.0 ], 93 | "text" : "get attr_str" 94 | 
} 95 | 96 | } 97 | , { 98 | "box" : { 99 | "id" : "obj-17", 100 | "maxclass" : "message", 101 | "numinlets" : 2, 102 | "numoutlets" : 1, 103 | "outlettype" : [ "" ], 104 | "patching_rect" : [ 454.0, 154.0, 65.0, 22.0 ], 105 | "text" : "get attr_int" 106 | } 107 | 108 | } 109 | , { 110 | "box" : { 111 | "id" : "obj-16", 112 | "maxclass" : "message", 113 | "numinlets" : 2, 114 | "numoutlets" : 1, 115 | "outlettype" : [ "" ], 116 | "patching_rect" : [ 378.0, 154.0, 65.0, 22.0 ], 117 | "text" : "get attr_int" 118 | } 119 | 120 | } 121 | , { 122 | "box" : { 123 | "id" : "obj-14", 124 | "maxclass" : "message", 125 | "numinlets" : 2, 126 | "numoutlets" : 1, 127 | "outlettype" : [ "" ], 128 | "patching_rect" : [ 419.0, 92.0, 91.0, 22.0 ], 129 | "text" : "set attr_bool $1" 130 | } 131 | 132 | } 133 | , { 134 | "box" : { 135 | "id" : "obj-13", 136 | "maxclass" : "message", 137 | "numinlets" : 2, 138 | "numoutlets" : 1, 139 | "outlettype" : [ "" ], 140 | "patching_rect" : [ 296.0, 92.0, 82.0, 22.0 ], 141 | "text" : "set attr_str $1" 142 | } 143 | 144 | } 145 | , { 146 | "box" : { 147 | "id" : "obj-12", 148 | "maxclass" : "message", 149 | "numinlets" : 2, 150 | "numoutlets" : 1, 151 | "outlettype" : [ "" ], 152 | "patching_rect" : [ 174.0, 92.0, 91.0, 22.0 ], 153 | "text" : "set attr_float $1" 154 | } 155 | 156 | } 157 | , { 158 | "box" : { 159 | "id" : "obj-11", 160 | "maxclass" : "message", 161 | "numinlets" : 2, 162 | "numoutlets" : 1, 163 | "outlettype" : [ "" ], 164 | "patching_rect" : [ 69.0, 92.0, 81.0, 22.0 ], 165 | "text" : "set attr_int $1" 166 | } 167 | 168 | } 169 | , { 170 | "box" : { 171 | "id" : "obj-9", 172 | "maxclass" : "toggle", 173 | "numinlets" : 1, 174 | "numoutlets" : 1, 175 | "outlettype" : [ "int" ], 176 | "parameter_enable" : 0, 177 | "patching_rect" : [ 419.0, 37.0, 24.0, 24.0 ] 178 | } 179 | 180 | } 181 | , { 182 | "box" : { 183 | "id" : "obj-7", 184 | "maxclass" : "message", 185 | "numinlets" : 2, 186 | "numoutlets" : 1, 187 | "outlettype" : [ "" 
], 188 | "patching_rect" : [ 296.0, 49.0, 42.0, 22.0 ], 189 | "text" : "cherry" 190 | } 191 | 192 | } 193 | , { 194 | "box" : { 195 | "format" : 6, 196 | "id" : "obj-5", 197 | "maxclass" : "flonum", 198 | "numinlets" : 1, 199 | "numoutlets" : 2, 200 | "outlettype" : [ "", "bang" ], 201 | "parameter_enable" : 0, 202 | "patching_rect" : [ 174.0, 49.0, 50.0, 22.0 ] 203 | } 204 | 205 | } 206 | , { 207 | "box" : { 208 | "id" : "obj-3", 209 | "maxclass" : "number", 210 | "numinlets" : 1, 211 | "numoutlets" : 2, 212 | "outlettype" : [ "", "bang" ], 213 | "parameter_enable" : 0, 214 | "patching_rect" : [ 69.0, 44.0, 50.0, 22.0 ] 215 | } 216 | 217 | } 218 | , { 219 | "box" : { 220 | "id" : "obj-1", 221 | "maxclass" : "newobj", 222 | "numinlets" : 1, 223 | "numoutlets" : 4, 224 | "outlettype" : [ "signal", "signal", "signal", "signal" ], 225 | "patching_rect" : [ 69.0, 214.0, 273.0, 22.0 ], 226 | "text" : "nn~ test_attributes[AttributeFoo]" 227 | } 228 | 229 | } 230 | ], 231 | "lines" : [ { 232 | "patchline" : { 233 | "destination" : [ "obj-2", 0 ], 234 | "source" : [ "obj-1", 0 ] 235 | } 236 | 237 | } 238 | , { 239 | "patchline" : { 240 | "destination" : [ "obj-4", 0 ], 241 | "source" : [ "obj-1", 1 ] 242 | } 243 | 244 | } 245 | , { 246 | "patchline" : { 247 | "destination" : [ "obj-1", 0 ], 248 | "source" : [ "obj-11", 0 ] 249 | } 250 | 251 | } 252 | , { 253 | "patchline" : { 254 | "destination" : [ "obj-1", 0 ], 255 | "source" : [ "obj-12", 0 ] 256 | } 257 | 258 | } 259 | , { 260 | "patchline" : { 261 | "destination" : [ "obj-1", 0 ], 262 | "source" : [ "obj-13", 0 ] 263 | } 264 | 265 | } 266 | , { 267 | "patchline" : { 268 | "destination" : [ "obj-1", 0 ], 269 | "source" : [ "obj-14", 0 ] 270 | } 271 | 272 | } 273 | , { 274 | "patchline" : { 275 | "destination" : [ "obj-1", 0 ], 276 | "source" : [ "obj-16", 0 ] 277 | } 278 | 279 | } 280 | , { 281 | "patchline" : { 282 | "destination" : [ "obj-1", 0 ], 283 | "source" : [ "obj-17", 0 ] 284 | } 285 | 286 | } 287 | , { 288 | 
"patchline" : { 289 | "destination" : [ "obj-1", 0 ], 290 | "source" : [ "obj-18", 0 ] 291 | } 292 | 293 | } 294 | , { 295 | "patchline" : { 296 | "destination" : [ "obj-1", 0 ], 297 | "source" : [ "obj-19", 0 ] 298 | } 299 | 300 | } 301 | , { 302 | "patchline" : { 303 | "destination" : [ "obj-11", 0 ], 304 | "source" : [ "obj-3", 0 ] 305 | } 306 | 307 | } 308 | , { 309 | "patchline" : { 310 | "destination" : [ "obj-12", 0 ], 311 | "source" : [ "obj-5", 0 ] 312 | } 313 | 314 | } 315 | , { 316 | "patchline" : { 317 | "destination" : [ "obj-13", 0 ], 318 | "source" : [ "obj-7", 0 ] 319 | } 320 | 321 | } 322 | , { 323 | "patchline" : { 324 | "destination" : [ "obj-14", 0 ], 325 | "source" : [ "obj-9", 0 ] 326 | } 327 | 328 | } 329 | ], 330 | "dependency_cache" : [ { 331 | "name" : "nn~.mxo", 332 | "type" : "iLaX" 333 | } 334 | ], 335 | "autosave" : 0 336 | } 337 | 338 | } 339 | -------------------------------------------------------------------------------- /src/frontend/puredata/nn_tilde/nn~-help.pd: -------------------------------------------------------------------------------- 1 | #N canvas 9 53 1467 851 12; 2 | #X obj 10 51 cnv 1 960 1 empty empty empty 8 12 0 13 #000000 #000000 0; 3 | #X obj 22 18 nn~; 4 | #X text 59 18 - real-time ai audio processing; 5 | #X obj 574 67 bng 19 250 50 0 \$0-browse-vschaos2 empty IRCAM\ vschaos2\ models 24 9 0 12 #fcfcfc #000000 #000000; 6 | #X obj 768 92 bng 19 250 50 0 \$0-browse-rave-iil empty iiL\ RAVE\ models 24 9 0 12 #fcfcfc #000000 #000000; 7 | #X obj 574 92 bng 19 250 50 0 \$0-browse-rave-ircam empty IRCAM\ RAVE\ models 24 9 0 12 #fcfcfc #000000 #000000; 8 | #N canvas 38 607 670 504 guts 0; 9 | #X obj 57 272 pdcontrol; 10 | #X msg 60 162 browse https://acids-ircam.github.io/rave_models_download; 11 | #X msg 60 222 browse https://www.dropbox.com/sh/avdeiza7c6bn2of/AAAGZsnRo9ZVMa0iFhouCBL-a?dl=0, f 80; 12 | #X obj 60 198 r \$0-browse-vschaos2; 13 | #X obj 60 138 r \$0-browse-rave-ircam; 14 | #X obj 61 73 r 
\$0-browse-rave-iil; 15 | #X msg 61 98 browse https://huggingface.co/Intelligent-Instruments-Lab/rave-models, f 69; 16 | #X obj 84 335 loadbang; 17 | #X msg 84 359 0; 18 | #X obj 84 383 s \$0-enable; 19 | #X connect 1 0 0 0; 20 | #X connect 2 0 0 0; 21 | #X connect 3 0 2 0; 22 | #X connect 4 0 1 0; 23 | #X connect 5 0 6 0; 24 | #X connect 6 0 0 0; 25 | #X connect 7 0 8 0; 26 | #X connect 8 0 9 0; 27 | #X restore 901 800 pd guts; 28 | #X msg 24 659 gpu \$1; 29 | #X obj 24 635 tgl 19 0 empty empty empty 0 -10 0 12 #fcfcfc #000000 #000000 0 1; 30 | #X text 545 621 <-- latent space; 31 | #X obj 419 549 vsl 19 162 -10 10 0 0 empty empty empty 0 -9 0 12 #fcfcfc #000000 #000000 0 1; 32 | #X obj 506 252 tgl 19 0 \$0-enable empty empty 0 -10 0 12 #dfdfdf #000000 #000000 0 1; 33 | #X text 394 540 10; 34 | #X text 401 621 0; 35 | #X text 387 703 -10; 36 | #X obj 450 549 vsl 19 162 -10 10 0 0 empty empty empty 0 -9 0 12 #fcfcfc #000000 #000000 0 1; 37 | #X obj 482 549 vsl 19 162 -10 10 0 0 empty empty empty 0 -9 0 12 #fcfcfc #000000 #000000 0 1; 38 | #X obj 514 549 vsl 19 162 -10 10 0 0 empty empty empty 0 -9 0 12 #fcfcfc #000000 #000000 0 1; 39 | #X obj 419 276 noise~; 40 | #X obj 506 300 f; 41 | #X obj 506 348 mtof; 42 | #X obj 419 800 dac~; 43 | #X obj 506 372 lop~ 12; 44 | #X obj 584 378 +~ 3; 45 | #X obj 419 397 bob~; 46 | #X obj 584 354 *~ 3; 47 | #X obj 437 324 line~; 48 | #X obj 419 373 *~; 49 | #X obj 437 348 pow~ 2; 50 | #X obj 536 300 + 4; 51 | #X obj 506 324 + 50; 52 | #X obj 566 300 mod 41; 53 | #X obj 584 330 osc~ 7; 54 | #X text 21 65 At its core \, nn~ is a translation layer between Pure Data and the libtorch C++ interface for deep learning. Alone \, nn~ is like an empty shell \, and requires pretrained models to operate. 
You can find a few models here:, f 74; 55 | #X obj 697 378 dac~; 56 | #X text 21 297 optional leading flags:; 57 | #X obj 24 800 s \$0-nn; 58 | #X msg 24 752 bufsize 0; 59 | #X msg 24 728 bufsize 4096; 60 | #X text 120 727 <-- default; 61 | #X obj 506 276 metro 150; 62 | #X text 29 238 3rd :; 63 | #X text 29 203 2nd :; 64 | #X text 72 203 - optional \, defaults to "forward", f 21; 65 | #X text 29 183 1st :; 66 | #X text 73 183 ; 67 | #X obj 267 635 tgl 19 0 empty \$0-enable empty 0 -10 0 12 #fcfcfc #000000 #000000 0 1; 68 | #X msg 267 659 enable \$1; 69 | #X obj 697 252 tgl 19 0 empty empty empty 0 -10 0 12 #dfdfdf #000000 #000000 0 1; 70 | #X text 29 326 -m :; 71 | #X text 69 326 activate multichannel mode (in- and output signal for all methods combined in 1 inlet and 1 outlet), f 33; 72 | #X text 29 374 -g :; 73 | #X text 68 374 activate GPU mode (if available); 74 | #X text 29 395 -d :; 75 | #X text 68 395 initialize in disabled state; 76 | #X msg 24 473 load nasa; 77 | #X obj 613 444 r \$0-nn; 78 | #X obj 614 737 r \$0-nn; 79 | #X obj 419 732 snake~ in 4; 80 | #X obj 24 683 s \$0-nn; 81 | #X obj 267 683 s \$0-nn; 82 | #X obj 24 497 s \$0-nn; 83 | #X obj 605 800 print; 84 | #X text 413 223 timbre transfer; 85 | #X text 695 223 decoder-only; 86 | #X obj 267 800 s \$0-nn; 87 | #X obj 267 775 bng 19 250 50 0 empty empty empty 0 -10 0 12 #fcfcfc #000000 #000000; 88 | #X text 264 725 bang to output state on info outlet, f 14; 89 | #X text 22 151 CREATION ARGS:; 90 | #X text 23 442 MESSAGES:; 91 | #X text 72 238 - optional \, defaults to 4096 0 sets no-thread-mode \, resulting in lower latency, f 34; 92 | #X text 650 800 <-- check info log on bang; 93 | #X msg 24 542 reload; 94 | #X msg 24 566 method forward; 95 | #X obj 697 587 dac~; 96 | #X obj 697 444 tgl 19 0 empty empty empty 0 -10 0 12 #dfdfdf #000000 #000000 0 1; 97 | #N canvas 237 262 668 332 snapshotall 0; 98 | #X obj 49 158 snapshot~; 99 | #X obj 139 158 snapshot~; 100 | #X obj 230 158 snapshot~; 101 | #X 
obj 323 158 snapshot~; 102 | #X obj 49 182 outlet; 103 | #X obj 139 182 outlet; 104 | #X obj 230 182 outlet; 105 | #X obj 323 182 outlet; 106 | #X obj 49 44 inlet~; 107 | #X obj 404 110 route enable; 108 | #X obj 404 139 metro 25; 109 | #X obj 404 86 r \$0-nn; 110 | #X obj 49 101 snake~ out 4; 111 | #X connect 0 0 4 0; 112 | #X connect 1 0 5 0; 113 | #X connect 2 0 6 0; 114 | #X connect 3 0 7 0; 115 | #X connect 8 0 12 0; 116 | #X connect 9 0 10 0; 117 | #X connect 10 0 2 0; 118 | #X connect 10 0 3 0; 119 | #X connect 10 0 1 0; 120 | #X connect 10 0 0 0; 121 | #X connect 11 0 9 0; 122 | #X connect 12 0 0 0; 123 | #X connect 12 1 1 0; 124 | #X connect 12 2 2 0; 125 | #X connect 12 3 3 0; 126 | #X restore 419 509 pd snapshotall; 127 | #X msg 24 590 set ; 128 | #X text 695 416 prior; 129 | #X msg 697 276 enable \$1; 130 | #X obj 697 525 nn~ -m -g; 131 | #X obj 697 563 nn~ -m -g; 132 | #X msg 697 492 enable \$1; 133 | #X obj 771 468 sel 1; 134 | #X obj 697 468 t f f; 135 | #X text 73 542 reload model; 136 | #X msg 24 776 bufsize 16384; 137 | #X text 97 752 <-- no-thread-mode; 138 | #X text 197 584 see info log for attributes, f 14; 139 | #X text 133 565 switch method (multichannel only); 140 | #X text 80 651 activate if available (check Pd's CPU meter), f 22; 141 | #X msg 771 530 load vintage \, method decode \, bufsize 8192, f 20; 142 | #X text 411 151 EXAMPLES (these require the percussion.ts and vintage.ts models from the IRCAM RAVE collection above):, f 38; 143 | #X msg 437 300 1 \, 0 200; 144 | #X obj 770 314 osc~ 5.1; 145 | #X obj 835 314 osc~ 7.1; 146 | #X msg 771 492 load vintage \, method prior \, bufsize 8192, f 20; 147 | #X obj 900 314 osc~ 9.1; 148 | #X obj 705 314 osc~ 4.1; 149 | #X text 574 645 combining "encode" and "decode" objects like this without manipulation of the latent trajectory is similar to using the "forward" method. number of in- and output channels depends on the model. 
see "dim" output on info outlet., f 52; 150 | #X text 100 466 dynamically load another model (multichannel only), f 30; 151 | #X msg 1062 233 print_available_models; 152 | #X obj 1062 276 nn~; 153 | #X obj 1060 392 nn~; 154 | #X text 1055 151 DIRECT DOWNLOAD you can directly download nn~ compatible models within nn~., f 37; 155 | #X msg 1081 357 remove nasa; 156 | #X obj 419 463 nn~ -m -g nasa encode; 157 | #X obj 420 756 nn~ -m -g nasa decode; 158 | #X obj 697 338 nn~ -d -g nasa decode; 159 | #X msg 1060 325 download ircam/rave/nasa; 160 | #X connect 7 0 59 0; 161 | #X connect 8 0 7 0; 162 | #X connect 10 0 58 0; 163 | #X connect 11 0 40 0; 164 | #X connect 15 0 58 1; 165 | #X connect 16 0 58 2; 166 | #X connect 17 0 58 3; 167 | #X connect 18 0 27 0; 168 | #X connect 19 0 29 0; 169 | #X connect 19 0 30 0; 170 | #X connect 20 0 22 0; 171 | #X connect 22 0 24 1; 172 | #X connect 23 0 24 2; 173 | #X connect 24 0 106 0; 174 | #X connect 25 0 23 0; 175 | #X connect 26 0 28 0; 176 | #X connect 27 0 24 0; 177 | #X connect 28 0 27 1; 178 | #X connect 29 0 31 0; 179 | #X connect 30 0 20 0; 180 | #X connect 31 0 19 1; 181 | #X connect 32 0 25 0; 182 | #X connect 37 0 36 0; 183 | #X connect 38 0 36 0; 184 | #X connect 40 0 19 0; 185 | #X connect 40 0 93 0; 186 | #X connect 46 0 47 0; 187 | #X connect 47 0 60 0; 188 | #X connect 48 0 79 0; 189 | #X connect 55 0 61 0; 190 | #X connect 56 0 106 0; 191 | #X connect 57 0 107 0; 192 | #X connect 58 0 107 0; 193 | #X connect 66 0 65 0; 194 | #X connect 75 0 84 0; 195 | #X connect 76 0 10 0; 196 | #X connect 76 1 15 0; 197 | #X connect 76 2 16 0; 198 | #X connect 76 3 17 0; 199 | #X connect 79 0 108 0; 200 | #X connect 80 0 81 0; 201 | #X connect 81 0 74 0; 202 | #X connect 82 0 80 0; 203 | #X connect 82 0 81 0; 204 | #X connect 83 0 96 0; 205 | #X connect 83 0 91 0; 206 | #X connect 84 0 82 0; 207 | #X connect 84 1 83 0; 208 | #X connect 86 0 36 0; 209 | #X connect 91 0 81 0; 210 | #X connect 93 0 26 0; 211 | #X connect 94 0 108 1; 
212 | #X connect 95 0 108 2; 213 | #X connect 96 0 80 0; 214 | #X connect 97 0 108 4; 215 | #X connect 98 0 108 0; 216 | #X connect 101 0 102 0; 217 | #X connect 105 0 103 0; 218 | #X connect 106 0 76 0; 219 | #X connect 107 0 21 0; 220 | #X connect 107 1 62 0; 221 | #X connect 108 0 34 0; 222 | #X connect 108 1 34 1; 223 | #X connect 109 0 103 0; 224 | -------------------------------------------------------------------------------- /scripting/unmix.py: -------------------------------------------------------------------------------- 1 | # 2 | # NN~ - Scripting library 3 | # unmix.py : Advanced scripting example for integrating a deep waveform-to-waveform model. 4 | # 5 | # We provide here a simple example of how to use nn~ in order to transform incoming audio. 6 | # In this example, we do not rely on any ML model, but simply apply effects on input buffers. 7 | # 8 | # ACIDS - IRCAM : Philippe Esling, Axel Chemla--Romeu-Santos, Antoine Caillon 9 | # 10 | 11 | # System imports 12 | from typing import List, Tuple 13 | import os 14 | import math 15 | # Pytorch imports 16 | import torch 17 | import torch.nn as nn 18 | import torch 19 | import torchaudio 20 | # NN~ imports 21 | import nn_tilde 22 | 23 | class Unmix(nn_tilde.Module): 24 | 25 | def __init__(self, 26 | pretrained): 27 | super().__init__() 28 | # REGISTER ATTRIBUTES 29 | self.register_attribute('sr', 44100) 30 | self.pretrained = pretrained 31 | 32 | # REGISTER METHODS 33 | self.register_method( 34 | 'forward', 35 | in_channels=1, 36 | in_ratio=1, 37 | out_channels=4, 38 | out_ratio=1, 39 | input_labels=['(signal) signal to monitor'], 40 | output_labels=['drums', 'bass', 'vocals', 'others'], 41 | ) 42 | 43 | @torch.jit.export 44 | def forward(self, input: torch.Tensor): 45 | # Preprocess the input buffer (representation) 46 | in_r = preprocess(input, int(self.sr[0]), int(self.pretrained.sample_rate)) 47 | # Pass through the deep audio separation 48 | out = self.pretrained(in_r) 49 | # Return the separated 
channels 50 | return out.mean(dim=2) 51 | 52 | # defining attribute getters 53 | # WARNING : typing the function's ouptut is mandatory 54 | @torch.jit.export 55 | def get_sr(self) -> int: 56 | return int(self.sr[0]) 57 | 58 | # defining attribute setter 59 | # setters must return an error code : 60 | # return 0 if the attribute has been adequately set, 61 | # return -1 if the attribute was wrong. 62 | @torch.jit.export 63 | def set_sr(self, x: int) -> int: 64 | self.sr = (x, ) 65 | return 0 66 | 67 | def preprocess( 68 | audio: torch.Tensor, 69 | rate: int, 70 | model_rate: int, 71 | ) -> torch.Tensor: 72 | """ 73 | From an input tensor, convert it to a tensor of shape 74 | shape=(nb_samples, nb_channels, nb_timesteps). This includes: 75 | - if input is 1D, adding the samples and channels dimensions. 76 | - if input is 2D 77 | o and the smallest dimension is 1 or 2, adding the samples one. 78 | o and all dimensions are > 2, assuming the smallest is the samples 79 | one, and adding the channel one 80 | - at the end, if the number of channels is greater than the number 81 | of time steps, swap those two. 82 | - resampling to target rate if necessary 83 | 84 | Args: 85 | audio (Tensor): input waveform 86 | rate (float): sample rate for the audio 87 | model_rate (float): sample rate for the model 88 | 89 | Returns: 90 | Tensor: [shape=(nb_samples, nb_channels=2, nb_timesteps)] 91 | """ 92 | shape = torch.as_tensor(audio.shape, device=audio.device) 93 | 94 | if len(shape) == 1: 95 | # assuming only time dimension is provided. 96 | audio = audio[None, None, ...] 97 | elif len(shape) == 2: 98 | if shape.min() <= 2: 99 | # assuming sample dimension is missing 100 | audio = audio[None, ...] 101 | else: 102 | # assuming channel dimension is missing 103 | audio = audio[:, None, ...] 
104 | if audio.shape[1] > audio.shape[2]: 105 | # swapping channel and time 106 | audio = audio.transpose(1, 2) 107 | if audio.shape[1] > 2: 108 | audio = audio[..., :2] 109 | 110 | if audio.shape[1] == 1: 111 | # if we have mono, we duplicate it to get stereo 112 | audio = torch.repeat_interleave(audio, 2, dim=1) 113 | 114 | if rate != model_rate: 115 | # we have to resample to model samplerate if needed 116 | # this makes sure we resample input only once 117 | audio = torchaudio.functional.resample(audio, 118 | orig_freq=rate, new_freq=model_rate, resampling_method="sinc_interpolation" 119 | ).to(audio.device) 120 | return audio 121 | 122 | if __name__ == '__main__': 123 | pretrained = torch.jit.load("unmix.pt") # Pretrained weights 124 | model = Unmix(pretrained) 125 | model.export_to_ts('unmix.ts') -------------------------------------------------------------------------------- /src/source/unmix.py: -------------------------------------------------------------------------------- 1 | # 2 | # NN~ - Scripting library 3 | # unmix.py : Advanced scripting example for integrating a deep waveform-to-waveform model. 4 | # 5 | # We provide here a simple example of how to use nn~ in order to transform incoming audio. 6 | # In this example, we do not rely on any ML model, but simply apply effects on input buffers. 
7 | # 8 | # ACIDS - IRCAM : Philippe Esling, Axel Chemla--Romeu-Santos, Antoine Caillon 9 | # 10 | 11 | # System imports 12 | from typing import List, Tuple 13 | import os 14 | import math 15 | # Pytorch imports 16 | import torch 17 | import torch.nn as nn 18 | import torch 19 | import torchaudio 20 | # NN~ imports 21 | import nn_tilde 22 | 23 | class Unmix(nn_tilde.Module): 24 | 25 | def __init__(self, 26 | pretrained): 27 | super().__init__() 28 | # REGISTER ATTRIBUTES 29 | self.register_attribute('sr', 44100) 30 | self.pretrained = pretrained 31 | 32 | # REGISTER METHODS 33 | self.register_method( 34 | 'forward', 35 | in_channels=1, 36 | in_ratio=1, 37 | out_channels=4, 38 | out_ratio=1, 39 | input_labels=['(signal) signal to monitor'], 40 | output_labels=['drums', 'bass', 'vocals', 'others'], 41 | ) 42 | 43 | @torch.jit.export 44 | def forward(self, input: torch.Tensor): 45 | # Preprocess the input buffer (representation) 46 | in_r = preprocess(input, int(self.sr[0]), int(self.pretrained.sample_rate)) 47 | # Pass through the deep audio separation 48 | out = self.pretrained(in_r) 49 | # Return the separated channels 50 | return out.mean(dim=2) 51 | 52 | # defining attribute getters 53 | # WARNING : typing the function's ouptut is mandatory 54 | @torch.jit.export 55 | def get_sr(self) -> int: 56 | return int(self.sr[0]) 57 | 58 | # defining attribute setter 59 | # setters must return an error code : 60 | # return 0 if the attribute has been adequately set, 61 | # return -1 if the attribute was wrong. 62 | @torch.jit.export 63 | def set_sr(self, x: int) -> int: 64 | self.sr = (x, ) 65 | return 0 66 | 67 | def preprocess( 68 | audio: torch.Tensor, 69 | rate: int, 70 | model_rate: int, 71 | ) -> torch.Tensor: 72 | """ 73 | From an input tensor, convert it to a tensor of shape 74 | shape=(nb_samples, nb_channels, nb_timesteps). This includes: 75 | - if input is 1D, adding the samples and channels dimensions. 
76 | - if input is 2D 77 | o and the smallest dimension is 1 or 2, adding the samples one. 78 | o and all dimensions are > 2, assuming the smallest is the samples 79 | one, and adding the channel one 80 | - at the end, if the number of channels is greater than the number 81 | of time steps, swap those two. 82 | - resampling to target rate if necessary 83 | 84 | Args: 85 | audio (Tensor): input waveform 86 | rate (float): sample rate for the audio 87 | model_rate (float): sample rate for the model 88 | 89 | Returns: 90 | Tensor: [shape=(nb_samples, nb_channels=2, nb_timesteps)] 91 | """ 92 | shape = torch.as_tensor(audio.shape, device=audio.device) 93 | 94 | if len(shape) == 1: 95 | # assuming only time dimension is provided. 96 | audio = audio[None, None, ...] 97 | elif len(shape) == 2: 98 | if shape.min() <= 2: 99 | # assuming sample dimension is missing 100 | audio = audio[None, ...] 101 | else: 102 | # assuming channel dimension is missing 103 | audio = audio[:, None, ...] 104 | if audio.shape[1] > audio.shape[2]: 105 | # swapping channel and time 106 | audio = audio.transpose(1, 2) 107 | if audio.shape[1] > 2: 108 | audio = audio[..., :2] 109 | 110 | if audio.shape[1] == 1: 111 | # if we have mono, we duplicate it to get stereo 112 | audio = torch.repeat_interleave(audio, 2, dim=1) 113 | 114 | if rate != model_rate: 115 | # we have to resample to model samplerate if needed 116 | # this makes sure we resample input only once 117 | audio = torchaudio.functional.resample(audio, 118 | orig_freq=rate, new_freq=model_rate, resampling_method="sinc_interpolation" 119 | ).to(audio.device) 120 | return audio 121 | 122 | if __name__ == '__main__': 123 | pretrained = torch.jit.load("unmix.pt") # Pretrained weights 124 | model = Unmix(pretrained) 125 | model.export_to_ts('unmix.ts') -------------------------------------------------------------------------------- /src/frontend/puredata/nn_tilde/CMakeLists.txt: 
-------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10) 2 | project(nn_tilde_pd) 3 | 4 | set(CMAKE_CXX_STANDARD 20) 5 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 6 | set(CMAKE_CXX_EXTENSIONS OFF) 7 | 8 | find_package(Torch REQUIRED) 9 | 10 | file(GLOB SRC *.cpp) 11 | 12 | add_library(nn SHARED ${SRC}) 13 | 14 | if (MSVC) 15 | set_property(TARGET nn PROPERTY CXX_STANDARD 20) 16 | target_compile_features(nn PUBLIC "cxx_std_20") 17 | endif() 18 | 19 | # Get version from git for all platforms 20 | execute_process( 21 | COMMAND git describe --tags 22 | WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} 23 | OUTPUT_VARIABLE VERSION 24 | OUTPUT_STRIP_TRAILING_WHITESPACE 25 | ) 26 | if(VERSION) 27 | message(STATUS "Building version: ${VERSION}") 28 | add_definitions(-DVERSION="${VERSION}") 29 | endif() 30 | 31 | # COPY HELP FILES 32 | add_custom_command(TARGET nn POST_BUILD 33 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 34 | "${CMAKE_SOURCE_DIR}/frontend/puredata/nn_tilde/nn~-help.pd" 35 | "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/nn~-help.pd" 36 | COMMENT "Copy Help File" 37 | ) 38 | 39 | if (APPLE) 40 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") 41 | set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -undefined dynamic_lookup") 42 | 43 | add_custom_command( 44 | TARGET nn 45 | POST_BUILD 46 | COMMAND cp "${TORCH_INSTALL_PREFIX}/lib/*.dylib" "${CMAKE_CURRENT_BINARY_DIR}/" 47 | COMMENT "Copy Torch Libraries" 48 | ) 49 | 50 | set_target_properties(nn PROPERTIES 51 | PREFIX "" 52 | SUFFIX "~.pd_darwin" 53 | BUILD_WITH_INSTALL_RPATH FALSE 54 | LINK_FLAGS "-Wl,-rpath,@loader_path/" 55 | ) 56 | 57 | add_custom_command( 58 | TARGET nn 59 | POST_BUILD 60 | COMMAND ${CMAKE_SOURCE_DIR}/../env/bin/python ${CMAKE_SOURCE_DIR}/../install/dylib_fix.py -p "${CMAKE_CURRENT_BINARY_DIR}/*.pd_darwin" -l "${CMAKE_CURRENT_BINARY_DIR}/" "${CMAKE_BINARY_DIR}/_deps" "${CMAKE_SOURCE_DIR}/../env" 61 | COMMENT 
"Fixing libraries and codesigning" 62 | ) 63 | 64 | endif() 65 | 66 | 67 | function(resolve_symlink symlink_path resolved_path) 68 | execute_process( 69 | COMMAND readlink -f ${symlink_path} 70 | OUTPUT_VARIABLE resolved 71 | OUTPUT_STRIP_TRAILING_WHITESPACE 72 | ) 73 | set(${resolved_path} ${resolved} PARENT_SCOPE) 74 | endfunction() 75 | 76 | 77 | if (UNIX AND NOT APPLE) 78 | # set(TORCH_ESSENTIAL_LIBS 79 | # "libtorch.so*" 80 | # "libtorch_cpu.so*" 81 | # "libc10.so*" 82 | # "libgomp*so*" 83 | # "libtorch_global_deps.so*" 84 | # ) 85 | file(GLOB TORCH_ESSENTIAL_LIBS "${torch_dir}/libtorch/lib/*.so*") 86 | set(CURL_ESSENTIAL_LIBS 87 | "libnghttp2.so*" 88 | "libssh2.so*" 89 | "libssl.so*" 90 | "libkrb5.so*" 91 | "libk5crypto.so*" 92 | "libkrb5support.so*" 93 | "libcrypto.so*" 94 | "libgssapi_krb5.so*" 95 | "libzstd.so*" 96 | "libcom_err.so*" 97 | "libz.so*" 98 | "libcurl.so*" 99 | ) 100 | 101 | # Copy essential Torch libraries 102 | add_custom_target(copy_torch_libs) 103 | 104 | foreach(LIB_PATTR ${TORCH_ESSENTIAL_LIBS}) 105 | message("${LIB_PATTR} -> ${CMAKE_CURRENT_BINARY_DIR}") 106 | get_filename_component(LIB_NAME ${LIB_PATTR} NAME) 107 | add_custom_command( 108 | TARGET copy_torch_libs 109 | PRE_BUILD 110 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 111 | "${LIB_PATTR}" 112 | "${CMAKE_CURRENT_BINARY_DIR}/$(basename ${LIB_PATTR})" 113 | COMMENT "Copying ${LIB_NAME}" 114 | ) 115 | endforeach() 116 | 117 | # Copy essential curl libs 118 | foreach(LIB_PATTR ${CURL_ESSENTIAL_LIBS}) 119 | file(GLOB CURRENT_PATHS "${CMAKE_SOURCE_DIR}/../env/lib/${LIB_PATTR}") 120 | list(LENGTH CURRENT_PATHS N_PATHS) 121 | if (NOT ${N_PATHS} EQUAL 0) 122 | list(GET CURRENT_PATHS 0 LIB) 123 | resolve_symlink("${LIB}" original_path) 124 | get_filename_component(LIB_NAME ${LIB} NAME) 125 | message("${original_path} -> ${CMAKE_CURRENT_BINARY_DIR}/${LIB_NAME}") 126 | add_custom_command( 127 | TARGET copy_torch_libs 128 | PRE_BUILD 129 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 130 
| "${original_path}" 131 | "${CMAKE_CURRENT_BINARY_DIR}/${LIB_NAME}" 132 | COMMENT "Copying ${LIB_NAME}" 133 | ) 134 | else() 135 | message("${LIB_PATTR} not found") 136 | endif() 137 | endforeach() 138 | 139 | add_dependencies(nn copy_torch_libs) 140 | 141 | set_target_properties(nn PROPERTIES 142 | PREFIX "" 143 | SUFFIX "~.pd_linux" 144 | BUILD_WITH_INSTALL_RPATH TRUE 145 | INSTALL_RPATH_USE_LINK_PATH TRUE 146 | INSTALL_RPATH "$ORIGIN" 147 | ) 148 | 149 | # Add libgomp as a link dependency 150 | target_link_libraries(nn PRIVATE gomp) 151 | endif() 152 | 153 | if(MSVC) 154 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") 155 | 156 | # Base DLLs (these work for CPU version) 157 | set(REQUIRED_DLLS 158 | "torch_cpu.dll" 159 | "c10.dll" 160 | "fbgemm.dll" 161 | "libiomp5md.dll" 162 | "libiompstubs5md.dll" 163 | "uv.dll" 164 | "asmjit.dll" 165 | "torch.dll" 166 | "torch_global_deps.dll" 167 | ) 168 | 169 | # CUDA DLL setup 170 | if(CUDA_FOUND OR EXISTS "${TORCH_INSTALL_PREFIX}/lib/torch_cuda.dll") 171 | list(APPEND REQUIRED_DLLS 172 | # PyTorch CUDA DLLs 173 | "torch_cuda.dll" 174 | "c10_cuda.dll" 175 | # Core CUDA Runtime DLLs 176 | "cudart64_12.dll" 177 | # Additional CUDA DLLs 178 | "cudnn64_9.dll" 179 | "cudnn_graph64_9.dll" 180 | "cudnn_engines_precompiled64_9.dll" 181 | "cudnn_engines_runtime_compiled64_9.dll" 182 | "cudnn_heuristic64_9.dll" 183 | "nvrtc-builtins64_120.dll" 184 | "cudadevrt.dll" 185 | ) 186 | 187 | # VC Runtime handling 188 | if(DEFINED ENV{VCREDIST_PATH} AND EXISTS "$ENV{VCREDIST_PATH}/vcruntime140_1.dll") 189 | message(STATUS "Found VC Runtime at: $ENV{VCREDIST_PATH}") 190 | list(APPEND REQUIRED_DLLS "$ENV{VCREDIST_PATH}/vcruntime140_1.dll") 191 | else() 192 | message(WARNING "VC Runtime not found in VCREDIST_PATH") 193 | endif() 194 | 195 | # CUDA runtime DLL handling 196 | if(DEFINED ENV{CUDA_PATH}) 197 | file(GLOB CUDA_RUNTIME_DLLS "$ENV{CUDA_PATH}/bin/*.dll") 198 | foreach(CUDA_DLL ${CUDA_RUNTIME_DLLS}) 199 | 
get_filename_component(DLL_NAME ${CUDA_DLL} NAME) 200 | foreach(REQUIRED_DLL ${REQUIRED_DLLS}) 201 | if(DLL_NAME STREQUAL REQUIRED_DLL) 202 | add_custom_command(TARGET nn POST_BUILD 203 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 204 | "${CUDA_DLL}" 205 | "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/${DLL_NAME}" 206 | COMMENT "Copying CUDA runtime DLL: ${DLL_NAME}" 207 | ) 208 | endif() 209 | endforeach() 210 | endforeach() 211 | endif() 212 | endif() 213 | 214 | # Copy libtorch DLLs 215 | foreach(DLL ${REQUIRED_DLLS}) 216 | if(EXISTS "${TORCH_INSTALL_PREFIX}/lib/${DLL}") 217 | add_custom_command(TARGET nn POST_BUILD 218 | COMMAND ${CMAKE_COMMAND} -E copy_if_different 219 | "${TORCH_INSTALL_PREFIX}/lib/${DLL}" 220 | "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/${DLL}" 221 | COMMENT "Copying ${DLL}" 222 | ) 223 | endif() 224 | endforeach() 225 | 226 | set_target_properties(nn PROPERTIES PREFIX "" SUFFIX "~.dll") 227 | endif() 228 | 229 | if(NOT $ENV{PD_EXTERNAL_PATH} STREQUAL "") 230 | add_custom_command( 231 | TARGET nn 232 | POST_BUILD 233 | COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_CURRENT_BINARY_DIR}" "$ENV{PD_EXTERNAL_PATH}/nn_tilde" 234 | COMMENT "Copying ${CMAKE_CURRENT_BINARY_DIR} to $ENV{PD_EXTERNAL_PATH}/nn_tilde" 235 | ) 236 | endif() 237 | 238 | target_link_libraries(nn PRIVATE backend) 239 | target_include_directories(nn PRIVATE "${PUREDATA_INCLUDE_DIR}") 240 | 241 | if (APPLE) 242 | add_custom_command( 243 | TARGET nn 244 | POST_BUILD 245 | COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/../install/patch_with_vst.sh" "${CMAKE_BINARY_DIR}/frontend/puredata/nn_tilde/" 246 | ) 247 | endif() 248 | 249 | set(CONDA_ENV_PATH "${CMAKE_SOURCE_DIR}/../env") 250 | set(CURL_INCLUDE_DIR "${CONDA_ENV_PATH}/include") 251 | 252 | if (UNIX) 253 | if (APPLE) 254 | set(CURL_LIBRARY "${CONDA_ENV_PATH}/lib/libcurl.dylib") 255 | else() 256 | set(CURL_LIBRARY "${CONDA_ENV_PATH}/lib/libcurl.so") 257 | add_custom_command( 258 | TARGET nn 259 | 
POST_BUILD 260 | COMMAND ${CMAKE_COMMAND} -E copy ${CURL_LIBRARY} "${CMAKE_BINARY_DIR}/frontend/puredata/nn_tilde/" 261 | ) 262 | endif() 263 | endif() 264 | 265 | include_directories(${CURL_INCLUDE_DIR}) 266 | target_link_libraries(nn PRIVATE ${CURL_LIBRARY}) 267 | 268 | target_link_libraries(nn PRIVATE nlohmann_json::nlohmann_json) 269 | if (MSVC) 270 | target_link_libraries(nn PRIVATE "${PUREDATA_BIN_DIR}/pd.lib" shlwapi.lib) 271 | endif() -------------------------------------------------------------------------------- /src/patchers/latent_remote/M4L.latent_remote.maxpat: -------------------------------------------------------------------------------- 1 | { 2 | "patcher" : { 3 | "fileversion" : 1, 4 | "appversion" : { 5 | "major" : 8, 6 | "minor" : 6, 7 | "revision" : 5, 8 | "architecture" : "x64", 9 | "modernui" : 1 10 | } 11 | , 12 | "classnamespace" : "box", 13 | "rect" : [ 339.0, 161.0, 680.0, 729.0 ], 14 | "bglocked" : 0, 15 | "openinpresentation" : 1, 16 | "default_fontsize" : 12.0, 17 | "default_fontface" : 0, 18 | "default_fontname" : "Arial", 19 | "gridonopen" : 1, 20 | "gridsize" : [ 15.0, 15.0 ], 21 | "gridsnaponopen" : 1, 22 | "objectsnaponopen" : 1, 23 | "statusbarvisible" : 2, 24 | "toolbarvisible" : 1, 25 | "lefttoolbarpinned" : 0, 26 | "toptoolbarpinned" : 0, 27 | "righttoolbarpinned" : 0, 28 | "bottomtoolbarpinned" : 0, 29 | "toolbars_unpinned_last_save" : 0, 30 | "tallnewobj" : 0, 31 | "boxanimatetime" : 200, 32 | "enablehscroll" : 1, 33 | "enablevscroll" : 1, 34 | "devicewidth" : 0.0, 35 | "description" : "", 36 | "digest" : "", 37 | "tags" : "", 38 | "style" : "", 39 | "subpatcher_template" : "", 40 | "assistshowspatchername" : 0, 41 | "boxes" : [ { 42 | "box" : { 43 | "id" : "obj-11", 44 | "maxclass" : "message", 45 | "numinlets" : 2, 46 | "numoutlets" : 1, 47 | "outlettype" : [ "" ], 48 | "patching_rect" : [ 617.0, 64.0, 57.0, 22.0 ], 49 | "text" : "dump_all" 50 | } 51 | 52 | } 53 | , { 54 | "box" : { 55 | "id" : "obj-9", 56 | "maxclass" : 
"newobj", 57 | "numinlets" : 1, 58 | "numoutlets" : 0, 59 | "patching_rect" : [ 617.0, 92.0, 61.0, 22.0 ], 60 | "text" : "s #0_opts" 61 | } 62 | 63 | } 64 | , { 65 | "box" : { 66 | "id" : "obj-8", 67 | "maxclass" : "newobj", 68 | "numinlets" : 2, 69 | "numoutlets" : 2, 70 | "outlettype" : [ "bang", "" ], 71 | "patching_rect" : [ 497.399999999999977, 80.0, 76.0, 22.0 ], 72 | "text" : "sel dump_all" 73 | } 74 | 75 | } 76 | , { 77 | "box" : { 78 | "comment" : "", 79 | "id" : "obj-6", 80 | "index" : 0, 81 | "maxclass" : "outlet", 82 | "numinlets" : 1, 83 | "numoutlets" : 0, 84 | "patching_rect" : [ 517.0, 429.0, 30.0, 30.0 ], 85 | "varname" : "dumpout" 86 | } 87 | 88 | } 89 | , { 90 | "box" : { 91 | "comment" : "", 92 | "id" : "obj-5", 93 | "index" : 0, 94 | "maxclass" : "outlet", 95 | "numinlets" : 1, 96 | "numoutlets" : 0, 97 | "patching_rect" : [ 271.0, 441.0, 30.0, 30.0 ], 98 | "varname" : "symout" 99 | } 100 | 101 | } 102 | , { 103 | "box" : { 104 | "id" : "obj-4", 105 | "maxclass" : "newobj", 106 | "numinlets" : 0, 107 | "numoutlets" : 1, 108 | "outlettype" : [ "" ], 109 | "patching_rect" : [ 390.0, 578.0, 59.0, 22.0 ], 110 | "text" : "r #0_opts" 111 | } 112 | 113 | } 114 | , { 115 | "box" : { 116 | "comment" : "", 117 | "id" : "obj-3", 118 | "index" : 0, 119 | "maxclass" : "inlet", 120 | "numinlets" : 0, 121 | "numoutlets" : 1, 122 | "outlettype" : [ "" ], 123 | "patching_rect" : [ 419.0, 7.0, 30.0, 30.0 ] 124 | } 125 | 126 | } 127 | , { 128 | "box" : { 129 | "id" : "obj-57", 130 | "maxclass" : "message", 131 | "numinlets" : 2, 132 | "numoutlets" : 1, 133 | "outlettype" : [ "" ], 134 | "patching_rect" : [ 278.0, 578.0, 99.0, 22.0 ], 135 | "text" : "max_columns #2" 136 | } 137 | 138 | } 139 | , { 140 | "box" : { 141 | "id" : "obj-56", 142 | "maxclass" : "message", 143 | "numinlets" : 2, 144 | "numoutlets" : 1, 145 | "outlettype" : [ "" ], 146 | "patching_rect" : [ 206.0, 578.0, 60.0, 36.0 ], 147 | "text" : "sliders #1\n" 148 | } 149 | 150 | } 151 | , { 152 | 
"box" : { 153 | "id" : "obj-53", 154 | "maxclass" : "newobj", 155 | "numinlets" : 1, 156 | "numoutlets" : 2, 157 | "outlettype" : [ "bang", "bang" ], 158 | "patching_rect" : [ 193.0, 533.0, 32.0, 22.0 ], 159 | "text" : "t b b" 160 | } 161 | 162 | } 163 | , { 164 | "box" : { 165 | "id" : "obj-15", 166 | "maxclass" : "newobj", 167 | "numinlets" : 1, 168 | "numoutlets" : 1, 169 | "outlettype" : [ "bang" ], 170 | "patching_rect" : [ 193.0, 503.0, 58.0, 22.0 ], 171 | "text" : "loadbang" 172 | } 173 | 174 | } 175 | , { 176 | "box" : { 177 | "id" : "obj-387", 178 | "maxclass" : "newobj", 179 | "numinlets" : 1, 180 | "numoutlets" : 0, 181 | "patching_rect" : [ 517.0, 138.0, 74.0, 22.0 ], 182 | "text" : "s #0_opts" 183 | } 184 | 185 | } 186 | , { 187 | "box" : { 188 | "id" : "obj-386", 189 | "maxclass" : "newobj", 190 | "numinlets" : 1, 191 | "numoutlets" : 1, 192 | "outlettype" : [ "" ], 193 | "patching_rect" : [ 433.0, 138.0, 71.0, 22.0 ], 194 | "text" : "fromsymbol" 195 | } 196 | 197 | } 198 | , { 199 | "box" : { 200 | "id" : "obj-314", 201 | "maxclass" : "newobj", 202 | "numinlets" : 1, 203 | "numoutlets" : 6, 204 | "outlettype" : [ "signal", "bang", "int", "float", "", "list" ], 205 | "patching_rect" : [ 419.0, 49.0, 117.0, 22.0 ], 206 | "text" : "typeroute~", 207 | "varname" : "symbol_in" 208 | } 209 | 210 | } 211 | , { 212 | "box" : { 213 | "comment" : "", 214 | "id" : "obj-270", 215 | "index" : 0, 216 | "maxclass" : "outlet", 217 | "numinlets" : 1, 218 | "numoutlets" : 0, 219 | "patching_rect" : [ 10.0, 638.0, 30.0, 30.0 ], 220 | "varname" : "output1" 221 | } 222 | 223 | } 224 | , { 225 | "box" : { 226 | "comment" : "", 227 | "id" : "obj-33", 228 | "index" : 0, 229 | "maxclass" : "inlet", 230 | "numinlets" : 0, 231 | "numoutlets" : 1, 232 | "outlettype" : [ "" ], 233 | "patching_rect" : [ 10.0, 11.0, 30.0, 30.0 ], 234 | "varname" : "input1" 235 | } 236 | 237 | } 238 | , { 239 | "box" : { 240 | "id" : "obj-14", 241 | "maxclass" : "newobj", 242 | "numinlets" : 1, 243 
| "numoutlets" : 1, 244 | "outlettype" : [ "" ], 245 | "patching_rect" : [ 206.0, 655.0, 149.0, 22.0 ], 246 | "saved_object_attributes" : { 247 | "filename" : "M4L.latent_remote.js", 248 | "parameter_enable" : 0 249 | } 250 | , 251 | "text" : "js M4L.latent_remote.js #0", 252 | "varname" : "js" 253 | } 254 | 255 | } 256 | , { 257 | "box" : { 258 | "id" : "obj-1", 259 | "maxclass" : "newobj", 260 | "numinlets" : 1, 261 | "numoutlets" : 1, 262 | "outlettype" : [ "signal" ], 263 | "patching_rect" : [ 10.0, 80.0, 100.0, 22.0 ], 264 | "text" : "mc.unpack~ #1", 265 | "varname" : "input_unpack" 266 | } 267 | 268 | } 269 | , { 270 | "box" : { 271 | "id" : "obj-2", 272 | "maxclass" : "newobj", 273 | "numinlets" : 1, 274 | "numoutlets" : 1, 275 | "outlettype" : [ "multichannelsignal" ], 276 | "patching_rect" : [ 10.0, 400.0, 100.0, 22.0 ], 277 | "text" : "mc.pack~ #1", 278 | "varname" : "output_pack" 279 | } 280 | 281 | } 282 | , { 283 | "box" : { 284 | "id" : "obj-7", 285 | "maxclass" : "newobj", 286 | "numinlets" : 2, 287 | "numoutlets" : 1, 288 | "outlettype" : [ "" ], 289 | "patching_rect" : [ 271.0, 421.0, 100.0, 22.0 ], 290 | "text" : "pak" 291 | } 292 | 293 | } 294 | ], 295 | "lines" : [ { 296 | "patchline" : { 297 | "destination" : [ "obj-9", 0 ], 298 | "source" : [ "obj-11", 0 ] 299 | } 300 | 301 | } 302 | , { 303 | "patchline" : { 304 | "destination" : [ "obj-53", 0 ], 305 | "source" : [ "obj-15", 0 ] 306 | } 307 | 308 | } 309 | , { 310 | "patchline" : { 311 | "destination" : [ "obj-270", 0 ], 312 | "source" : [ "obj-2", 0 ] 313 | } 314 | 315 | } 316 | , { 317 | "patchline" : { 318 | "destination" : [ "obj-314", 0 ], 319 | "source" : [ "obj-3", 0 ] 320 | } 321 | 322 | } 323 | , { 324 | "patchline" : { 325 | "destination" : [ "obj-387", 0 ], 326 | "source" : [ "obj-314", 5 ] 327 | } 328 | 329 | } 330 | , { 331 | "patchline" : { 332 | "destination" : [ "obj-8", 0 ], 333 | "source" : [ "obj-314", 4 ] 334 | } 335 | 336 | } 337 | , { 338 | "patchline" : { 339 | 
"destination" : [ "obj-1", 0 ], 340 | "source" : [ "obj-33", 0 ] 341 | } 342 | 343 | } 344 | , { 345 | "patchline" : { 346 | "destination" : [ "obj-387", 0 ], 347 | "source" : [ "obj-386", 0 ] 348 | } 349 | 350 | } 351 | , { 352 | "patchline" : { 353 | "destination" : [ "obj-14", 0 ], 354 | "source" : [ "obj-4", 0 ] 355 | } 356 | 357 | } 358 | , { 359 | "patchline" : { 360 | "destination" : [ "obj-56", 0 ], 361 | "source" : [ "obj-53", 0 ] 362 | } 363 | 364 | } 365 | , { 366 | "patchline" : { 367 | "destination" : [ "obj-57", 0 ], 368 | "midpoints" : [ 215.5, 566.0, 287.5, 566.0 ], 369 | "source" : [ "obj-53", 1 ] 370 | } 371 | 372 | } 373 | , { 374 | "patchline" : { 375 | "destination" : [ "obj-14", 0 ], 376 | "source" : [ "obj-56", 0 ] 377 | } 378 | 379 | } 380 | , { 381 | "patchline" : { 382 | "destination" : [ "obj-14", 0 ], 383 | "source" : [ "obj-57", 0 ] 384 | } 385 | 386 | } 387 | , { 388 | "patchline" : { 389 | "destination" : [ "obj-5", 0 ], 390 | "source" : [ "obj-7", 0 ] 391 | } 392 | 393 | } 394 | , { 395 | "patchline" : { 396 | "destination" : [ "obj-11", 0 ], 397 | "source" : [ "obj-8", 0 ] 398 | } 399 | 400 | } 401 | , { 402 | "patchline" : { 403 | "destination" : [ "obj-386", 0 ], 404 | "source" : [ "obj-8", 1 ] 405 | } 406 | 407 | } 408 | ], 409 | "dependency_cache" : [ { 410 | "name" : "M4L.latent_remote.js", 411 | "bootpath" : "~/Documents/Max 8/Library/latent_control", 412 | "patcherrelativepath" : ".", 413 | "type" : "TEXT", 414 | "implicit" : 1 415 | } 416 | ], 417 | "autosave" : 0 418 | } 419 | 420 | } 421 | --------------------------------------------------------------------------------