├── .gitignore ├── .gitmodules ├── CMakeLists.txt ├── LICENSE ├── README.md ├── common ├── Biquad.cpp ├── Biquad.h └── ValueSmoother.hpp ├── irs └── deer ink studios │ ├── V30-P1-opus87-deerinkstudios.wav │ ├── V30-P1-sene935-deerinkstudios.wav │ ├── V30-P2-audix-i5-deerinkstudios.wav │ └── V30-P2-sene935-deerinkstudios.wav ├── models └── deer ink studios │ ├── tw40_blues_deluxe_deerinkstudios.json │ ├── tw40_blues_solo_deerinkstudios.json │ ├── tw40_british_lead_deerinkstudios.json │ ├── tw40_british_rhythm_deerinkstudios.json │ ├── tw40_california_clean_deerinkstudios.json │ └── tw40_california_crunch_deerinkstudios.json ├── rt-neural-generic ├── CMakeLists.txt ├── src │ ├── model_variant.hpp │ ├── rt-neural-generic.cpp │ ├── rt-neural-generic.h │ └── uris.h └── ttl │ ├── manifest.ttl │ ├── modgui.ttl │ ├── modgui │ ├── aida-x-manual.pdf │ ├── icon-rt-neural-generic.html │ ├── img │ │ ├── aida.png │ │ ├── ax.svg │ │ ├── rw_knob_large_dark.svg │ │ ├── scale.png │ │ └── scale.svg │ ├── javascript-rt-neural-generic.js │ ├── knobs │ │ ├── rw_knob_large_dark.svg │ │ └── scale.png │ ├── screenshot-rt-neural-generic.png │ ├── stylesheet-rt-neural-generic.css │ └── thumbnail-rt-neural-generic.png │ └── rt-neural-generic.ttl ├── tests ├── CMakeLists.txt └── src │ ├── test_rtneural.cpp │ └── test_smoothers.cpp └── variant └── generate_variant_hpp.py /.gitignore: -------------------------------------------------------------------------------- 1 | # These are some examples of commonly ignored file patterns. 2 | # You should customize this list as applicable to your project. 3 | # Learn more about .gitignore: 4 | # https://www.atlassian.com/git/tutorials/saving-changes/gitignore 5 | 6 | # cmake build folder 7 | build/ 8 | 9 | # Node artifact files 10 | node_modules/ 11 | dist/ 12 | 13 | # Compiled Java class files 14 | *.class 15 | 16 | # Compiled Python bytecode 17 | *.py[cod] 18 | 19 | # Log files 20 | *.log 21 | 22 | # Package files 23 | *.jar 24 | 25 | # Maven 26 | target/ 27 | dist/ 28 | 29 | # JetBrains IDE 30 | .idea/ 31 | 32 | # Unit test reports 33 | TEST*.xml 34 | 35 | # Generated by macOS 36 | .DS_Store 37 | 38 | # Generated by Windows 39 | Thumbs.db 40 | 41 | # Applications 42 | *.app 43 | *.exe 44 | *.war 45 | 46 | # Large media files 47 | *.mp4 48 | *.tiff 49 | *.avi 50 | *.flv 51 | *.mov 52 | *.wmv 53 | 54 | aidadsp-lv2-modduox.zip 55 | rt-neural-generic/ttl/modgui/img/aida.psd 56 | rt-neural-generic/ttl/modgui/img/wood.jpg 57 | rt-neural-generic/ttl/modgui/img/space.jpg 58 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "modules/RTNeural"] 2 | path = modules/RTNeural 3 | url = https://github.com/jatinchowdhury18/RTNeural 4 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.15) 2 | 3 | project(aidadsp-lv2 VERSION 1.0.0) 4 | 5 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") 6 | set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE}") 7 | 8 | option(GENERIC_AARCH64 "Use specific cxxflags and ldflags for a generic aarch64 machine" OFF) 9 | option(MPB_MOD_DUO_GCC_750 "Use specific cxxflags and ldflags for Mod Duo device using mod-plugin-builder with toolchain gcc 7.5.0" OFF) 10 | option(MPB_MOD_DWARF_GCC_750 "Use specific cxxflags and ldflags for Mod 
Dwarf device using mod-plugin-builder with toolchain gcc 7.5.0" OFF) 11 | option(MPB_MOD_DUOX_GCC_750 "Use specific cxxflags and ldflags for Mod Duo X device using mod-plugin-builder with toolchain gcc 7.5.0" OFF) 12 | option(MPB_RPI4_GCC_750 "Use specific cxxflags and ldflags for Rpi4 device using mod-plugin-builder with toolchain gcc 7.5.0" OFF) 13 | 14 | if(GENERIC_AARCH64) 15 | # @TODO: -funsafe-loop-optimizations causes the plugin to sound really bad, excluding 16 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fprefetch-loop-arrays -funroll-loops -static-libstdc++ -Wl,-Ofast -Wl,--as-needed -Wl,--strip-all") 17 | set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -static-libstdc++ -Wl,-Ofast -Wl,--as-needed -Wl,--strip-all") 18 | endif() 19 | 20 | if(MPB_MOD_DUO_GCC_750) 21 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -mcpu=cortex-a7 -mtune=cortex-a7 -ffast-math -fno-finite-math-only -fprefetch-loop-arrays -funroll-loops -funsafe-loop-optimizations") 22 | set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -static-libstdc++ -Wl,-Ofast -Wl,--as-needed -Wl,--strip-all") 23 | endif() 24 | 25 | if(MPB_MOD_DWARF_GCC_750) 26 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -mcpu=cortex-a35 -mtune=cortex-a35 -ffast-math -fno-finite-math-only -fprefetch-loop-arrays -funroll-loops -funsafe-loop-optimizations") 27 | set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -static-libstdc++ -Wl,-Ofast -Wl,--as-needed -Wl,--strip-all") 28 | endif() 29 | 30 | if(MPB_MOD_DUOX_GCC_750) 31 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -mcpu=cortex-a53 -mtune=cortex-a53 -ffast-math -fno-finite-math-only -fprefetch-loop-arrays -funroll-loops -funsafe-loop-optimizations") 32 | set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -static-libstdc++ -Wl,-Ofast -Wl,--as-needed -Wl,--strip-all") 33 | endif() 34 | 35 | if(MPB_RPI4_GCC_750) 36 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -mcpu=cortex-a72 -mtune=cortex-a72 -ffast-math -fno-finite-math-only -fprefetch-loop-arrays -funroll-loops -funsafe-loop-optimizations") 37 | set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -static-libstdc++ -Wl,-Ofast -Wl,--as-needed -Wl,--strip-all") 38 | endif() 39 | 40 | message("CMAKE_CXX_FLAGS_RELEASE in ${CMAKE_PROJECT_NAME} = ${CMAKE_CXX_FLAGS_RELEASE}") 41 | message("CMAKE_SHARED_LINKER_FLAGS_RELEASE in ${CMAKE_PROJECT_NAME} = ${CMAKE_SHARED_LINKER_FLAGS_RELEASE}") 42 | 43 | set(AIDADSP_MODEL "" CACHE STRING "Which commercial plugin model to build") 44 | 45 | if (AIDADSP_MODEL) 46 | add_subdirectory(commercial) 47 | else() 48 | add_subdirectory(rt-neural-generic) 49 | endif() 50 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. 
By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 
76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. 
However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Aida DSP LV2 plugin bundle # 2 | 3 | [![paypal](https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif)](https://www.paypal.com/donate/?hosted_button_id=UZWHH6HKJTHFJ) 4 | 5 | ### What is this repository for? ### 6 | 7 | * A bundle of audio plugins from [Aida DSP](http://aidadsp.cc) 8 | * Bundle version: 1.0 9 | * This bundle is intended to be used with MOD Audio's products and derivatives 10 | 11 | ### Plugin list ### 12 | 13 | * rt-neural-generic.lv2 14 | 15 | #### rt-neural-generic.lv2 #### 16 | 17 | It's an LV2 plugin that leverages [RTNeural](https://github.com/jatinchowdhury18/RTNeural.git) to model 18 | pedals or amps. 19 | 20 | - Play realistic Amps or Pedals captured with cutting-edge ML technology 21 | - Full-featured 5-band EQ with adjustable Q, frequencies and pre/post switch 22 | - Input and Output Volume Controls 23 | 24 | Developers: 25 | 26 | - This plugin supports loading json model files via specific atom messages 27 | 28 | ##### Generate json models ##### 29 | 30 | This requires training a neural network. Please follow: 31 | 32 | - [AIDA-X Model Trainer.ipynb](https://colab.research.google.com/github/AidaDSP/Automated-GuitarAmpModelling/blob/aidadsp_devel/AIDA_X_Model_Trainer.ipynb) 33 | 34 | ### Build ### 35 | 36 | #### MOD Audio #### 37 | 38 | We're proudly part of the amazing MOD Audio platform and community. Our plugin is already integrated 39 | in their build system [MPB](https://github.com/moddevices/mod-plugin-builder/blob/master/plugins/package/aidadsp-lv2/aidadsp-lv2.mk), so please just follow their instructions. 40 | 41 | #### Aida DSP OS #### 42 | 43 | Below is a guide on how to cross-compile this bundle with the [aidadsp sdk](https://drive.google.com/drive/folders/1-AAfAP-FAddCw0LJuvzsW8m_1lWHKXaV?usp=sharing). 44 | You can extract the cmake commands to fit your own build system.
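For example, on an ordinary x86_64 Linux host the same commands reduce to the sketch below. This is a minimal, untested outline that assumes a C++17 toolchain and the LV2 development headers are installed; the RTNeural backend options listed next apply to native and cross builds alike:

```
git clone --recursive https://github.com/AidaDSP/aidadsp-lv2.git
cd aidadsp-lv2 && mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release -DRTNEURAL_XSIMD=ON ..
cmake --build .
make install DESTDIR="/tmp/"
```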
45 | 46 | - RTNEURAL_ENABLE_AARCH64 specific option for aarch64 builds 47 | - RTNEURAL_XSIMD=ON or RTNEURAL_EIGEN=ON to select an available backend for RTNeural library 48 | 49 | for other options see [RTNeural](https://github.com/jatinchowdhury18/RTNeural.git) project. 50 | 51 | ``` 52 | 1. install sdk with ./poky-glibc-x86_64-aidadsp-sdk-image-aarch64-nanopi-neo2-toolchain-2.1.15.sh 53 | 3. source environment-setup-aarch64-poky-linux 54 | 4. git clone https://github.com/AidaDSP/aidadsp-lv2.git && cd aidadsp-lv2 55 | 5. mkdir build && cd build 56 | 6. cmake -DCMAKE_BUILD_TYPE=Release -DGENERIC_AARCH64=ON -DRTNEURAL_XSIMD=ON -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON ../ 57 | 7. cmake --build . 58 | 8. make install DESTDIR="/tmp/" 59 | 60 | bundle will be placed in /tmp/ ready to be copied on your device 61 | ``` 62 | 63 | [![paypal](https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif)](https://www.paypal.com/donate/?hosted_button_id=UZWHH6HKJTHFJ) 64 | -------------------------------------------------------------------------------- /common/Biquad.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Biquad.cpp 3 | // 4 | // Created by Nigel Redmon on 11/24/12 5 | // EarLevel Engineering: earlevel.com 6 | // Copyright 2012 Nigel Redmon 7 | // 8 | // For a complete explanation of the Biquad code: 9 | // http://www.earlevel.com/main/2012/11/26/biquad-c-source-code/ 10 | // 11 | // License: 12 | // 13 | // This source code is provided as is, without warranty. 14 | // You may copy and distribute verbatim copies of this document. 15 | // You may modify and use this source code to create binary code 16 | // for your own purposes, free or commercial. 17 | // 18 | 19 | #include 20 | #include "Biquad.h" 21 | 22 | Biquad::Biquad() { 23 | type = bq_type_lowpass; 24 | a0 = 1.0; 25 | a1 = a2 = b1 = b2 = 0.0; 26 | Fc = 0.50; 27 | Q = 0.707; 28 | peakGain = 0.0; 29 | z1 = z2 = 0.0; 30 | } 31 | 32 | Biquad::Biquad(int type, double Fc, double Q, double peakGainDB) { 33 | setBiquad(type, Fc, Q, peakGainDB); 34 | z1 = z2 = 0.0; 35 | } 36 | 37 | Biquad::~Biquad() { 38 | } 39 | 40 | void Biquad::setType(int type) { 41 | this->type = type; 42 | calcBiquad(); 43 | } 44 | 45 | void Biquad::setQ(double Q) { 46 | this->Q = Q; 47 | calcBiquad(); 48 | } 49 | 50 | void Biquad::setFc(double Fc) { 51 | this->Fc = Fc; 52 | calcBiquad(); 53 | } 54 | 55 | void Biquad::setPeakGain(double peakGainDB) { 56 | this->peakGain = peakGainDB; 57 | calcBiquad(); 58 | } 59 | 60 | void Biquad::setBiquad(int type, double Fc, double Q, double peakGainDB) { 61 | this->type = type; 62 | this->Q = Q; 63 | this->Fc = Fc; 64 | setPeakGain(peakGainDB); 65 | } 66 | 67 | void Biquad::calcBiquad(void) { 68 | double norm; 69 | double V = pow(10, fabs(peakGain) / 20.0); 70 | double K = tan(M_PI * Fc); 71 | switch (this->type) { 72 | case bq_type_lowpass: 73 | norm = 1 / (1 + K / Q + K * K); 74 | a0 = K * K * norm; 75 | a1 = 2 * a0; 76 | a2 = a0; 77 | b1 = 2 * (K * K - 1) * norm; 78 | b2 = (1 - K / Q + K * K) * norm; 79 | break; 80 | 81 | case bq_type_highpass: 82 | norm = 1 / (1 + K / Q + K * K); 83 | a0 = 1 * norm; 84 | a1 = -2 * a0; 85 | a2 = a0; 86 | b1 = 2 * (K * K - 1) * norm; 87 | b2 = (1 - K / Q + K * K) * norm; 88 | break; 89 | 90 | case bq_type_bandpass: 91 | norm = 1 / (1 + K / Q + K * K); 92 | a0 = K / Q * norm; 93 | a1 = 0; 94 | a2 = -a0; 95 | b1 = 2 * (K * K - 1) * norm; 96 | b2 = (1 - K / Q + K * K) * norm; 97 | break; 98 | 99 | case bq_type_notch: 100 | norm = 1 / (1 + K / Q + K * K); 
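// Notch: uses the same denominator (and the norm factor computed above) as the
// low-pass/high-pass/band-pass cases; the numerator below places a pair of zeros
// on the unit circle at Fc, so the response drops to zero at the center frequency.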
101 | a0 = (1 + K * K) * norm; 102 | a1 = 2 * (K * K - 1) * norm; 103 | a2 = a0; 104 | b1 = a1; 105 | b2 = (1 - K / Q + K * K) * norm; 106 | break; 107 | 108 | case bq_type_peak: 109 | if (peakGain >= 0) { // boost 110 | norm = 1 / (1 + 1/Q * K + K * K); 111 | a0 = (1 + V/Q * K + K * K) * norm; 112 | a1 = 2 * (K * K - 1) * norm; 113 | a2 = (1 - V/Q * K + K * K) * norm; 114 | b1 = a1; 115 | b2 = (1 - 1/Q * K + K * K) * norm; 116 | } 117 | else { // cut 118 | norm = 1 / (1 + V/Q * K + K * K); 119 | a0 = (1 + 1/Q * K + K * K) * norm; 120 | a1 = 2 * (K * K - 1) * norm; 121 | a2 = (1 - 1/Q * K + K * K) * norm; 122 | b1 = a1; 123 | b2 = (1 - V/Q * K + K * K) * norm; 124 | } 125 | break; 126 | case bq_type_lowshelf: 127 | if (peakGain >= 0) { // boost 128 | norm = 1 / (1 + sqrt(2) * K + K * K); 129 | a0 = (1 + sqrt(2*V) * K + V * K * K) * norm; 130 | a1 = 2 * (V * K * K - 1) * norm; 131 | a2 = (1 - sqrt(2*V) * K + V * K * K) * norm; 132 | b1 = 2 * (K * K - 1) * norm; 133 | b2 = (1 - sqrt(2) * K + K * K) * norm; 134 | } 135 | else { // cut 136 | norm = 1 / (1 + sqrt(2*V) * K + V * K * K); 137 | a0 = (1 + sqrt(2) * K + K * K) * norm; 138 | a1 = 2 * (K * K - 1) * norm; 139 | a2 = (1 - sqrt(2) * K + K * K) * norm; 140 | b1 = 2 * (V * K * K - 1) * norm; 141 | b2 = (1 - sqrt(2*V) * K + V * K * K) * norm; 142 | } 143 | break; 144 | case bq_type_highshelf: 145 | if (peakGain >= 0) { // boost 146 | norm = 1 / (1 + sqrt(2) * K + K * K); 147 | a0 = (V + sqrt(2*V) * K + K * K) * norm; 148 | a1 = 2 * (K * K - V) * norm; 149 | a2 = (V - sqrt(2*V) * K + K * K) * norm; 150 | b1 = 2 * (K * K - 1) * norm; 151 | b2 = (1 - sqrt(2) * K + K * K) * norm; 152 | } 153 | else { // cut 154 | norm = 1 / (V + sqrt(2*V) * K + K * K); 155 | a0 = (1 + sqrt(2) * K + K * K) * norm; 156 | a1 = 2 * (K * K - 1) * norm; 157 | a2 = (1 - sqrt(2) * K + K * K) * norm; 158 | b1 = 2 * (K * K - V) * norm; 159 | b2 = (V - sqrt(2*V) * K + K * K) * norm; 160 | } 161 | break; 162 | } 163 | 164 | return; 165 | } 166 | -------------------------------------------------------------------------------- /common/Biquad.h: -------------------------------------------------------------------------------- 1 | // 2 | // Biquad.h 3 | // 4 | // Created by Nigel Redmon on 11/24/12 5 | // EarLevel Engineering: earlevel.com 6 | // Copyright 2012 Nigel Redmon 7 | // 8 | // For a complete explanation of the Biquad code: 9 | // http://www.earlevel.com/main/2012/11/26/biquad-c-source-code/ 10 | // 11 | // License: 12 | // 13 | // This source code is provided as is, without warranty. 14 | // You may copy and distribute verbatim copies of this document. 15 | // You may modify and use this source code to create binary code 16 | // for your own purposes, free or commercial. 
17 | // 18 | 19 | #ifndef Biquad_h 20 | #define Biquad_h 21 | 22 | enum { 23 | bq_type_lowpass = 0, 24 | bq_type_highpass, 25 | bq_type_bandpass, 26 | bq_type_notch, 27 | bq_type_peak, 28 | bq_type_lowshelf, 29 | bq_type_highshelf 30 | }; 31 | 32 | class Biquad { 33 | public: 34 | Biquad(); 35 | Biquad(int type, double Fc, double Q, double peakGainDB); 36 | ~Biquad(); 37 | void setType(int type); 38 | void setQ(double Q); 39 | void setFc(double Fc); 40 | void setPeakGain(double peakGainDB); 41 | void setBiquad(int type, double Fc, double Q, double peakGainDB); 42 | float process(float in); 43 | 44 | protected: 45 | void calcBiquad(void); 46 | 47 | int type; 48 | double a0, a1, a2, b1, b2; 49 | double Fc, Q, peakGain; 50 | double z1, z2; 51 | }; 52 | 53 | inline float Biquad::process(float in) { 54 | double out = in * a0 + z1; 55 | z1 = in * a1 + z2 - b1 * out; 56 | z2 = in * a2 - b2 * out; 57 | return out; 58 | } 59 | 60 | #endif // Biquad_h 61 | -------------------------------------------------------------------------------- /common/ValueSmoother.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * DISTRHO Plugin Framework (DPF) 3 | * Copyright (C) 2021 Jean Pierre Cimalando 4 | * Copyright (C) 2021-2023 Filipe Coelho 5 | * 6 | * Permission to use, copy, modify, and/or distribute this software for any purpose with 7 | * or without fee is hereby granted, provided that the above copyright notice and this 8 | * permission notice appear in all copies. 9 | * 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD 11 | * TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN 12 | * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 14 | * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 15 | * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 | */ 17 | 18 | #ifndef DISTRHO_VALUE_SMOOTHER_HPP_INCLUDED 19 | #define DISTRHO_VALUE_SMOOTHER_HPP_INCLUDED 20 | 21 | #include 22 | #include 23 | 24 | // -------------------------------------------------------------------------------------------------------------------- 25 | 26 | /** 27 | Safely compare two floating point numbers. 28 | Returns true if they match. 29 | */ 30 | template 31 | static inline constexpr 32 | bool d_isEqual(const T& v1, const T& v2) 33 | { 34 | return std::abs(v1-v2) < std::numeric_limits::epsilon(); 35 | } 36 | 37 | /** 38 | Safely compare two floating point numbers. 39 | Returns true if they don't match. 40 | */ 41 | template 42 | static inline constexpr 43 | bool d_isNotEqual(const T& v1, const T& v2) 44 | { 45 | return std::abs(v1-v2) >= std::numeric_limits::epsilon(); 46 | } 47 | 48 | /** 49 | Safely check if a floating point number is zero. 50 | */ 51 | template 52 | static inline constexpr 53 | bool d_isZero(const T& value) 54 | { 55 | return std::abs(value) < std::numeric_limits::epsilon(); 56 | } 57 | 58 | /** 59 | Safely check if a floating point number is not zero. 
60 | */ 61 | template 62 | static inline constexpr 63 | bool d_isNotZero(const T& value) 64 | { 65 | return std::abs(value) >= std::numeric_limits::epsilon(); 66 | } 67 | 68 | // -------------------------------------------------------------------------------------------------------------------- 69 | 70 | /** 71 | * @brief An exponential smoother for control values 72 | * 73 | * This continually smooths a value towards a defined target, 74 | * using a low-pass filter of the 1st order, which creates an exponential curve. 75 | * 76 | * The length of the curve is defined by a T60 constant, 77 | * which is the time it takes for a 1-to-0 smoothing to fall to -60dB. 78 | * 79 | * Note that this smoother has asymptotical behavior, 80 | * and it must not be assumed that the final target is ever reached. 81 | */ 82 | class ExponentialValueSmoother { 83 | float coef; 84 | float target; 85 | float mem; 86 | float tau; 87 | float sampleRate; 88 | 89 | public: 90 | ExponentialValueSmoother() 91 | : coef(0.f), 92 | target(0.f), 93 | mem(0.f), 94 | tau(0.f), 95 | sampleRate(0.f) {} 96 | 97 | void setSampleRate(const float newSampleRate) noexcept 98 | { 99 | if (d_isNotEqual(sampleRate, newSampleRate)) 100 | { 101 | sampleRate = newSampleRate; 102 | updateCoef(); 103 | } 104 | } 105 | 106 | void setTimeConstant(const float newT60) noexcept 107 | { 108 | const float newTau = newT60 * (float)(1.0 / 6.91); 109 | 110 | if (d_isNotEqual(tau, newTau)) 111 | { 112 | tau = newTau; 113 | updateCoef(); 114 | } 115 | } 116 | 117 | float getCurrentValue() const noexcept 118 | { 119 | return mem; 120 | } 121 | 122 | float getTargetValue() const noexcept 123 | { 124 | return target; 125 | } 126 | 127 | void setTargetValue(const float newTarget) noexcept 128 | { 129 | target = newTarget; 130 | } 131 | 132 | void clearToTargetValue() noexcept 133 | { 134 | mem = target; 135 | } 136 | 137 | inline float peek() const noexcept 138 | { 139 | return mem * coef + target * (1.f - coef); 140 | } 141 | 142 | inline float next() noexcept 143 | { 144 | return (mem = mem * coef + target * (1.f - coef)); 145 | } 146 | 147 | private: 148 | void updateCoef() noexcept 149 | { 150 | coef = std::exp(-1.f / (tau * sampleRate)); 151 | } 152 | }; 153 | 154 | // -------------------------------------------------------------------------------------------------------------------- 155 | 156 | /** 157 | * @brief A linear smoother for control values 158 | * 159 | * This continually smooths a value towards a defined target, using linear segments. 160 | * 161 | * The duration of the smoothing segment is defined by the given time constant. 162 | * Every time the target changes, a new segment restarts for the whole duration of the time constant. 163 | * 164 | * Note that this smoother, unlike an exponential smoother, eventually should converge to its target value. 
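 * For example, with a time constant of 0.02 s at a 48 kHz sample rate, the step computed
 * in updateStep() is sized so that a new target is reached after roughly 0.02 * 48000 = 960 calls to next().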
165 | */ 166 | class LinearValueSmoother { 167 | float step; 168 | float target; 169 | float mem; 170 | float tau; 171 | float sampleRate; 172 | 173 | public: 174 | LinearValueSmoother() 175 | : step(0.f), 176 | target(0.f), 177 | mem(0.f), 178 | tau(0.f), 179 | sampleRate(0.f) {} 180 | 181 | void setSampleRate(const float newSampleRate) noexcept 182 | { 183 | if (d_isNotEqual(sampleRate, newSampleRate)) 184 | { 185 | sampleRate = newSampleRate; 186 | updateStep(); 187 | } 188 | } 189 | 190 | void setTimeConstant(const float newTau) noexcept 191 | { 192 | if (d_isNotEqual(tau, newTau)) 193 | { 194 | tau = newTau; 195 | updateStep(); 196 | } 197 | } 198 | 199 | float getCurrentValue() const noexcept 200 | { 201 | return mem; 202 | } 203 | 204 | float getTargetValue() const noexcept 205 | { 206 | return target; 207 | } 208 | 209 | void setTargetValue(const float newTarget) noexcept 210 | { 211 | if (d_isNotEqual(target, newTarget)) 212 | { 213 | target = newTarget; 214 | updateStep(); 215 | } 216 | } 217 | 218 | void clearToTargetValue() noexcept 219 | { 220 | mem = target; 221 | } 222 | 223 | inline float peek() const noexcept 224 | { 225 | const float dy = target - mem; 226 | return mem + std::copysign(std::fmin(std::abs(dy), std::abs(step)), dy); 227 | } 228 | 229 | inline float next() noexcept 230 | { 231 | const float y0 = mem; 232 | const float dy = target - y0; 233 | return (mem = y0 + std::copysign(std::fmin(std::abs(dy), std::abs(step)), dy)); 234 | } 235 | 236 | private: 237 | void updateStep() noexcept 238 | { 239 | step = (target - mem) / (tau * sampleRate); 240 | } 241 | }; 242 | 243 | // -------------------------------------------------------------------------------------------------------------------- 244 | 245 | #endif // DISTRHO_VALUE_SMOOTHER_HPP_INCLUDED 246 | -------------------------------------------------------------------------------- /irs/deer ink studios/V30-P1-opus87-deerinkstudios.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/irs/deer ink studios/V30-P1-opus87-deerinkstudios.wav -------------------------------------------------------------------------------- /irs/deer ink studios/V30-P1-sene935-deerinkstudios.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/irs/deer ink studios/V30-P1-sene935-deerinkstudios.wav -------------------------------------------------------------------------------- /irs/deer ink studios/V30-P2-audix-i5-deerinkstudios.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/irs/deer ink studios/V30-P2-audix-i5-deerinkstudios.wav -------------------------------------------------------------------------------- /irs/deer ink studios/V30-P2-sene935-deerinkstudios.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/irs/deer ink studios/V30-P2-sene935-deerinkstudios.wav -------------------------------------------------------------------------------- /rt-neural-generic/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # CMake file for LV2 rt-neural-generic plugin 2 | 3 | 
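# Typical configure/build from the repository root via the top-level CMakeLists
# (illustrative commands; PREFIX is only used further down to assemble the
# rt-neural-generic.lv2 install path):
#   git submodule update --init --recursive
#   cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DPREFIX=/usr/lib/lv2
#   cmake --build build
#   cmake --install build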
cmake_minimum_required(VERSION 3.15) 4 | 5 | project(rt-neural-generic) 6 | 7 | # flags and definitions 8 | set(CMAKE_CXX_STANDARD 17) 9 | 10 | set(RTNEURAL_XSIMD ON CACHE BOOL "Use RTNeural with this backend") 11 | message("RTNEURAL_XSIMD in ${CMAKE_PROJECT_NAME} = ${RTNEURAL_XSIMD}") 12 | 13 | # add external libraries 14 | add_subdirectory(../modules/RTNeural ${CMAKE_CURRENT_BINARY_DIR}/RTNeural) 15 | 16 | # check for lv2 using pkgconfig 17 | find_package(PkgConfig) 18 | pkg_check_modules(LV2 REQUIRED lv2>=1.10.0) 19 | 20 | # configure executable 21 | add_library(rt-neural-generic SHARED 22 | src/rt-neural-generic.cpp 23 | ../common/Biquad.cpp 24 | ) 25 | 26 | # include and link directories 27 | include_directories(rt-neural-generic 28 | ./src 29 | ../common 30 | ${LV2_INCLUDE_DIRS} 31 | ../modules/RTNeural/modules/json 32 | ../modules/RTNeural) 33 | 34 | link_directories(rt-neural-generic 35 | ./src 36 | ../common 37 | ${LV2_LIBRARY_DIRS} 38 | ../modules/RTNeural 39 | ../modules/RTNeural/modules/json) 40 | 41 | # configure target 42 | target_compile_definitions(rt-neural-generic PUBLIC 43 | AIDADSP_COMMERCIAL=0 44 | AIDADSP_MODEL_LOADER=1 45 | ) 46 | target_link_libraries(rt-neural-generic ${LV2_LIBRARIES} RTNeural) 47 | set_target_properties(rt-neural-generic PROPERTIES PREFIX "") 48 | 49 | # setup install dir 50 | set(LV2_INSTALL_DIR ${DESTDIR}${PREFIX}/rt-neural-generic.lv2) 51 | 52 | # config install 53 | install(TARGETS rt-neural-generic 54 | DESTINATION ${LV2_INSTALL_DIR} 55 | ) 56 | 57 | install(DIRECTORY ttl/ 58 | DESTINATION ${LV2_INSTALL_DIR} 59 | ) 60 | 61 | install(DIRECTORY ../models 62 | DESTINATION ${LV2_INSTALL_DIR} 63 | ) 64 | -------------------------------------------------------------------------------- /rt-neural-generic/src/model_variant.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #define MAX_INPUT_SIZE 3 5 | struct NullModel { static constexpr int input_size = 0; static constexpr int output_size = 0; }; 6 | using ModelType_GRU_8_1 = RTNeural::ModelT, RTNeural::DenseT>; 7 | using ModelType_GRU_8_2 = RTNeural::ModelT, RTNeural::DenseT>; 8 | using ModelType_GRU_8_3 = RTNeural::ModelT, RTNeural::DenseT>; 9 | using ModelType_GRU_12_1 = RTNeural::ModelT, RTNeural::DenseT>; 10 | using ModelType_GRU_12_2 = RTNeural::ModelT, RTNeural::DenseT>; 11 | using ModelType_GRU_12_3 = RTNeural::ModelT, RTNeural::DenseT>; 12 | using ModelType_GRU_16_1 = RTNeural::ModelT, RTNeural::DenseT>; 13 | using ModelType_GRU_16_2 = RTNeural::ModelT, RTNeural::DenseT>; 14 | using ModelType_GRU_16_3 = RTNeural::ModelT, RTNeural::DenseT>; 15 | using ModelType_GRU_20_1 = RTNeural::ModelT, RTNeural::DenseT>; 16 | using ModelType_GRU_20_2 = RTNeural::ModelT, RTNeural::DenseT>; 17 | using ModelType_GRU_20_3 = RTNeural::ModelT, RTNeural::DenseT>; 18 | using ModelType_GRU_24_1 = RTNeural::ModelT, RTNeural::DenseT>; 19 | using ModelType_GRU_24_2 = RTNeural::ModelT, RTNeural::DenseT>; 20 | using ModelType_GRU_24_3 = RTNeural::ModelT, RTNeural::DenseT>; 21 | using ModelType_GRU_32_1 = RTNeural::ModelT, RTNeural::DenseT>; 22 | using ModelType_GRU_32_2 = RTNeural::ModelT, RTNeural::DenseT>; 23 | using ModelType_GRU_32_3 = RTNeural::ModelT, RTNeural::DenseT>; 24 | using ModelType_GRU_40_1 = RTNeural::ModelT, RTNeural::DenseT>; 25 | using ModelType_GRU_40_2 = RTNeural::ModelT, RTNeural::DenseT>; 26 | using ModelType_GRU_40_3 = RTNeural::ModelT, RTNeural::DenseT>; 27 | using ModelType_GRU_64_1 = RTNeural::ModelT, RTNeural::DenseT>; 28 | 
using ModelType_GRU_64_2 = RTNeural::ModelT, RTNeural::DenseT>; 29 | using ModelType_GRU_64_3 = RTNeural::ModelT, RTNeural::DenseT>; 30 | using ModelType_GRU_80_1 = RTNeural::ModelT, RTNeural::DenseT>; 31 | using ModelType_GRU_80_2 = RTNeural::ModelT, RTNeural::DenseT>; 32 | using ModelType_GRU_80_3 = RTNeural::ModelT, RTNeural::DenseT>; 33 | using ModelType_LSTM_8_1 = RTNeural::ModelT, RTNeural::DenseT>; 34 | using ModelType_LSTM_8_2 = RTNeural::ModelT, RTNeural::DenseT>; 35 | using ModelType_LSTM_8_3 = RTNeural::ModelT, RTNeural::DenseT>; 36 | using ModelType_LSTM_12_1 = RTNeural::ModelT, RTNeural::DenseT>; 37 | using ModelType_LSTM_12_2 = RTNeural::ModelT, RTNeural::DenseT>; 38 | using ModelType_LSTM_12_3 = RTNeural::ModelT, RTNeural::DenseT>; 39 | using ModelType_LSTM_16_1 = RTNeural::ModelT, RTNeural::DenseT>; 40 | using ModelType_LSTM_16_2 = RTNeural::ModelT, RTNeural::DenseT>; 41 | using ModelType_LSTM_16_3 = RTNeural::ModelT, RTNeural::DenseT>; 42 | using ModelType_LSTM_20_1 = RTNeural::ModelT, RTNeural::DenseT>; 43 | using ModelType_LSTM_20_2 = RTNeural::ModelT, RTNeural::DenseT>; 44 | using ModelType_LSTM_20_3 = RTNeural::ModelT, RTNeural::DenseT>; 45 | using ModelType_LSTM_24_1 = RTNeural::ModelT, RTNeural::DenseT>; 46 | using ModelType_LSTM_24_2 = RTNeural::ModelT, RTNeural::DenseT>; 47 | using ModelType_LSTM_24_3 = RTNeural::ModelT, RTNeural::DenseT>; 48 | using ModelType_LSTM_32_1 = RTNeural::ModelT, RTNeural::DenseT>; 49 | using ModelType_LSTM_32_2 = RTNeural::ModelT, RTNeural::DenseT>; 50 | using ModelType_LSTM_32_3 = RTNeural::ModelT, RTNeural::DenseT>; 51 | using ModelType_LSTM_40_1 = RTNeural::ModelT, RTNeural::DenseT>; 52 | using ModelType_LSTM_40_2 = RTNeural::ModelT, RTNeural::DenseT>; 53 | using ModelType_LSTM_40_3 = RTNeural::ModelT, RTNeural::DenseT>; 54 | using ModelType_LSTM_64_1 = RTNeural::ModelT, RTNeural::DenseT>; 55 | using ModelType_LSTM_64_2 = RTNeural::ModelT, RTNeural::DenseT>; 56 | using ModelType_LSTM_64_3 = RTNeural::ModelT, RTNeural::DenseT>; 57 | using ModelType_LSTM_80_1 = RTNeural::ModelT, RTNeural::DenseT>; 58 | using ModelType_LSTM_80_2 = RTNeural::ModelT, RTNeural::DenseT>; 59 | using ModelType_LSTM_80_3 = RTNeural::ModelT, RTNeural::DenseT>; 60 | using ModelVariantType = std::variant; 61 | 62 | inline bool is_model_type_ModelType_GRU_8_1 (const nlohmann::json& model_json) { 63 | const auto json_layers = model_json.at ("layers"); 64 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 65 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 66 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 67 | const auto is_hidden_size_correct = hidden_size == 8; 68 | const auto input_size = model_json.at ("in_shape").back().get(); 69 | const auto is_input_size_correct = input_size == 1; 70 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 71 | } 72 | 73 | inline bool is_model_type_ModelType_GRU_8_2 (const nlohmann::json& model_json) { 74 | const auto json_layers = model_json.at ("layers"); 75 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 76 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 77 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 78 | const auto is_hidden_size_correct = hidden_size == 8; 79 | const auto input_size = model_json.at ("in_shape").back().get(); 80 | const auto is_input_size_correct = input_size == 2; 81 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 82 | } 83 | 84 
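// Every predicate below applies the same three checks to an Aida-X / RTNeural model JSON:
// layers[0].type selects the recurrent cell ("gru" or "lstm"), layers[0].shape.back() gives
// the hidden size, and in_shape.back() gives the input width (1 = audio only, 2 or 3 = audio
// plus one or two conditioning parameters). A minimal sketch of the fields being read
// (values illustrative):
//   {
//     "in_shape": [null, null, 1],
//     "layers": [ { "type": "gru", "shape": [null, null, 8], "weights": [ ... ] }, ... ]
//   }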
| inline bool is_model_type_ModelType_GRU_8_3 (const nlohmann::json& model_json) { 85 | const auto json_layers = model_json.at ("layers"); 86 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 87 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 88 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 89 | const auto is_hidden_size_correct = hidden_size == 8; 90 | const auto input_size = model_json.at ("in_shape").back().get(); 91 | const auto is_input_size_correct = input_size == 3; 92 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 93 | } 94 | 95 | inline bool is_model_type_ModelType_GRU_12_1 (const nlohmann::json& model_json) { 96 | const auto json_layers = model_json.at ("layers"); 97 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 98 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 99 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 100 | const auto is_hidden_size_correct = hidden_size == 12; 101 | const auto input_size = model_json.at ("in_shape").back().get(); 102 | const auto is_input_size_correct = input_size == 1; 103 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 104 | } 105 | 106 | inline bool is_model_type_ModelType_GRU_12_2 (const nlohmann::json& model_json) { 107 | const auto json_layers = model_json.at ("layers"); 108 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 109 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 110 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 111 | const auto is_hidden_size_correct = hidden_size == 12; 112 | const auto input_size = model_json.at ("in_shape").back().get(); 113 | const auto is_input_size_correct = input_size == 2; 114 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 115 | } 116 | 117 | inline bool is_model_type_ModelType_GRU_12_3 (const nlohmann::json& model_json) { 118 | const auto json_layers = model_json.at ("layers"); 119 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 120 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 121 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 122 | const auto is_hidden_size_correct = hidden_size == 12; 123 | const auto input_size = model_json.at ("in_shape").back().get(); 124 | const auto is_input_size_correct = input_size == 3; 125 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 126 | } 127 | 128 | inline bool is_model_type_ModelType_GRU_16_1 (const nlohmann::json& model_json) { 129 | const auto json_layers = model_json.at ("layers"); 130 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 131 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 132 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 133 | const auto is_hidden_size_correct = hidden_size == 16; 134 | const auto input_size = model_json.at ("in_shape").back().get(); 135 | const auto is_input_size_correct = input_size == 1; 136 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 137 | } 138 | 139 | inline bool is_model_type_ModelType_GRU_16_2 (const nlohmann::json& model_json) { 140 | const auto json_layers = model_json.at ("layers"); 141 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 142 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 143 | const auto hidden_size = 
json_layers.at (0).at ("shape").back().get(); 144 | const auto is_hidden_size_correct = hidden_size == 16; 145 | const auto input_size = model_json.at ("in_shape").back().get(); 146 | const auto is_input_size_correct = input_size == 2; 147 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 148 | } 149 | 150 | inline bool is_model_type_ModelType_GRU_16_3 (const nlohmann::json& model_json) { 151 | const auto json_layers = model_json.at ("layers"); 152 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 153 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 154 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 155 | const auto is_hidden_size_correct = hidden_size == 16; 156 | const auto input_size = model_json.at ("in_shape").back().get(); 157 | const auto is_input_size_correct = input_size == 3; 158 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 159 | } 160 | 161 | inline bool is_model_type_ModelType_GRU_20_1 (const nlohmann::json& model_json) { 162 | const auto json_layers = model_json.at ("layers"); 163 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 164 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 165 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 166 | const auto is_hidden_size_correct = hidden_size == 20; 167 | const auto input_size = model_json.at ("in_shape").back().get(); 168 | const auto is_input_size_correct = input_size == 1; 169 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 170 | } 171 | 172 | inline bool is_model_type_ModelType_GRU_20_2 (const nlohmann::json& model_json) { 173 | const auto json_layers = model_json.at ("layers"); 174 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 175 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 176 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 177 | const auto is_hidden_size_correct = hidden_size == 20; 178 | const auto input_size = model_json.at ("in_shape").back().get(); 179 | const auto is_input_size_correct = input_size == 2; 180 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 181 | } 182 | 183 | inline bool is_model_type_ModelType_GRU_20_3 (const nlohmann::json& model_json) { 184 | const auto json_layers = model_json.at ("layers"); 185 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 186 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 187 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 188 | const auto is_hidden_size_correct = hidden_size == 20; 189 | const auto input_size = model_json.at ("in_shape").back().get(); 190 | const auto is_input_size_correct = input_size == 3; 191 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 192 | } 193 | 194 | inline bool is_model_type_ModelType_GRU_24_1 (const nlohmann::json& model_json) { 195 | const auto json_layers = model_json.at ("layers"); 196 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 197 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 198 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 199 | const auto is_hidden_size_correct = hidden_size == 24; 200 | const auto input_size = model_json.at ("in_shape").back().get(); 201 | const auto is_input_size_correct = input_size == 1; 202 | return is_layer_type_correct && is_hidden_size_correct && 
is_input_size_correct; 203 | } 204 | 205 | inline bool is_model_type_ModelType_GRU_24_2 (const nlohmann::json& model_json) { 206 | const auto json_layers = model_json.at ("layers"); 207 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 208 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 209 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 210 | const auto is_hidden_size_correct = hidden_size == 24; 211 | const auto input_size = model_json.at ("in_shape").back().get(); 212 | const auto is_input_size_correct = input_size == 2; 213 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 214 | } 215 | 216 | inline bool is_model_type_ModelType_GRU_24_3 (const nlohmann::json& model_json) { 217 | const auto json_layers = model_json.at ("layers"); 218 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 219 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 220 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 221 | const auto is_hidden_size_correct = hidden_size == 24; 222 | const auto input_size = model_json.at ("in_shape").back().get(); 223 | const auto is_input_size_correct = input_size == 3; 224 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 225 | } 226 | 227 | inline bool is_model_type_ModelType_GRU_32_1 (const nlohmann::json& model_json) { 228 | const auto json_layers = model_json.at ("layers"); 229 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 230 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 231 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 232 | const auto is_hidden_size_correct = hidden_size == 32; 233 | const auto input_size = model_json.at ("in_shape").back().get(); 234 | const auto is_input_size_correct = input_size == 1; 235 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 236 | } 237 | 238 | inline bool is_model_type_ModelType_GRU_32_2 (const nlohmann::json& model_json) { 239 | const auto json_layers = model_json.at ("layers"); 240 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 241 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 242 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 243 | const auto is_hidden_size_correct = hidden_size == 32; 244 | const auto input_size = model_json.at ("in_shape").back().get(); 245 | const auto is_input_size_correct = input_size == 2; 246 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 247 | } 248 | 249 | inline bool is_model_type_ModelType_GRU_32_3 (const nlohmann::json& model_json) { 250 | const auto json_layers = model_json.at ("layers"); 251 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 252 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 253 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 254 | const auto is_hidden_size_correct = hidden_size == 32; 255 | const auto input_size = model_json.at ("in_shape").back().get(); 256 | const auto is_input_size_correct = input_size == 3; 257 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 258 | } 259 | 260 | inline bool is_model_type_ModelType_GRU_40_1 (const nlohmann::json& model_json) { 261 | const auto json_layers = model_json.at ("layers"); 262 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 263 | const auto is_layer_type_correct = 
rnn_layer_type == "gru"; 264 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 265 | const auto is_hidden_size_correct = hidden_size == 40; 266 | const auto input_size = model_json.at ("in_shape").back().get(); 267 | const auto is_input_size_correct = input_size == 1; 268 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 269 | } 270 | 271 | inline bool is_model_type_ModelType_GRU_40_2 (const nlohmann::json& model_json) { 272 | const auto json_layers = model_json.at ("layers"); 273 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 274 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 275 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 276 | const auto is_hidden_size_correct = hidden_size == 40; 277 | const auto input_size = model_json.at ("in_shape").back().get(); 278 | const auto is_input_size_correct = input_size == 2; 279 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 280 | } 281 | 282 | inline bool is_model_type_ModelType_GRU_40_3 (const nlohmann::json& model_json) { 283 | const auto json_layers = model_json.at ("layers"); 284 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 285 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 286 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 287 | const auto is_hidden_size_correct = hidden_size == 40; 288 | const auto input_size = model_json.at ("in_shape").back().get(); 289 | const auto is_input_size_correct = input_size == 3; 290 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 291 | } 292 | 293 | inline bool is_model_type_ModelType_GRU_64_1 (const nlohmann::json& model_json) { 294 | const auto json_layers = model_json.at ("layers"); 295 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 296 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 297 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 298 | const auto is_hidden_size_correct = hidden_size == 64; 299 | const auto input_size = model_json.at ("in_shape").back().get(); 300 | const auto is_input_size_correct = input_size == 1; 301 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 302 | } 303 | 304 | inline bool is_model_type_ModelType_GRU_64_2 (const nlohmann::json& model_json) { 305 | const auto json_layers = model_json.at ("layers"); 306 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 307 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 308 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 309 | const auto is_hidden_size_correct = hidden_size == 64; 310 | const auto input_size = model_json.at ("in_shape").back().get(); 311 | const auto is_input_size_correct = input_size == 2; 312 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 313 | } 314 | 315 | inline bool is_model_type_ModelType_GRU_64_3 (const nlohmann::json& model_json) { 316 | const auto json_layers = model_json.at ("layers"); 317 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 318 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 319 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 320 | const auto is_hidden_size_correct = hidden_size == 64; 321 | const auto input_size = model_json.at ("in_shape").back().get(); 322 | const auto is_input_size_correct = input_size == 3; 323 | 
return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 324 | } 325 | 326 | inline bool is_model_type_ModelType_GRU_80_1 (const nlohmann::json& model_json) { 327 | const auto json_layers = model_json.at ("layers"); 328 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 329 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 330 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 331 | const auto is_hidden_size_correct = hidden_size == 80; 332 | const auto input_size = model_json.at ("in_shape").back().get(); 333 | const auto is_input_size_correct = input_size == 1; 334 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 335 | } 336 | 337 | inline bool is_model_type_ModelType_GRU_80_2 (const nlohmann::json& model_json) { 338 | const auto json_layers = model_json.at ("layers"); 339 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 340 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 341 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 342 | const auto is_hidden_size_correct = hidden_size == 80; 343 | const auto input_size = model_json.at ("in_shape").back().get(); 344 | const auto is_input_size_correct = input_size == 2; 345 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 346 | } 347 | 348 | inline bool is_model_type_ModelType_GRU_80_3 (const nlohmann::json& model_json) { 349 | const auto json_layers = model_json.at ("layers"); 350 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 351 | const auto is_layer_type_correct = rnn_layer_type == "gru"; 352 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 353 | const auto is_hidden_size_correct = hidden_size == 80; 354 | const auto input_size = model_json.at ("in_shape").back().get(); 355 | const auto is_input_size_correct = input_size == 3; 356 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 357 | } 358 | 359 | inline bool is_model_type_ModelType_LSTM_8_1 (const nlohmann::json& model_json) { 360 | const auto json_layers = model_json.at ("layers"); 361 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 362 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 363 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 364 | const auto is_hidden_size_correct = hidden_size == 8; 365 | const auto input_size = model_json.at ("in_shape").back().get(); 366 | const auto is_input_size_correct = input_size == 1; 367 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 368 | } 369 | 370 | inline bool is_model_type_ModelType_LSTM_8_2 (const nlohmann::json& model_json) { 371 | const auto json_layers = model_json.at ("layers"); 372 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 373 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 374 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 375 | const auto is_hidden_size_correct = hidden_size == 8; 376 | const auto input_size = model_json.at ("in_shape").back().get(); 377 | const auto is_input_size_correct = input_size == 2; 378 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 379 | } 380 | 381 | inline bool is_model_type_ModelType_LSTM_8_3 (const nlohmann::json& model_json) { 382 | const auto json_layers = model_json.at ("layers"); 383 | const auto rnn_layer_type = json_layers.at (0).at 
("type").get(); 384 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 385 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 386 | const auto is_hidden_size_correct = hidden_size == 8; 387 | const auto input_size = model_json.at ("in_shape").back().get(); 388 | const auto is_input_size_correct = input_size == 3; 389 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 390 | } 391 | 392 | inline bool is_model_type_ModelType_LSTM_12_1 (const nlohmann::json& model_json) { 393 | const auto json_layers = model_json.at ("layers"); 394 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 395 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 396 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 397 | const auto is_hidden_size_correct = hidden_size == 12; 398 | const auto input_size = model_json.at ("in_shape").back().get(); 399 | const auto is_input_size_correct = input_size == 1; 400 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 401 | } 402 | 403 | inline bool is_model_type_ModelType_LSTM_12_2 (const nlohmann::json& model_json) { 404 | const auto json_layers = model_json.at ("layers"); 405 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 406 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 407 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 408 | const auto is_hidden_size_correct = hidden_size == 12; 409 | const auto input_size = model_json.at ("in_shape").back().get(); 410 | const auto is_input_size_correct = input_size == 2; 411 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 412 | } 413 | 414 | inline bool is_model_type_ModelType_LSTM_12_3 (const nlohmann::json& model_json) { 415 | const auto json_layers = model_json.at ("layers"); 416 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 417 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 418 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 419 | const auto is_hidden_size_correct = hidden_size == 12; 420 | const auto input_size = model_json.at ("in_shape").back().get(); 421 | const auto is_input_size_correct = input_size == 3; 422 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 423 | } 424 | 425 | inline bool is_model_type_ModelType_LSTM_16_1 (const nlohmann::json& model_json) { 426 | const auto json_layers = model_json.at ("layers"); 427 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 428 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 429 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 430 | const auto is_hidden_size_correct = hidden_size == 16; 431 | const auto input_size = model_json.at ("in_shape").back().get(); 432 | const auto is_input_size_correct = input_size == 1; 433 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 434 | } 435 | 436 | inline bool is_model_type_ModelType_LSTM_16_2 (const nlohmann::json& model_json) { 437 | const auto json_layers = model_json.at ("layers"); 438 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 439 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 440 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 441 | const auto is_hidden_size_correct = hidden_size == 16; 442 | const auto input_size = model_json.at 
("in_shape").back().get(); 443 | const auto is_input_size_correct = input_size == 2; 444 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 445 | } 446 | 447 | inline bool is_model_type_ModelType_LSTM_16_3 (const nlohmann::json& model_json) { 448 | const auto json_layers = model_json.at ("layers"); 449 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 450 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 451 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 452 | const auto is_hidden_size_correct = hidden_size == 16; 453 | const auto input_size = model_json.at ("in_shape").back().get(); 454 | const auto is_input_size_correct = input_size == 3; 455 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 456 | } 457 | 458 | inline bool is_model_type_ModelType_LSTM_20_1 (const nlohmann::json& model_json) { 459 | const auto json_layers = model_json.at ("layers"); 460 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 461 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 462 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 463 | const auto is_hidden_size_correct = hidden_size == 20; 464 | const auto input_size = model_json.at ("in_shape").back().get(); 465 | const auto is_input_size_correct = input_size == 1; 466 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 467 | } 468 | 469 | inline bool is_model_type_ModelType_LSTM_20_2 (const nlohmann::json& model_json) { 470 | const auto json_layers = model_json.at ("layers"); 471 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 472 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 473 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 474 | const auto is_hidden_size_correct = hidden_size == 20; 475 | const auto input_size = model_json.at ("in_shape").back().get(); 476 | const auto is_input_size_correct = input_size == 2; 477 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 478 | } 479 | 480 | inline bool is_model_type_ModelType_LSTM_20_3 (const nlohmann::json& model_json) { 481 | const auto json_layers = model_json.at ("layers"); 482 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 483 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 484 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 485 | const auto is_hidden_size_correct = hidden_size == 20; 486 | const auto input_size = model_json.at ("in_shape").back().get(); 487 | const auto is_input_size_correct = input_size == 3; 488 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 489 | } 490 | 491 | inline bool is_model_type_ModelType_LSTM_24_1 (const nlohmann::json& model_json) { 492 | const auto json_layers = model_json.at ("layers"); 493 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 494 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 495 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 496 | const auto is_hidden_size_correct = hidden_size == 24; 497 | const auto input_size = model_json.at ("in_shape").back().get(); 498 | const auto is_input_size_correct = input_size == 1; 499 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 500 | } 501 | 502 | inline bool is_model_type_ModelType_LSTM_24_2 (const nlohmann::json& model_json) { 503 | const 
auto json_layers = model_json.at ("layers"); 504 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 505 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 506 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 507 | const auto is_hidden_size_correct = hidden_size == 24; 508 | const auto input_size = model_json.at ("in_shape").back().get(); 509 | const auto is_input_size_correct = input_size == 2; 510 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 511 | } 512 | 513 | inline bool is_model_type_ModelType_LSTM_24_3 (const nlohmann::json& model_json) { 514 | const auto json_layers = model_json.at ("layers"); 515 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 516 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 517 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 518 | const auto is_hidden_size_correct = hidden_size == 24; 519 | const auto input_size = model_json.at ("in_shape").back().get(); 520 | const auto is_input_size_correct = input_size == 3; 521 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 522 | } 523 | 524 | inline bool is_model_type_ModelType_LSTM_32_1 (const nlohmann::json& model_json) { 525 | const auto json_layers = model_json.at ("layers"); 526 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 527 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 528 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 529 | const auto is_hidden_size_correct = hidden_size == 32; 530 | const auto input_size = model_json.at ("in_shape").back().get(); 531 | const auto is_input_size_correct = input_size == 1; 532 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 533 | } 534 | 535 | inline bool is_model_type_ModelType_LSTM_32_2 (const nlohmann::json& model_json) { 536 | const auto json_layers = model_json.at ("layers"); 537 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 538 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 539 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 540 | const auto is_hidden_size_correct = hidden_size == 32; 541 | const auto input_size = model_json.at ("in_shape").back().get(); 542 | const auto is_input_size_correct = input_size == 2; 543 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 544 | } 545 | 546 | inline bool is_model_type_ModelType_LSTM_32_3 (const nlohmann::json& model_json) { 547 | const auto json_layers = model_json.at ("layers"); 548 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 549 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 550 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 551 | const auto is_hidden_size_correct = hidden_size == 32; 552 | const auto input_size = model_json.at ("in_shape").back().get(); 553 | const auto is_input_size_correct = input_size == 3; 554 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 555 | } 556 | 557 | inline bool is_model_type_ModelType_LSTM_40_1 (const nlohmann::json& model_json) { 558 | const auto json_layers = model_json.at ("layers"); 559 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 560 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 561 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 562 | const auto 
is_hidden_size_correct = hidden_size == 40; 563 | const auto input_size = model_json.at ("in_shape").back().get(); 564 | const auto is_input_size_correct = input_size == 1; 565 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 566 | } 567 | 568 | inline bool is_model_type_ModelType_LSTM_40_2 (const nlohmann::json& model_json) { 569 | const auto json_layers = model_json.at ("layers"); 570 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 571 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 572 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 573 | const auto is_hidden_size_correct = hidden_size == 40; 574 | const auto input_size = model_json.at ("in_shape").back().get(); 575 | const auto is_input_size_correct = input_size == 2; 576 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 577 | } 578 | 579 | inline bool is_model_type_ModelType_LSTM_40_3 (const nlohmann::json& model_json) { 580 | const auto json_layers = model_json.at ("layers"); 581 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 582 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 583 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 584 | const auto is_hidden_size_correct = hidden_size == 40; 585 | const auto input_size = model_json.at ("in_shape").back().get(); 586 | const auto is_input_size_correct = input_size == 3; 587 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 588 | } 589 | 590 | inline bool is_model_type_ModelType_LSTM_64_1 (const nlohmann::json& model_json) { 591 | const auto json_layers = model_json.at ("layers"); 592 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 593 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 594 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 595 | const auto is_hidden_size_correct = hidden_size == 64; 596 | const auto input_size = model_json.at ("in_shape").back().get(); 597 | const auto is_input_size_correct = input_size == 1; 598 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 599 | } 600 | 601 | inline bool is_model_type_ModelType_LSTM_64_2 (const nlohmann::json& model_json) { 602 | const auto json_layers = model_json.at ("layers"); 603 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 604 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 605 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 606 | const auto is_hidden_size_correct = hidden_size == 64; 607 | const auto input_size = model_json.at ("in_shape").back().get(); 608 | const auto is_input_size_correct = input_size == 2; 609 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 610 | } 611 | 612 | inline bool is_model_type_ModelType_LSTM_64_3 (const nlohmann::json& model_json) { 613 | const auto json_layers = model_json.at ("layers"); 614 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 615 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 616 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 617 | const auto is_hidden_size_correct = hidden_size == 64; 618 | const auto input_size = model_json.at ("in_shape").back().get(); 619 | const auto is_input_size_correct = input_size == 3; 620 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 621 | } 622 | 623 | inline 
bool is_model_type_ModelType_LSTM_80_1 (const nlohmann::json& model_json) { 624 | const auto json_layers = model_json.at ("layers"); 625 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 626 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 627 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 628 | const auto is_hidden_size_correct = hidden_size == 80; 629 | const auto input_size = model_json.at ("in_shape").back().get(); 630 | const auto is_input_size_correct = input_size == 1; 631 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 632 | } 633 | 634 | inline bool is_model_type_ModelType_LSTM_80_2 (const nlohmann::json& model_json) { 635 | const auto json_layers = model_json.at ("layers"); 636 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 637 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 638 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 639 | const auto is_hidden_size_correct = hidden_size == 80; 640 | const auto input_size = model_json.at ("in_shape").back().get(); 641 | const auto is_input_size_correct = input_size == 2; 642 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 643 | } 644 | 645 | inline bool is_model_type_ModelType_LSTM_80_3 (const nlohmann::json& model_json) { 646 | const auto json_layers = model_json.at ("layers"); 647 | const auto rnn_layer_type = json_layers.at (0).at ("type").get(); 648 | const auto is_layer_type_correct = rnn_layer_type == "lstm"; 649 | const auto hidden_size = json_layers.at (0).at ("shape").back().get(); 650 | const auto is_hidden_size_correct = hidden_size == 80; 651 | const auto input_size = model_json.at ("in_shape").back().get(); 652 | const auto is_input_size_correct = input_size == 3; 653 | return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct; 654 | } 655 | 656 | inline bool custom_model_creator (const nlohmann::json& model_json, ModelVariantType& model) { 657 | if (is_model_type_ModelType_GRU_8_1 (model_json)) { 658 | model.emplace(); 659 | return true; 660 | } 661 | else if (is_model_type_ModelType_GRU_8_2 (model_json)) { 662 | model.emplace(); 663 | return true; 664 | } 665 | else if (is_model_type_ModelType_GRU_8_3 (model_json)) { 666 | model.emplace(); 667 | return true; 668 | } 669 | else if (is_model_type_ModelType_GRU_12_1 (model_json)) { 670 | model.emplace(); 671 | return true; 672 | } 673 | else if (is_model_type_ModelType_GRU_12_2 (model_json)) { 674 | model.emplace(); 675 | return true; 676 | } 677 | else if (is_model_type_ModelType_GRU_12_3 (model_json)) { 678 | model.emplace(); 679 | return true; 680 | } 681 | else if (is_model_type_ModelType_GRU_16_1 (model_json)) { 682 | model.emplace(); 683 | return true; 684 | } 685 | else if (is_model_type_ModelType_GRU_16_2 (model_json)) { 686 | model.emplace(); 687 | return true; 688 | } 689 | else if (is_model_type_ModelType_GRU_16_3 (model_json)) { 690 | model.emplace(); 691 | return true; 692 | } 693 | else if (is_model_type_ModelType_GRU_20_1 (model_json)) { 694 | model.emplace(); 695 | return true; 696 | } 697 | else if (is_model_type_ModelType_GRU_20_2 (model_json)) { 698 | model.emplace(); 699 | return true; 700 | } 701 | else if (is_model_type_ModelType_GRU_20_3 (model_json)) { 702 | model.emplace(); 703 | return true; 704 | } 705 | else if (is_model_type_ModelType_GRU_24_1 (model_json)) { 706 | model.emplace(); 707 | return true; 708 | } 709 | else if 
(is_model_type_ModelType_GRU_24_2 (model_json)) { 710 | model.emplace(); 711 | return true; 712 | } 713 | else if (is_model_type_ModelType_GRU_24_3 (model_json)) { 714 | model.emplace(); 715 | return true; 716 | } 717 | else if (is_model_type_ModelType_GRU_32_1 (model_json)) { 718 | model.emplace(); 719 | return true; 720 | } 721 | else if (is_model_type_ModelType_GRU_32_2 (model_json)) { 722 | model.emplace(); 723 | return true; 724 | } 725 | else if (is_model_type_ModelType_GRU_32_3 (model_json)) { 726 | model.emplace(); 727 | return true; 728 | } 729 | else if (is_model_type_ModelType_GRU_40_1 (model_json)) { 730 | model.emplace(); 731 | return true; 732 | } 733 | else if (is_model_type_ModelType_GRU_40_2 (model_json)) { 734 | model.emplace(); 735 | return true; 736 | } 737 | else if (is_model_type_ModelType_GRU_40_3 (model_json)) { 738 | model.emplace(); 739 | return true; 740 | } 741 | else if (is_model_type_ModelType_GRU_64_1 (model_json)) { 742 | model.emplace(); 743 | return true; 744 | } 745 | else if (is_model_type_ModelType_GRU_64_2 (model_json)) { 746 | model.emplace(); 747 | return true; 748 | } 749 | else if (is_model_type_ModelType_GRU_64_3 (model_json)) { 750 | model.emplace(); 751 | return true; 752 | } 753 | else if (is_model_type_ModelType_GRU_80_1 (model_json)) { 754 | model.emplace(); 755 | return true; 756 | } 757 | else if (is_model_type_ModelType_GRU_80_2 (model_json)) { 758 | model.emplace(); 759 | return true; 760 | } 761 | else if (is_model_type_ModelType_GRU_80_3 (model_json)) { 762 | model.emplace(); 763 | return true; 764 | } 765 | else if (is_model_type_ModelType_LSTM_8_1 (model_json)) { 766 | model.emplace(); 767 | return true; 768 | } 769 | else if (is_model_type_ModelType_LSTM_8_2 (model_json)) { 770 | model.emplace(); 771 | return true; 772 | } 773 | else if (is_model_type_ModelType_LSTM_8_3 (model_json)) { 774 | model.emplace(); 775 | return true; 776 | } 777 | else if (is_model_type_ModelType_LSTM_12_1 (model_json)) { 778 | model.emplace(); 779 | return true; 780 | } 781 | else if (is_model_type_ModelType_LSTM_12_2 (model_json)) { 782 | model.emplace(); 783 | return true; 784 | } 785 | else if (is_model_type_ModelType_LSTM_12_3 (model_json)) { 786 | model.emplace(); 787 | return true; 788 | } 789 | else if (is_model_type_ModelType_LSTM_16_1 (model_json)) { 790 | model.emplace(); 791 | return true; 792 | } 793 | else if (is_model_type_ModelType_LSTM_16_2 (model_json)) { 794 | model.emplace(); 795 | return true; 796 | } 797 | else if (is_model_type_ModelType_LSTM_16_3 (model_json)) { 798 | model.emplace(); 799 | return true; 800 | } 801 | else if (is_model_type_ModelType_LSTM_20_1 (model_json)) { 802 | model.emplace(); 803 | return true; 804 | } 805 | else if (is_model_type_ModelType_LSTM_20_2 (model_json)) { 806 | model.emplace(); 807 | return true; 808 | } 809 | else if (is_model_type_ModelType_LSTM_20_3 (model_json)) { 810 | model.emplace(); 811 | return true; 812 | } 813 | else if (is_model_type_ModelType_LSTM_24_1 (model_json)) { 814 | model.emplace(); 815 | return true; 816 | } 817 | else if (is_model_type_ModelType_LSTM_24_2 (model_json)) { 818 | model.emplace(); 819 | return true; 820 | } 821 | else if (is_model_type_ModelType_LSTM_24_3 (model_json)) { 822 | model.emplace(); 823 | return true; 824 | } 825 | else if (is_model_type_ModelType_LSTM_32_1 (model_json)) { 826 | model.emplace(); 827 | return true; 828 | } 829 | else if (is_model_type_ModelType_LSTM_32_2 (model_json)) { 830 | model.emplace(); 831 | return true; 832 | } 833 | else if 
(is_model_type_ModelType_LSTM_32_3 (model_json)) { 834 | model.emplace(); 835 | return true; 836 | } 837 | else if (is_model_type_ModelType_LSTM_40_1 (model_json)) { 838 | model.emplace(); 839 | return true; 840 | } 841 | else if (is_model_type_ModelType_LSTM_40_2 (model_json)) { 842 | model.emplace(); 843 | return true; 844 | } 845 | else if (is_model_type_ModelType_LSTM_40_3 (model_json)) { 846 | model.emplace(); 847 | return true; 848 | } 849 | else if (is_model_type_ModelType_LSTM_64_1 (model_json)) { 850 | model.emplace(); 851 | return true; 852 | } 853 | else if (is_model_type_ModelType_LSTM_64_2 (model_json)) { 854 | model.emplace(); 855 | return true; 856 | } 857 | else if (is_model_type_ModelType_LSTM_64_3 (model_json)) { 858 | model.emplace(); 859 | return true; 860 | } 861 | else if (is_model_type_ModelType_LSTM_80_1 (model_json)) { 862 | model.emplace(); 863 | return true; 864 | } 865 | else if (is_model_type_ModelType_LSTM_80_2 (model_json)) { 866 | model.emplace(); 867 | return true; 868 | } 869 | else if (is_model_type_ModelType_LSTM_80_3 (model_json)) { 870 | model.emplace(); 871 | return true; 872 | } 873 | model.emplace(); 874 | return false; 875 | } 876 | -------------------------------------------------------------------------------- /rt-neural-generic/src/rt-neural-generic.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * aidadsp-lv2 3 | * Copyright (C) 2022-2023 Massimo Pennazio 4 | * SPDX-License-Identifier: GPL-3.0-or-later 5 | */ 6 | 7 | #include "rt-neural-generic.h" 8 | 9 | /**********************************************************************************************************************************************************/ 10 | 11 | static const LV2_Descriptor Descriptor = { 12 | PLUGIN_URI, 13 | RtNeuralGeneric::instantiate, 14 | RtNeuralGeneric::connect_port, 15 | RtNeuralGeneric::activate, 16 | RtNeuralGeneric::run, 17 | RtNeuralGeneric::deactivate, 18 | RtNeuralGeneric::cleanup, 19 | RtNeuralGeneric::extension_data 20 | }; 21 | 22 | /**********************************************************************************************************************************************************/ 23 | 24 | LV2_SYMBOL_EXPORT 25 | const LV2_Descriptor* lv2_descriptor(uint32_t index) 26 | { 27 | if (index == 0) return &Descriptor; 28 | else return NULL; 29 | } 30 | 31 | /**********************************************************************************************************************************************************/ 32 | 33 | // Apply a gain ramp to a buffer 34 | static void applyGainRamp(ExponentialValueSmoother& smoother, float *out, const float *in, uint32_t n_samples) { 35 | for(uint32_t i=0; iprocess(in[i]); 45 | } 46 | } 47 | 48 | /**********************************************************************************************************************************************************/ 49 | 50 | void RtNeuralGeneric::applyToneControls(float *out, const float *in, LV2_Handle instance, uint32_t n_samples) 51 | { 52 | RtNeuralGeneric *self = (RtNeuralGeneric*) instance; 53 | float bass_boost_db = *self->bass_boost_db; 54 | float bass_freq = *self->bass_freq; 55 | uint8_t bass_has_changed = 0; 56 | float mid_boost_db = *self->mid_boost_db; 57 | float mid_freq = *self->mid_freq; 58 | float mid_q = *self->mid_q; 59 | float mid_type = *self->mid_type; 60 | uint8_t mid_has_changed = 0; 61 | float treble_boost_db = *self->treble_boost_db; 62 | float treble_freq = *self->treble_freq; 63 | uint8_t treble_has_changed 
= 0; 64 | float depth_boost_db = *self->depth_boost_db; 65 | float presence_boost_db = *self->presence_boost_db; 66 | 67 | /* Bass */ 68 | if (bass_boost_db != self->bass_boost_db_old) { 69 | self->bass_boost_db_old = bass_boost_db; 70 | bass_has_changed++; 71 | } 72 | if (bass_freq != self->bass_freq_old) { 73 | self->bass_freq_old = bass_freq; 74 | bass_has_changed++; 75 | } 76 | if (bass_has_changed) { 77 | self->bass->setBiquad(bq_type_lowshelf, bass_freq / self->samplerate, 0.707f, bass_boost_db); 78 | } 79 | 80 | /* Mid */ 81 | if (mid_boost_db != self->mid_boost_db_old) { 82 | self->mid_boost_db_old = mid_boost_db; 83 | mid_has_changed++; 84 | } 85 | if (mid_freq != self->mid_freq_old) { 86 | self->mid_freq_old = mid_freq; 87 | mid_has_changed++; 88 | } 89 | if (mid_q != self->mid_q_old) { 90 | self->mid_q_old = mid_q; 91 | mid_has_changed++; 92 | } 93 | if (mid_type !=self->mid_type_old) { 94 | self->mid_type_old = mid_type; 95 | mid_has_changed++; 96 | } 97 | if (mid_has_changed) { 98 | if(mid_type == BANDPASS) { 99 | self->mid->setBiquad(bq_type_bandpass, mid_freq / self->samplerate, mid_q, mid_boost_db); 100 | } 101 | else { 102 | self->mid->setBiquad(bq_type_peak, mid_freq / self->samplerate, mid_q, mid_boost_db); 103 | } 104 | } 105 | 106 | /* Treble */ 107 | if (treble_boost_db != self->treble_boost_db_old) { 108 | self->treble_boost_db_old = treble_boost_db; 109 | treble_has_changed++; 110 | } 111 | if (treble_freq != self->treble_freq_old) { 112 | self->treble_freq_old = treble_freq; 113 | treble_has_changed++; 114 | } 115 | if (treble_has_changed) { 116 | self->treble->setBiquad(bq_type_highshelf, treble_freq / self->samplerate, 0.707f, treble_boost_db); 117 | } 118 | 119 | /* Depth & Presence */ 120 | if(depth_boost_db != self->depth_boost_db_old) { 121 | self->depth_boost_db_old = depth_boost_db; 122 | self->depth->setBiquad(bq_type_peak, DEPTH_FREQ / self->samplerate, DEPTH_Q, depth_boost_db); 123 | } 124 | if(presence_boost_db != self->presence_boost_db_old) { 125 | self->presence_boost_db_old = presence_boost_db; 126 | self->presence->setBiquad(bq_type_highshelf, PRESENCE_FREQ / self->samplerate, PRESENCE_Q, presence_boost_db); 127 | } 128 | 129 | /* Run biquad cascade filters */ 130 | if(mid_type == BANDPASS) { 131 | applyBiquadFilter(out, in, self->mid, n_samples); 132 | } 133 | else { 134 | applyBiquadFilter(out, in, self->depth, n_samples); 135 | applyBiquadFilter(out, out, self->bass, n_samples); 136 | applyBiquadFilter(out, out, self->mid, n_samples); 137 | applyBiquadFilter(out, out, self->treble, n_samples); 138 | applyBiquadFilter(out, out, self->presence, n_samples); 139 | } 140 | } 141 | 142 | /**********************************************************************************************************************************************************/ 143 | 144 | /** 145 | * This function carries model calculations for snapshot models, models with one parameter and 146 | * models with two parameters. 
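 *
 * Per audio frame the network input vector is assembled roughly as follows (a sketch of
 * the cases handled below; input_gain and output_gain come from the loaded model file):
 *   input_size == 1 : { in[i] * input_gain }
 *   input_size == 2 : { in[i] * input_gain, param1 }
 *   input_size == 3 : { in[i] * input_gain, param1, param2 }
 * The network output is then scaled by output_gain, and when input_skip is set (model
 * trained around a skip connection) the dry input sample is added back to the output.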
147 | */ 148 | void RtNeuralGeneric::applyModel(DynamicModel* model, float* out, uint32_t n_samples) 149 | { 150 | const bool input_skip = model->input_skip; 151 | const float input_gain = model->input_gain; 152 | const float output_gain = model->output_gain; 153 | #if AIDADSP_CONDITIONED_MODELS 154 | LinearValueSmoother& param1Coeff = model->param1Coeff; 155 | LinearValueSmoother& param2Coeff = model->param2Coeff; 156 | #endif 157 | 158 | std::visit ( 159 | [input_skip, &out, n_samples, input_gain, output_gain 160 | #if AIDADSP_CONDITIONED_MODELS 161 | , ¶m1Coeff, ¶m2Coeff 162 | #endif 163 | ] (auto&& custom_model) 164 | { 165 | using ModelType = std::decay_t; 166 | if constexpr (ModelType::input_size == 1) 167 | { 168 | if (input_skip) 169 | { 170 | for (uint32_t i=0; ivariant 239 | ); 240 | } 241 | 242 | /**********************************************************************************************************************************************************/ 243 | 244 | LV2_Handle RtNeuralGeneric::instantiate(const LV2_Descriptor* descriptor, double samplerate, const char* bundle_path, const LV2_Feature* const* features) 245 | { 246 | RtNeuralGeneric *self = new RtNeuralGeneric(); 247 | 248 | self->samplerate = samplerate; 249 | 250 | #if AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE != SHOWCASE) 251 | self->run_count = 0; 252 | mod_license_check(features, PLUGIN_URI); 253 | #endif 254 | 255 | // Get host features 256 | for (int i = 0; features[i]; ++i) { 257 | if (!strcmp(features[i]->URI, LV2_URID__map)) { 258 | self->map = (LV2_URID_Map*)features[i]->data; 259 | } else if (!strcmp(features[i]->URI, LV2_WORKER__schedule)) { 260 | self->schedule = (LV2_Worker_Schedule*)features[i]->data; 261 | } else if (!strcmp(features[i]->URI, LV2_LOG__log)) { 262 | self->log = (LV2_Log_Log*)features[i]->data; 263 | } 264 | } 265 | if (!self->map) { 266 | std::cout << "Error! Missing feature urid:map " << __func__ << " " << __LINE__ << std::endl; 267 | delete self; 268 | return 0; 269 | } else if (!self->schedule) { 270 | std::cout << "Error! 
Missing feature work:schedule " << __func__ << " " << __LINE__ << std::endl; 271 | delete self; 272 | return 0; 273 | } 274 | 275 | // Map URIs and initialize forge 276 | map_plugin_uris(self->map, &self->uris); 277 | lv2_log_logger_init(&self->logger, self->map, self->log); 278 | #if AIDADSP_MODEL_LOADER 279 | lv2_atom_forge_init(&self->forge, self->map); 280 | #endif 281 | 282 | // Setup initial values 283 | self->preGain.setSampleRate(self->samplerate); 284 | self->preGain.setTimeConstant(0.1f); 285 | self->preGain.setTargetValue(1.f); 286 | self->preGain.clearToTargetValue(); 287 | self->masterGain.setSampleRate(self->samplerate); 288 | self->masterGain.setTimeConstant(0.1f); 289 | self->masterGain.setTargetValue(0.f); 290 | self->masterGain.clearToTargetValue(); 291 | 292 | // Setup fixed frequency dc blocker filter (high pass) 293 | self->dc_blocker = new Biquad(bq_type_highpass, 35.0f / samplerate, 0.707f, 0.0f); 294 | 295 | // Setup variable high frequencies roll-off filter (low pass) 296 | self->in_lpf_pc_old = 66.216f; 297 | self->in_lpf = new Biquad(bq_type_lowpass, MAP(self->in_lpf_pc_old, 0.0f, 100.0f, INLPF_MAX_CO, INLPF_MIN_CO), 0.707f, 0.0f); 298 | 299 | // Setup equalizer section 300 | self->bass_boost_db_old = 0.0f; 301 | self->bass_freq_old = 250.0f; 302 | self->bass = new Biquad(bq_type_lowshelf, self->bass_freq_old / samplerate, 0.707f, self->bass_boost_db_old); 303 | self->mid_boost_db_old = 0.0f; 304 | self->mid_freq_old = 600.0f; 305 | self->mid_q_old = 0.707f; 306 | self->mid_type_old = 0.0f; 307 | self->mid = new Biquad(bq_type_peak, self->mid_freq_old / samplerate, self->mid_q_old, self->mid_boost_db_old); 308 | self->treble_boost_db_old = 0.0f; 309 | self->treble_freq_old = 1500.0f; 310 | self->treble = new Biquad(bq_type_highshelf, self->treble_freq_old / samplerate, 0.707f, self->treble_boost_db_old); 311 | self->depth_boost_db_old = 0.0f; 312 | self->depth = new Biquad(bq_type_peak, DEPTH_FREQ / samplerate, DEPTH_Q, self->depth_boost_db_old); 313 | self->presence_boost_db_old = 0.0f; 314 | self->presence = new Biquad(bq_type_highshelf, PRESENCE_FREQ / samplerate, PRESENCE_Q, self->presence_boost_db_old); 315 | 316 | self->last_input_size = 0; 317 | 318 | self->loading = true; 319 | 320 | // Initial model triggered by host default state load later on 321 | self->model = nullptr; 322 | 323 | #if ! AIDADSP_MODEL_LOADER 324 | // Trigger the loading of the first model later in ::run 325 | self->model_index_old = -1.0f; 326 | #endif 327 | 328 | #ifdef AIDADSP_CHANNELS 329 | self->channel_switch.resize(8); 330 | #endif 331 | 332 | return (LV2_Handle)self; 333 | } 334 | 335 | /**********************************************************************************************************************************************************/ 336 | 337 | void RtNeuralGeneric::activate(LV2_Handle instance) 338 | { 339 | RtNeuralGeneric *self = (RtNeuralGeneric*) instance; 340 | 341 | self->preGain.clearToTargetValue(); 342 | self->masterGain.clearToTargetValue(); 343 | 344 | if (self->model == nullptr) 345 | return; 346 | 347 | // @TODO: include the activate function code here 348 | // @TODO: if (self->samplerate != self->model->samplerate) ??? 349 | #if AIDADSP_CONDITIONED_MODELS 350 | self->model->paramFirstRun = true; 351 | #endif 352 | #if 0 353 | std::visit ( 354 | [] (auto&& custom_model) 355 | { 356 | using ModelType = std::decay_t; 357 | if constexpr (! 
std::is_same_v) 358 | { 359 | custom_model.reset(); 360 | } 361 | }, 362 | self->model->variant); 363 | lv2_log_note(&self->logger, "%s %d: mdl rst!\n", __func__, __LINE__); 364 | #endif 365 | } 366 | 367 | /**********************************************************************************************************************************************************/ 368 | 369 | void RtNeuralGeneric::deactivate(LV2_Handle instance) 370 | { 371 | // @TODO: include the deactivate function code here 372 | } 373 | 374 | /**********************************************************************************************************************************************************/ 375 | 376 | void RtNeuralGeneric::connect_port(LV2_Handle instance, uint32_t port, void *data) 377 | { 378 | RtNeuralGeneric *self = (RtNeuralGeneric*) instance; 379 | 380 | switch((ports_t)port) 381 | { 382 | case IN: 383 | self->in = (float*) data; 384 | break; 385 | case OUT_1: 386 | self->out_1 = (float*) data; 387 | break; 388 | case PREGAIN: 389 | self->pregain_db = (float*) data; 390 | break; 391 | #if AIDADSP_PARAMS >= 1 392 | case PARAM1: 393 | self->param1 = (float*) data; 394 | break; 395 | #if AIDADSP_PARAMS >= 2 396 | case PARAM2: 397 | self->param2 = (float*) data; 398 | break; 399 | #endif 400 | #endif 401 | case MASTER: 402 | self->master_db = (float*) data; 403 | break; 404 | case NET_BYPASS: 405 | self->net_bypass = (float*) data; 406 | break; 407 | #if AIDADSP_MODEL_LOADER 408 | case PLUGIN_CONTROL: 409 | self->control_port = (const LV2_Atom_Sequence*)data; 410 | break; 411 | case PLUGIN_NOTIFY: 412 | self->notify_port = (LV2_Atom_Sequence*)data; 413 | break; 414 | #else 415 | case PLUGIN_MODEL_INDEX: 416 | self->model_index = (float*)data; 417 | break; 418 | #if AIDADSP_CHANNELS >= 1 419 | case CHANNEL1: 420 | self->channel_switch[0] = (float*)data; 421 | break; 422 | #if AIDADSP_CHANNELS >= 2 423 | case CHANNEL2: 424 | self->channel_switch[1] = (float*)data; 425 | break; 426 | #endif 427 | #endif 428 | #endif 429 | case IN_LPF: 430 | self->in_lpf_pc = (float*) data; 431 | break; 432 | case EQ_POS: 433 | self->eq_position = (float*) data; 434 | break; 435 | case BASS: 436 | self->bass_boost_db = (float*) data; 437 | break; 438 | case BFREQ: 439 | self->bass_freq = (float*) data; 440 | break; 441 | case MID: 442 | self->mid_boost_db = (float*) data; 443 | break; 444 | case MFREQ: 445 | self->mid_freq = (float*) data; 446 | break; 447 | case MIDQ: 448 | self->mid_q = (float*) data; 449 | break; 450 | case MTYPE: 451 | self->mid_type = (float*) data; 452 | break; 453 | case TREBLE: 454 | self->treble_boost_db = (float*) data; 455 | break; 456 | case TFREQ: 457 | self->treble_freq = (float*) data; 458 | break; 459 | case DEPTH: 460 | self->depth_boost_db = (float*) data; 461 | break; 462 | case PRESENCE: 463 | self->presence_boost_db = (float*) data; 464 | break; 465 | case EQ_BYPASS: 466 | self->eq_bypass = (float*) data; 467 | break; 468 | #if AIDADSP_OPTIONAL_DCBLOCKER 469 | case DCBLOCKER: 470 | self->dc_blocker_param = (float*) data; 471 | break; 472 | #endif 473 | case INPUT_SIZE: 474 | self->input_size = (float*) data; 475 | break; 476 | case PLUGIN_ENABLED: 477 | self->enabled = (float*) data; 478 | break; 479 | } 480 | } 481 | 482 | /**********************************************************************************************************************************************************/ 483 | 484 | void RtNeuralGeneric::run(LV2_Handle instance, uint32_t n_samples) 485 | { 486 | RtNeuralGeneric *self = 
(RtNeuralGeneric*) instance; 487 | PluginURIs* uris = &self->uris; 488 | 489 | const float pregain = DB_CO(*self->pregain_db); 490 | const float master = DB_CO(*self->master_db); 491 | const bool net_bypass = *self->net_bypass > 0.5f; 492 | const float in_lpf_pc = *self->in_lpf_pc; 493 | const float eq_position = *self->eq_position; 494 | const float eq_bypass = *self->eq_bypass; 495 | const bool enabled = *self->enabled > 0.5f; 496 | #if AIDADSP_PARAMS == 1 497 | const float param1 = *self->param1; 498 | const float param2 = 0.f; 499 | #elif AIDADSP_PARAMS == 2 500 | const float param1 = *self->param1; 501 | const float param2 = *self->param2; 502 | #endif 503 | #ifdef AIDADSP_CHANNELS 504 | std::vector ctrls(8); 505 | #if AIDADSP_CHANNELS >= 1 506 | ctrls[0] = *self->channel_switch[0]; 507 | #if AIDADSP_CHANNELS >= 2 508 | ctrls[1] = *self->channel_switch[1]; 509 | #endif 510 | #endif 511 | #endif 512 | 513 | self->preGain.setTargetValue(pregain); 514 | if (in_lpf_pc != self->in_lpf_pc_old) { /* Update filter coeffs */ 515 | self->in_lpf->setBiquad(bq_type_lowpass, MAP(in_lpf_pc, 0.0f, 100.0f, INLPF_MAX_CO, INLPF_MIN_CO), 0.707f, 0.0f); 516 | self->in_lpf_pc_old = in_lpf_pc; 517 | } 518 | *self->input_size = self->last_input_size; 519 | 520 | #if AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE != SHOWCASE) 521 | self->run_count = mod_license_run_begin(self->run_count, n_samples); 522 | #endif 523 | 524 | #if AIDADSP_MODEL_LOADER 525 | #ifdef PROCESS_ATOM_MESSAGES 526 | /*++++++++ READ ATOM MESSAGES ++++++++*/ 527 | // Set up forge to write directly to notify output port. 528 | const uint32_t notify_capacity = self->notify_port->atom.size; 529 | lv2_atom_forge_set_buffer(&self->forge, 530 | (uint8_t*)self->notify_port, 531 | notify_capacity); 532 | 533 | // Start a sequence in the notify output port. 534 | lv2_atom_forge_sequence_head(&self->forge, &self->notify_frame, 0); 535 | 536 | // Read incoming events 537 | LV2_ATOM_SEQUENCE_FOREACH(self->control_port, ev) { 538 | if (lv2_atom_forge_is_object_type(&self->forge, ev->body.type)) { 539 | const LV2_Atom_Object* obj = (const LV2_Atom_Object*)&ev->body; 540 | if (obj->body.otype == uris->patch_Set) { 541 | // Get the property and value of the set message 542 | const LV2_Atom* property = NULL; 543 | const LV2_Atom* value = NULL; 544 | lv2_atom_object_get(obj, 545 | uris->patch_property, &property, 546 | uris->patch_value, &value, 547 | 0); 548 | if (!property) { 549 | lv2_log_trace(&self->logger, 550 | "patch:Set message with no property\n"); 551 | continue; 552 | } else if (property->type != uris->atom_URID) { 553 | lv2_log_trace(&self->logger, 554 | "patch:Set property is not a URID\n"); 555 | continue; 556 | } else if (((const LV2_Atom_URID*)property)->body != uris->json) { 557 | lv2_log_trace(&self->logger, 558 | "patch:Set property body is not json\n"); 559 | continue; 560 | } 561 | if (!value) { 562 | lv2_log_trace(&self->logger, 563 | "patch:Set message with no value\n"); 564 | continue; 565 | } else if (value->type != uris->atom_Path) { 566 | lv2_log_trace(&self->logger, 567 | "patch:Set value is not a Path\n"); 568 | continue; 569 | } 570 | 571 | // Json model file change, send it to the worker. 
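                // Illustrative note (not part of the original source): the string body of an
                // LV2_Atom follows its header directly in memory, so `value + 1` below points at
                // the first byte of the path carried by the patch:Set message. The copy is bounded
                // by sizeof(msg.path) - 1, and the {}-initialised WorkerLoadMessage keeps the fixed
                // 1024-byte path buffer NUL-terminated. A minimal standalone sketch of the same
                // idea, using the LV2_ATOM_BODY_CONST helper from the LV2 atom headers:
                //
                //   const char* body = (const char*)LV2_ATOM_BODY_CONST(value);
                //   char dst[1024] = {};
                //   std::memcpy(dst, body, std::min<uint32_t>(value->size, sizeof(dst) - 1u));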
572 | lv2_log_trace(&self->logger, "Queueing set message\n"); 573 | WorkerLoadMessage msg = { kWorkerLoad, {} }; 574 | std::memcpy(msg.path, value + 1, std::min(value->size, static_cast(sizeof(msg.path) - 1u))); 575 | self->schedule->schedule_work(self->schedule->handle, sizeof(msg), &msg); 576 | self->loading = true; 577 | } else { 578 | lv2_log_trace(&self->logger, 579 | "Unknown object type %d\n", obj->body.otype); 580 | } 581 | } else { 582 | lv2_log_trace(&self->logger, 583 | "Unknown event type %d\n", ev->body.type); 584 | } 585 | } 586 | /*++++++++ END READ ATOM MESSAGES ++++++++*/ 587 | #endif 588 | #else 589 | #ifdef AIDADSP_CHANNELS 590 | float model_index = controlsToModelIndex(*self->model_index, ctrls); 591 | #else 592 | float model_index = *self->model_index; 593 | #endif 594 | 595 | if (model_index != self->model_index_old) { 596 | self->model_index_old = model_index; 597 | 598 | // Json model file change, send it to the worker. 599 | lv2_log_trace(&self->logger, "Queueing set message\n"); 600 | WorkerLoadMessage msg = { kWorkerLoad, static_cast(model_index + 1.5f) }; // round to int + 1 601 | self->schedule->schedule_work(self->schedule->handle, sizeof(msg), &msg); 602 | self->loading = true; 603 | } 604 | #endif 605 | 606 | // 0 samples means pre-run, nothing left for us to do 607 | if (n_samples == 0) { 608 | return; 609 | } 610 | 611 | // not enabled (bypass) 612 | if (!enabled) { 613 | if (self->out_1 != self->in) 614 | std::memcpy(self->out_1, self->in, sizeof(float)*n_samples); 615 | #if AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE != SHOWCASE) 616 | mod_license_run_silence(self->run_count, self->out_1, n_samples, 0); 617 | #endif 618 | return; 619 | } 620 | 621 | /*++++++++ AUDIO DSP ++++++++*/ 622 | if (in_lpf_pc != 0.0f) { 623 | applyBiquadFilter(self->out_1, self->in, self->in_lpf, n_samples); // High frequencies roll-off (lowpass) 624 | } else { 625 | std::memcpy(self->out_1, self->in, sizeof(float)*n_samples); 626 | } 627 | applyGainRamp(self->preGain, self->out_1, self->out_1, n_samples); // Pre-gain 628 | if(eq_position == 1.0f && eq_bypass == 0.0f) { 629 | applyToneControls(self->out_1, self->out_1, instance, n_samples); // Equalizer section 630 | } 631 | if (self->model != nullptr) { 632 | if (!net_bypass) { 633 | #if AIDADSP_CONDITIONED_MODELS 634 | self->model->param1Coeff.setTargetValue(param1); 635 | self->model->param2Coeff.setTargetValue(param2); 636 | if (self->model->paramFirstRun) { 637 | self->model->paramFirstRun = false; 638 | self->model->param1Coeff.clearToTargetValue(); 639 | self->model->param2Coeff.clearToTargetValue(); 640 | } 641 | #endif 642 | applyModel(self->model, self->out_1, n_samples); 643 | } 644 | } 645 | #if AIDADSP_OPTIONAL_DCBLOCKER 646 | if (*self->dc_blocker_param == 1.0f) 647 | #endif 648 | { 649 | applyBiquadFilter(self->out_1, self->out_1, self->dc_blocker, n_samples); // Dc blocker filter (highpass) 650 | } 651 | if(eq_position == 0.0f && eq_bypass == 0.0f) { 652 | applyToneControls(self->out_1, self->out_1, instance, n_samples); // Equalizer section 653 | } 654 | self->masterGain.setTargetValue(self->loading ? 
0.f : master); 655 | applyGainRamp(self->masterGain, self->out_1, self->out_1, n_samples); // Master volume 656 | #if AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE != SHOWCASE) 657 | mod_license_run_silence(self->run_count, self->out_1, n_samples, 0); 658 | #endif 659 | /*++++++++ END AUDIO DSP ++++++++*/ 660 | } 661 | 662 | /**********************************************************************************************************************************************************/ 663 | 664 | void RtNeuralGeneric::cleanup(LV2_Handle instance) 665 | { 666 | RtNeuralGeneric *self = (RtNeuralGeneric*) instance; 667 | 668 | freeModel (self->model); 669 | delete self->dc_blocker; 670 | delete self->in_lpf; 671 | delete self->bass; 672 | delete self->mid; 673 | delete self->treble; 674 | delete self->depth; 675 | delete self->presence; 676 | delete self; 677 | } 678 | 679 | /**********************************************************************************************************************************************************/ 680 | 681 | const void* RtNeuralGeneric::extension_data(const char* uri) 682 | { 683 | #if AIDADSP_MODEL_LOADER 684 | static const LV2_State_Interface state = { save, restore }; 685 | if (!strcmp(uri, LV2_STATE__interface)) { 686 | return &state; 687 | } 688 | #endif 689 | static const LV2_Worker_Interface worker = { work, work_response, NULL }; 690 | if (!strcmp(uri, LV2_WORKER__interface)) { 691 | return &worker; 692 | } 693 | #if AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE != SHOWCASE) 694 | return mod_license_interface(uri); 695 | #else 696 | return NULL; 697 | #endif 698 | } 699 | 700 | /**********************************************************************************************************************************************************/ 701 | 702 | #if AIDADSP_MODEL_LOADER 703 | /** 704 | * This function is invoked during startup, after RtNeuralGeneric::instantiate 705 | * or whenever a state is restored 706 | */ 707 | LV2_State_Status RtNeuralGeneric::restore(LV2_Handle instance, 708 | LV2_State_Retrieve_Function retrieve, 709 | LV2_State_Handle handle, 710 | uint32_t flags, 711 | const LV2_Feature* const* features) 712 | { 713 | RtNeuralGeneric *self = (RtNeuralGeneric*) instance; 714 | 715 | size_t size; 716 | uint32_t type; 717 | uint32_t valflags; 718 | int res; 719 | 720 | const void* value = retrieve( 721 | handle, 722 | self->uris.json, 723 | &size, &type, &valflags); 724 | 725 | if (value) { 726 | lv2_log_note(&self->logger, "Restoring file %s\n", (const char*)value); 727 | 728 | // send to worker for loading 729 | WorkerLoadMessage msg = { kWorkerLoad, {} }; 730 | 731 | LV2_State_Map_Path* map_path = NULL; 732 | LV2_State_Free_Path* free_path = NULL; 733 | for (int i = 0; features[i]; ++i) { 734 | if (!strcmp(features[i]->URI, LV2_STATE__mapPath)) { 735 | map_path = (LV2_State_Map_Path*)features[i]->data; 736 | } else if (!strcmp(features[i]->URI, LV2_STATE__freePath)) { 737 | free_path = (LV2_State_Free_Path*)features[i]->data; 738 | } 739 | } 740 | 741 | if (map_path) { 742 | char* apath = map_path->absolute_path(map_path->handle, (const char*)value); 743 | std::memcpy(msg.path, apath, std::min(strlen(apath), sizeof(msg.path) - 1u)); 744 | if (free_path) { 745 | free_path->free_path(free_path->handle, apath); 746 | #ifndef _WIN32 747 | } else { 748 | free(apath); 749 | #endif 750 | } 751 | } else { 752 | std::memcpy(msg.path, value, std::min(size, sizeof(msg.path) - 1u)); 753 | } 754 | 755 | self->schedule->schedule_work(self->schedule->handle, 
sizeof(msg), &msg); 756 | } 757 | 758 | return LV2_STATE_SUCCESS; 759 | } 760 | 761 | /**********************************************************************************************************************************************************/ 762 | 763 | LV2_State_Status RtNeuralGeneric::save(LV2_Handle instance, 764 | LV2_State_Store_Function store, 765 | LV2_State_Handle handle, 766 | uint32_t flags, 767 | const LV2_Feature* const* features) 768 | { 769 | RtNeuralGeneric* self = (RtNeuralGeneric*) instance; 770 | 771 | // nothing loaded yet 772 | if (!self->model) { 773 | return LV2_STATE_SUCCESS; 774 | } 775 | 776 | LV2_State_Map_Path* map_path = NULL; 777 | for (int i = 0; features[i]; ++i) { 778 | if (!strcmp(features[i]->URI, LV2_STATE__mapPath)) { 779 | map_path = (LV2_State_Map_Path*)features[i]->data; 780 | } 781 | } 782 | 783 | if (map_path) { 784 | char* apath = map_path->abstract_path(map_path->handle, self->model->path); 785 | store(handle, 786 | self->uris.json, 787 | apath, 788 | strlen(apath) + 1, 789 | self->uris.atom_Path, 790 | LV2_STATE_IS_POD | LV2_STATE_IS_PORTABLE); 791 | free(apath); 792 | return LV2_STATE_SUCCESS; 793 | } else { 794 | return LV2_STATE_ERR_NO_FEATURE; 795 | } 796 | } 797 | #endif 798 | 799 | /**********************************************************************************************************************************************************/ 800 | 801 | /** 802 | * Do work in a non-realtime thread. 803 | * This is called for every piece of work scheduled in the audio thread using 804 | * self->schedule->schedule_work(). A reply can be sent back to the audio 805 | * thread using the provided respond function. 806 | */ 807 | LV2_Worker_Status RtNeuralGeneric::work(LV2_Handle instance, 808 | LV2_Worker_Respond_Function respond, 809 | LV2_Worker_Respond_Handle handle, 810 | uint32_t size, 811 | const void* data) 812 | { 813 | RtNeuralGeneric* self = (RtNeuralGeneric*) instance; 814 | const WorkerMessage* msg = (const WorkerMessage*)data; 815 | float param1 = 0.0f; 816 | float param2 = 0.0f; 817 | 818 | switch (msg->type) 819 | { 820 | case kWorkerLoad: 821 | #if AIDADSP_CONDITIONED_MODELS 822 | if (self->model != nullptr) { 823 | param1 = self->model->param1Coeff.getTargetValue(); 824 | param2 = self->model->param2Coeff.getTargetValue(); 825 | } 826 | #endif 827 | #if AIDADSP_MODEL_LOADER 828 | if (DynamicModel* newmodel = RtNeuralGeneric::loadModelFromPath(&self->logger, ((const WorkerLoadMessage*)data)->path, &self->last_input_size, param1, param2)) 829 | #else 830 | if (DynamicModel* newmodel = RtNeuralGeneric::loadModelFromIndex(&self->logger, ((const WorkerLoadMessage*)data)->modelIndex, &self->last_input_size, param1, param2)) 831 | #endif 832 | { 833 | WorkerApplyMessage reply = { kWorkerApply, newmodel }; 834 | respond (handle, sizeof(reply), &reply); 835 | } 836 | return LV2_WORKER_SUCCESS; 837 | 838 | case kWorkerFree: 839 | freeModel (((const WorkerApplyMessage*)data)->model); 840 | return LV2_WORKER_SUCCESS; 841 | 842 | case kWorkerApply: 843 | // should not happen! 844 | break; 845 | } 846 | 847 | return LV2_WORKER_ERR_UNKNOWN; 848 | } 849 | 850 | /**********************************************************************************************************************************************************/ 851 | 852 | /** 853 | * Handle a response from work() in the audio thread. 854 | * 855 | * When running normally, this will be called by the host after run(). 
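 *
 * Illustrative sketch (not part of the original source) of the full load/swap/free
 * round-trip implemented by run(), work() and this callback, using the message
 * structs from rt-neural-generic.h; only the pointer swap happens on the audio
 * thread, while allocation and deletion stay in the worker:
 *
 *   run()           -> schedule_work({ kWorkerLoad, <path or model index> })
 *   work()          -> newmodel = loadModelFromPath(...);
 *                      respond({ kWorkerApply, newmodel })
 *   work_response() -> old = self->model; self->model = newmodel;   // RT-safe swap
 *                      schedule_work({ kWorkerFree, old })
 *   work()          -> freeModel(old)                               // freed off the audio thread
 *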
When 856 | * freewheeling, this will be called immediately at the point the work was 857 | * scheduled. 858 | */ 859 | LV2_Worker_Status RtNeuralGeneric::work_response(LV2_Handle instance, uint32_t size, const void* data) 860 | { 861 | RtNeuralGeneric *self = (RtNeuralGeneric*) instance; 862 | 863 | const WorkerMessage* const msg = static_cast(data); 864 | 865 | if (msg->type != kWorkerApply) 866 | return LV2_WORKER_ERR_UNKNOWN; 867 | 868 | // prepare reply for deleting old model 869 | WorkerApplyMessage reply = { kWorkerFree, self->model }; 870 | 871 | // swap current model with new one 872 | self->model = static_cast(data)->model; 873 | 874 | // send reply 875 | self->schedule->schedule_work(self->schedule->handle, sizeof(reply), &reply); 876 | 877 | // log about new model in use 878 | lv2_log_trace(&self->logger, "New model in use\n"); 879 | 880 | #if AIDADSP_MODEL_LOADER 881 | // report change to host/ui 882 | lv2_atom_forge_frame_time(&self->forge, 0); 883 | write_set_file(&self->forge, 884 | &self->uris, 885 | self->model->path, 886 | strlen(self->model->path)); 887 | #endif 888 | 889 | self->loading = false; 890 | lv2_log_trace(&self->logger, "loading = false\n"); 891 | 892 | return LV2_WORKER_SUCCESS; 893 | } 894 | 895 | /**********************************************************************************************************************************************************/ 896 | 897 | /** 898 | * This function tests the inference engine 899 | */ 900 | bool RtNeuralGeneric::testModel(LV2_Log_Logger* logger, DynamicModel *model, const std::vector& xData, const std::vector& yData) 901 | { 902 | std::unique_ptr out(new float [xData.size()]); 903 | float in_gain = model->input_gain; 904 | float out_gain = model->output_gain; 905 | /* Gain correction inject unwanted errors */ 906 | model->input_gain = 1.f; 907 | model->output_gain = 1.f; 908 | #if AIDADSP_CONDITIONED_MODELS 909 | /* Conditioned models tested with all params at 0 */ 910 | float param1 = model->param1Coeff.getTargetValue(); 911 | float param2 = model->param2Coeff.getTargetValue(); 912 | model->param1Coeff.setTargetValue(0.f); 913 | model->param1Coeff.clearToTargetValue(); 914 | model->param2Coeff.setTargetValue(0.f); 915 | model->param2Coeff.clearToTargetValue(); 916 | #endif 917 | for(size_t i = 0; i < xData.size(); i++) { 918 | out[i] = xData[i]; 919 | } 920 | applyModel(model, out.get(), xData.size()); 921 | /* Restore params previously saved */ 922 | model->input_gain = in_gain; 923 | model->output_gain = out_gain; 924 | #if AIDADSP_CONDITIONED_MODELS 925 | model->param1Coeff.setTargetValue(param1); 926 | model->param1Coeff.clearToTargetValue(); 927 | model->param2Coeff.setTargetValue(param2); 928 | model->param2Coeff.clearToTargetValue(); 929 | #endif 930 | constexpr double threshold = TEST_MODEL_THR; 931 | size_t nErrs = 0; 932 | float max_error = 0.0f; 933 | std::vector inputErrors; 934 | for(size_t i = 0; i < xData.size(); i++) { 935 | auto err = std::abs(out[i] - yData[i]); 936 | max_error = std::max(err, max_error); 937 | if(err > threshold) { 938 | nErrs++; 939 | inputErrors.push_back(std::abs(xData[i])); 940 | } 941 | } 942 | if(nErrs > 0) 943 | { 944 | lv2_log_trace(logger, "Failure %s: %d errs!\n", __func__, (int)nErrs); 945 | lv2_log_trace(logger, "Max err: %.12f, thr: %.12f\n", max_error, threshold); 946 | lv2_log_trace(logger, "< %.6f [dB]\n", CO_DB(*std::max_element(inputErrors.begin(), inputErrors.end()))); 947 | } 948 | else 949 | { 950 | lv2_log_trace(logger, "Success %s: %d errs!\n", __func__, 
(int)nErrs); 951 | lv2_log_trace(logger, "Max err: %.12f, thr: %.12f\n", max_error, threshold); 952 | return true; 953 | } 954 | return false; 955 | } 956 | 957 | /**********************************************************************************************************************************************************/ 958 | 959 | #if AIDADSP_MODEL_LOADER 960 | /** 961 | * This function loads a pre-trained neural model from a json file 962 | */ 963 | DynamicModel* RtNeuralGeneric::loadModelFromPath(LV2_Log_Logger* logger, const char* path, int* input_size_ptr, const float old_param1, const float old_param2) 964 | { 965 | int input_skip; 966 | int input_size; 967 | float input_gain; 968 | float output_gain; 969 | float model_samplerate; 970 | nlohmann::json model_json; 971 | 972 | try { 973 | std::ifstream jsonStream(path, std::ifstream::binary); 974 | jsonStream >> model_json; 975 | 976 | /* Understand which model type to load */ 977 | input_size = model_json["in_shape"].back().get(); 978 | if (input_size > MAX_INPUT_SIZE) { 979 | throw std::invalid_argument("Value for input_size not supported"); 980 | } 981 | 982 | if (model_json["in_skip"].is_number()) { 983 | input_skip = model_json["in_skip"].get(); 984 | if (input_skip > 1) 985 | throw std::invalid_argument("Values for in_skip > 1 are not supported"); 986 | } 987 | else { 988 | input_skip = 0; 989 | } 990 | 991 | if (model_json["in_gain"].is_number()) { 992 | input_gain = DB_CO(model_json["in_gain"].get()); 993 | } 994 | else { 995 | input_gain = 1.0f; 996 | } 997 | 998 | if (model_json["out_gain"].is_number()) { 999 | output_gain = DB_CO(model_json["out_gain"].get()); 1000 | } 1001 | else { 1002 | output_gain = 1.0f; 1003 | } 1004 | 1005 | if (model_json["metadata"]["samplerate"].is_number()) { 1006 | model_samplerate = model_json["metadata"]["samplerate"].get(); 1007 | } 1008 | else if (model_json["samplerate"].is_number()) { 1009 | model_samplerate = model_json["samplerate"].get(); 1010 | } 1011 | else { 1012 | model_samplerate = 48000.0f; 1013 | } 1014 | 1015 | lv2_log_note(logger, "Successfully loaded json file: %s\n", path); 1016 | } 1017 | catch (const std::exception& e) { 1018 | lv2_log_error(logger, "Unable to load json file: %s\nError: %s\n", path, e.what()); 1019 | return nullptr; 1020 | } 1021 | 1022 | std::unique_ptr model = std::make_unique(); 1023 | 1024 | try { 1025 | if (! custom_model_creator (model_json, model->variant)) 1026 | throw std::runtime_error ("Unable to identify a known model architecture!"); 1027 | 1028 | std::visit ( 1029 | [&model_json] (auto&& custom_model) 1030 | { 1031 | using ModelType = std::decay_t; 1032 | if constexpr (! 
std::is_same_v) 1033 | { 1034 | custom_model.parseJson (model_json, true); 1035 | custom_model.reset(); 1036 | } 1037 | }, 1038 | model->variant); 1039 | lv2_log_note(logger, "%s %d: mdl rst!\n", __func__, __LINE__); 1040 | } 1041 | catch (const std::exception& e) { 1042 | lv2_log_error(logger, "Error loading model: %s\n", e.what()); 1043 | return nullptr; 1044 | } 1045 | 1046 | /* Save extra info */ 1047 | model->path = strdup(path); 1048 | model->input_skip = input_skip != 0; 1049 | model->input_gain = input_gain; 1050 | model->output_gain = output_gain; 1051 | model->samplerate = model_samplerate; 1052 | #if AIDADSP_CONDITIONED_MODELS 1053 | model->param1Coeff.setSampleRate(model_samplerate); 1054 | model->param1Coeff.setTimeConstant(0.1f); 1055 | model->param1Coeff.setTargetValue(old_param1); 1056 | model->param1Coeff.clearToTargetValue(); 1057 | model->param2Coeff.setSampleRate(model_samplerate); 1058 | model->param2Coeff.setTimeConstant(0.1f); 1059 | model->param2Coeff.setTargetValue(old_param2); 1060 | model->param2Coeff.clearToTargetValue(); 1061 | model->paramFirstRun = true; 1062 | #endif 1063 | 1064 | /* Sanity check on inference engine with loaded model, also serves as pre-buffer 1065 | * to avoid "clicks" during initialization */ 1066 | #ifdef DEBUG 1067 | if (model_json["input_batch"].is_array() && model_json["input_batch"].is_array()) { 1068 | #else 1069 | if(false) { 1070 | #endif 1071 | std::vector input_batch = model_json["/input_batch"_json_pointer]; 1072 | std::vector output_batch = model_json["/output_batch"_json_pointer]; 1073 | testModel(logger, model.get(), input_batch, output_batch); 1074 | } 1075 | else 1076 | { 1077 | float out[2048] = {}; 1078 | applyModel(model.get(), out, 2048); 1079 | } 1080 | 1081 | // cache input size for later 1082 | *input_size_ptr = input_size; 1083 | 1084 | return model.release(); 1085 | } 1086 | #endif 1087 | 1088 | /**********************************************************************************************************************************************************/ 1089 | 1090 | /** 1091 | * This function deletes a model instance and its related details 1092 | */ 1093 | void RtNeuralGeneric::freeModel(DynamicModel* model) 1094 | { 1095 | if (model == nullptr) 1096 | return; 1097 | #if AIDADSP_MODEL_LOADER 1098 | free (model->path); 1099 | #endif 1100 | delete model; 1101 | } 1102 | 1103 | -------------------------------------------------------------------------------- /rt-neural-generic/src/rt-neural-generic.h: -------------------------------------------------------------------------------- 1 | /* 2 | * aidadsp-lv2 3 | * Copyright (C) 2022-2023 Massimo Pennazio 4 | * SPDX-License-Identifier: GPL-3.0-or-later 5 | */ 6 | 7 | #pragma once 8 | 9 | #ifndef AIDADSP_COMMERCIAL 10 | #error AIDADSP_COMMERCIAL undefined, must be 0 or 1 11 | #endif 12 | #ifndef AIDADSP_MODEL_LOADER 13 | #error AIDADSP_MODEL_LOADER undefined, must be 0 or 1 14 | #endif 15 | 16 | // enabled by default, can be turned off 17 | #ifndef AIDADSP_CONDITIONED_MODELS 18 | #define AIDADSP_CONDITIONED_MODELS 1 19 | #endif 20 | 21 | // DC blocker is optional for model loader 22 | #if AIDADSP_MODEL_LOADER 23 | #define AIDADSP_OPTIONAL_DCBLOCKER 1 24 | #else 25 | #define AIDADSP_OPTIONAL_DCBLOCKER 0 26 | #endif 27 | 28 | #include 29 | #include 30 | #include 31 | #include 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | 43 | #include 44 | 45 | #include 46 | #include 47 | 48 | #include 
"uris.h" 49 | 50 | #if AIDADSP_COMMERCIAL 51 | #define TWINCLASSIC 0 52 | #define LEAD 1 53 | #define TWEAKER 2 54 | #define VIBRO 3 55 | #define JCVM 4 56 | #define SHOWCASE 5 57 | #endif 58 | 59 | #if AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE != SHOWCASE) 60 | #include "libmodla.h" 61 | #endif 62 | 63 | #if AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE == TWINCLASSIC \ 64 | || AIDADSP_MODEL_DEFINE == VIBRO \ 65 | || AIDADSP_MODEL_DEFINE == JCVM) 66 | #define AIDADSP_PARAMS 1 67 | #define AIDADSP_CHANNELS 1 68 | #elif AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE == TWEAKER) 69 | #define AIDADSP_PARAMS 1 70 | #define AIDADSP_CHANNELS 2 71 | #elif AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE == SHOWCASE) 72 | #ifdef AIDADSP_CONDITIONED_MODELS 73 | #undef AIDADSP_CONDITIONED_MODELS 74 | #endif 75 | #define AIDADSP_MODEL_LOADER 0 76 | #define AIDADSP_PARAMS 0 77 | #undef AIDADSP_CHANNELS 78 | #else 79 | #define AIDADSP_PARAMS 2 80 | #endif 81 | 82 | /**********************************************************************************************************************************************************/ 83 | 84 | typedef enum { 85 | IN, OUT_1, 86 | #if AIDADSP_MODEL_LOADER 87 | PLUGIN_CONTROL, PLUGIN_NOTIFY, 88 | #else 89 | PLUGIN_MODEL_INDEX, 90 | #endif 91 | IN_LPF, PREGAIN, 92 | NET_BYPASS, 93 | #if AIDADSP_PARAMS >= 1 94 | PARAM1, 95 | #if AIDADSP_PARAMS >= 2 96 | PARAM2, 97 | #endif 98 | #endif 99 | EQ_BYPASS, EQ_POS, BASS, BFREQ, MID, MFREQ, MIDQ, MTYPE, TREBLE, TFREQ, DEPTH, PRESENCE, 100 | #if AIDADSP_OPTIONAL_DCBLOCKER 101 | DCBLOCKER, 102 | #endif 103 | MASTER, 104 | INPUT_SIZE, 105 | #if AIDADSP_CHANNELS >= 1 106 | CHANNEL1, 107 | #if AIDADSP_CHANNELS >= 2 108 | CHANNEL2, 109 | #endif 110 | #endif 111 | PLUGIN_ENABLED, 112 | PLUGIN_PORT_COUNT} ports_t; 113 | 114 | // Everything needed to run a model 115 | struct DynamicModel { 116 | ModelVariantType variant; 117 | #if AIDADSP_MODEL_LOADER 118 | char* path; 119 | #endif 120 | bool input_skip; /* Means the model has been trained with first input element skipped to the output */ 121 | float input_gain; 122 | float output_gain; 123 | float samplerate; 124 | #if AIDADSP_CONDITIONED_MODELS 125 | LinearValueSmoother param1Coeff; 126 | LinearValueSmoother param2Coeff; 127 | bool paramFirstRun; 128 | #endif 129 | }; 130 | 131 | #define PROCESS_ATOM_MESSAGES 132 | enum WorkerMessageType { 133 | kWorkerLoad, 134 | kWorkerApply, 135 | kWorkerFree 136 | }; 137 | 138 | // common fields to all worker messages 139 | struct WorkerMessage { 140 | WorkerMessageType type; 141 | }; 142 | 143 | // WorkerMessage compatible, to be used for kWorkerLoad 144 | struct WorkerLoadMessage { 145 | WorkerMessageType type; 146 | #if AIDADSP_MODEL_LOADER 147 | char path[1024]; 148 | #else 149 | int modelIndex; 150 | #endif 151 | }; 152 | 153 | // WorkerMessage compatible, to be used for kWorkerApply or kWorkerFree 154 | struct WorkerApplyMessage { 155 | WorkerMessageType type; 156 | DynamicModel* model; 157 | }; 158 | 159 | /* Convert a value in dB's to a coefficent */ 160 | #define DB_CO(g) ((g) > -90.0f ? powf(10.0f, (g) * 0.05f) : 0.0f) 161 | #define CO_DB(v) (20.0f * log10f(v)) 162 | 163 | /* Define a macro to scale % to coeff */ 164 | #define PC_CO(g) ((g) < 100.0f ? 
(g / 100.0f) : 1.0f) 165 | 166 | /* Define a macro to re-maps a number from one range to another */ 167 | #define MAP(x, in_min, in_max, out_min, out_max) (((x - in_min) * (out_max - out_min) / (in_max - in_min)) + out_min) 168 | 169 | /* Defines for tone controls */ 170 | #define PEAK 0.0f 171 | #define BANDPASS 1.0f 172 | #define DEPTH_FREQ 75.0f 173 | #define DEPTH_Q 0.707f 174 | #define PRESENCE_FREQ 900.0f 175 | #define PRESENCE_Q 0.707f 176 | 177 | /* Defines for antialiasing filter */ 178 | #define INLPF_MAX_CO 0.99f * 0.5f /* coeff * ((samplerate / 2) / samplerate) */ 179 | #define INLPF_MIN_CO 0.25f * 0.5f /* coeff * ((samplerate / 2) / samplerate) */ 180 | 181 | /* Define the acceptable threshold for model test */ 182 | #define TEST_MODEL_THR 1.0e-5 183 | 184 | /**********************************************************************************************************************************************************/ 185 | 186 | class RtNeuralGeneric 187 | { 188 | public: 189 | RtNeuralGeneric() {} 190 | ~RtNeuralGeneric() {} 191 | static LV2_Handle instantiate(const LV2_Descriptor* descriptor, double samplerate, const char* bundle_path, const LV2_Feature* const* features); 192 | static void activate(LV2_Handle instance); 193 | static void deactivate(LV2_Handle instance); 194 | static void connect_port(LV2_Handle instance, uint32_t port, void *data); 195 | static void run(LV2_Handle instance, uint32_t n_samples); 196 | static void cleanup(LV2_Handle instance); 197 | static const void* extension_data(const char* uri); 198 | float *in; 199 | float *out_1; 200 | float *pregain_db; 201 | ExponentialValueSmoother preGain; 202 | #if AIDADSP_CONDITIONED_MODELS 203 | float *param1; 204 | float *param2; 205 | #endif 206 | #if AIDADSP_OPTIONAL_DCBLOCKER 207 | float *dc_blocker_param; 208 | #endif 209 | float *master_db; 210 | ExponentialValueSmoother masterGain; 211 | float *net_bypass; 212 | bool loading; 213 | float *in_lpf_pc; 214 | float in_lpf_pc_old; 215 | /* Eq section */ 216 | float *eq_position; 217 | float *bass_boost_db; 218 | float bass_boost_db_old; 219 | float *bass_freq; 220 | float bass_freq_old; 221 | float *mid_boost_db; 222 | float mid_boost_db_old; 223 | float *mid_freq; 224 | float mid_freq_old; 225 | float *mid_q; 226 | float mid_q_old; 227 | float *mid_type; 228 | float mid_type_old; 229 | float *treble_boost_db; 230 | float treble_boost_db_old; 231 | float *treble_freq; 232 | float treble_freq_old; 233 | float *depth_boost_db; 234 | float depth_boost_db_old; 235 | float *presence_boost_db; 236 | float presence_boost_db_old; 237 | float *eq_bypass; 238 | float *input_size; 239 | float *enabled; 240 | 241 | // to be used for reporting input_size to GUI (0 for error/unloaded, otherwise matching input_size) 242 | int last_input_size; 243 | #if ! 
AIDADSP_MODEL_LOADER 244 | float *model_index; 245 | float model_index_old; 246 | #ifdef AIDADSP_CHANNELS 247 | std::vector channel_switch; 248 | #endif 249 | #endif 250 | 251 | #if AIDADSP_MODEL_LOADER 252 | static LV2_State_Status restore(LV2_Handle instance, 253 | LV2_State_Retrieve_Function retrieve, 254 | LV2_State_Handle handle, 255 | uint32_t flags, 256 | const LV2_Feature* const* features); 257 | static LV2_State_Status save(LV2_Handle instance, 258 | LV2_State_Store_Function store, 259 | LV2_State_Handle handle, 260 | uint32_t flags, 261 | const LV2_Feature* const* features); 262 | #endif 263 | 264 | static LV2_Worker_Status work(LV2_Handle instance, 265 | LV2_Worker_Respond_Function respond, 266 | LV2_Worker_Respond_Handle handle, 267 | uint32_t size, 268 | const void* data); 269 | static LV2_Worker_Status work_response(LV2_Handle instance, uint32_t size, const void* data); 270 | #if AIDADSP_MODEL_LOADER 271 | static DynamicModel* loadModelFromPath(LV2_Log_Logger* logger, const char* path, int* input_size_ptr, const float old_param1, const float old_param2); 272 | #else 273 | static DynamicModel* loadModelFromIndex(LV2_Log_Logger* logger, int modelIndex, int* input_size_ptr, const float old_param1, const float old_param2); 274 | static float controlsToModelIndex(int modelIndex, const std::vector& ctrls); 275 | #endif 276 | static void freeModel(DynamicModel* model); 277 | 278 | // Features 279 | LV2_URID_Map* map; 280 | LV2_Worker_Schedule* schedule; 281 | LV2_Log_Log* log; 282 | 283 | #if AIDADSP_MODEL_LOADER 284 | // Forge for creating atoms 285 | LV2_Atom_Forge forge; 286 | 287 | // Forge frame for notify port (for writing worker replies) 288 | LV2_Atom_Forge_Frame notify_frame; 289 | #endif 290 | 291 | // Logger convenience API 292 | LV2_Log_Logger logger; 293 | 294 | // Ports 295 | #if AIDADSP_MODEL_LOADER 296 | const LV2_Atom_Sequence* control_port; 297 | LV2_Atom_Sequence* notify_port; 298 | #endif 299 | float* output_port; 300 | float* input_port; 301 | 302 | // URIs 303 | PluginURIs uris; 304 | 305 | private: 306 | double samplerate; 307 | #if AIDADSP_COMMERCIAL && (AIDADSP_MODEL_DEFINE != SHOWCASE) 308 | uint32_t run_count; 309 | #endif 310 | 311 | Biquad *dc_blocker; 312 | Biquad *in_lpf; 313 | Biquad *bass; 314 | Biquad *mid; 315 | Biquad *treble; 316 | Biquad *depth; 317 | Biquad *presence; 318 | 319 | DynamicModel* model; 320 | 321 | static void applyBiquadFilter(float *out, const float *in, Biquad *filter, uint32_t n_samples); 322 | static void applyModel(DynamicModel *model, float *out, uint32_t n_samples); 323 | static void applyToneControls(float *out, const float *in, LV2_Handle instance, uint32_t n_samples); 324 | static bool testModel(LV2_Log_Logger* logger, DynamicModel *model, const std::vector& xData, const std::vector& yData); 325 | }; 326 | 327 | -------------------------------------------------------------------------------- /rt-neural-generic/src/uris.h: -------------------------------------------------------------------------------- 1 | /* 2 | This file taken from the LV2 ImpulseResponser Example Plugin 3 | Copyright 2011-2012 David Robillard 4 | 5 | Permission to use, copy, modify, and/or distribute this software for any 6 | purpose with or without fee is hereby granted, provided that the above 7 | copyright notice and this permission notice appear in all copies. 8 | 9 | THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 | 17 | Modified August 2022 by Massimo Pennazio maxipenna@libero.it 18 | */ 19 | #pragma once 20 | 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | #ifndef PLUGIN_URI 27 | #define PLUGIN_URI "http://aidadsp.cc/plugins/aidadsp-bundle/rt-neural-generic" 28 | #endif 29 | 30 | #define PLUGIN__json PLUGIN_URI "#json" 31 | #define PLUGIN__applyJson PLUGIN_URI "#applyJson" 32 | 33 | typedef struct { 34 | LV2_URID atom_Float; 35 | LV2_URID atom_Path; 36 | LV2_URID atom_Resource; 37 | LV2_URID atom_Sequence; 38 | LV2_URID atom_URID; 39 | LV2_URID atom_eventTransfer; 40 | LV2_URID applyJson; 41 | LV2_URID json; 42 | LV2_URID midi_Event; 43 | LV2_URID param_gain; 44 | LV2_URID patch_Get; 45 | LV2_URID patch_Set; 46 | LV2_URID patch_property; 47 | LV2_URID patch_value; 48 | } PluginURIs; 49 | 50 | static inline void 51 | map_plugin_uris(LV2_URID_Map* map, PluginURIs* uris) 52 | { 53 | uris->atom_Float = map->map(map->handle, LV2_ATOM__Float); 54 | uris->atom_Path = map->map(map->handle, LV2_ATOM__Path); 55 | uris->atom_Resource = map->map(map->handle, LV2_ATOM__Resource); 56 | uris->atom_Sequence = map->map(map->handle, LV2_ATOM__Sequence); 57 | uris->atom_URID = map->map(map->handle, LV2_ATOM__URID); 58 | uris->atom_eventTransfer = map->map(map->handle, LV2_ATOM__eventTransfer); 59 | uris->applyJson = map->map(map->handle, PLUGIN__applyJson); 60 | uris->json = map->map(map->handle, PLUGIN__json); 61 | uris->midi_Event = map->map(map->handle, LV2_MIDI__MidiEvent); 62 | uris->param_gain = map->map(map->handle, LV2_PARAMETERS__gain); 63 | uris->patch_Get = map->map(map->handle, LV2_PATCH__Get); 64 | uris->patch_Set = map->map(map->handle, LV2_PATCH__Set); 65 | uris->patch_property = map->map(map->handle, LV2_PATCH__property); 66 | uris->patch_value = map->map(map->handle, LV2_PATCH__value); 67 | } 68 | 69 | /** 70 | * Write a message like the following to @p forge: 71 | * [] 72 | * a patch:Set ; 73 | * patch:property eg:json ; 74 | * patch:value . 75 | */ 76 | static inline LV2_Atom* 77 | write_set_file(LV2_Atom_Forge* forge, 78 | const PluginURIs* uris, 79 | const char* filename, 80 | const uint32_t filename_len) 81 | { 82 | LV2_Atom_Forge_Frame frame; 83 | LV2_Atom* set = (LV2_Atom*)lv2_atom_forge_object( 84 | forge, &frame, 0, uris->patch_Set); 85 | 86 | lv2_atom_forge_key(forge, uris->patch_property); 87 | lv2_atom_forge_urid(forge, uris->json); 88 | lv2_atom_forge_key(forge, uris->patch_value); 89 | lv2_atom_forge_path(forge, filename, filename_len + 1); 90 | 91 | lv2_atom_forge_pop(forge, &frame); 92 | 93 | return set; 94 | } 95 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/manifest.ttl: -------------------------------------------------------------------------------- 1 | @prefix lv2: . 2 | @prefix rdfs: . 3 | 4 | a lv2:Plugin ; 5 | lv2:binary ; 6 | rdfs:seeAlso , . 7 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui.ttl: -------------------------------------------------------------------------------- 1 | @prefix modgui: . 2 | @prefix lv2: . 
3 | 4 | 5 | modgui:gui [ 6 | modgui:resourcesDirectory ; 7 | modgui:iconTemplate ; 8 | modgui:javascript ; 9 | modgui:stylesheet ; 10 | modgui:screenshot ; 11 | modgui:thumbnail ; 12 | modgui:brand "Aida DSP" ; 13 | modgui:label "rt-neural-gen" ; 14 | modgui:documentation ; 15 | modgui:discussionURL ; 16 | modgui:port [ 17 | lv2:index 0 ; 18 | lv2:symbol "PREGAIN" ; 19 | lv2:name "INPUT" ; 20 | ] , [ 21 | lv2:index 1 ; 22 | lv2:symbol "BASS" ; 23 | lv2:name "BASS" ; 24 | ] , [ 25 | lv2:index 2 ; 26 | lv2:symbol "TREBLE" ; 27 | lv2:name "TREBLE" ; 28 | ] , [ 29 | lv2:index 3 ; 30 | lv2:symbol "MASTER" ; 31 | lv2:name "OUTPUT" ; 32 | ] ; 33 | modgui:monitoredOutputs [ 34 | lv2:symbol "ModelInSize"; 35 | ] ; 36 | ] . 37 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/aida-x-manual.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/rt-neural-generic/ttl/modgui/aida-x-manual.pdf -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/icon-rt-neural-generic.html: -------------------------------------------------------------------------------- 1 |
2 |
3 |
4 |
5 |
6 | 7 |
8 |
9 |

10 | 11 |

12 |

AI CRAFTED TONE

13 |
14 |
15 | 16 | 17 | 18 |
19 |
20 |
21 |
22 |
ON
23 |
24 |
Toggle
25 |
26 |
Off
27 |
28 |
29 |
30 | 35 |
41 |
42 |
INPUT
43 |
44 |
45 |
46 |
Post
47 |
53 |
Toggle
54 |
55 |
Pre
56 |
57 |
58 |
Peak
59 |
65 |
Toggle
66 |
67 |
Bandpass
68 |
69 |
70 |
71 | 76 |
82 |
83 |
Bass
84 |
85 |
86 |
87 | 92 |
98 |
99 |
Middle
100 |
101 |
102 |
103 | 108 |
114 |
115 |
Treble
116 |
117 |
118 |
119 |
120 | 125 |
131 |
132 |
DEPTH
133 |
134 |
135 |
136 | 141 |
147 |
148 |
PRESENCE
149 |
150 |
151 |
152 |
153 | 158 |
164 |
165 |
OUTPUT
166 |
167 |
168 |
169 |
170 | 171 |
172 | {{#effect.ports.audio.input}} 173 |
179 |
180 |
181 | {{/effect.ports.audio.input}} {{#effect.ports.midi.input}} 182 |
188 |
189 |
190 | {{/effect.ports.midi.input}} {{#effect.ports.cv.input}} 191 |
197 |
198 |
199 | {{/effect.ports.cv.input}} 200 |
201 |
202 | {{#effect.ports.audio.output}} 203 |
209 |
210 |
211 | {{/effect.ports.audio.output}} {{#effect.ports.midi.output}} 212 |
218 |
219 |
220 | {{/effect.ports.midi.output}} {{#effect.ports.cv.output}} 221 |
227 |
228 |
229 | {{/effect.ports.cv.output}} 230 |
231 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/img/aida.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/rt-neural-generic/ttl/modgui/img/aida.png -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/img/ax.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 7 | 18 | 19 | 22 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/img/rw_knob_large_dark.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/img/scale.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/rt-neural-generic/ttl/modgui/img/scale.png -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/img/scale.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 7 | 8 | 9 | 11 | 13 | 15 | 17 | 19 | 21 | 23 | 25 | 27 | 29 | 31 | 33 | 35 | 36 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/javascript-rt-neural-generic.js: -------------------------------------------------------------------------------- 1 | function (event) { 2 | function input_size_changed(input_size_f) { 3 | var input_size = parseInt(input_size_f); 4 | /* TODO match HTML elements 5 | switch (input_size) { 6 | case 0: 7 | case 1: 8 | event.icon.find('.conditioned-param-1').hide(); 9 | event.icon.find('.conditioned-param-2').hide(); 10 | break 11 | case 2: 12 | event.icon.find('.conditioned-param-1').show(); 13 | event.icon.find('.conditioned-param-2').hide(); 14 | break 15 | case 3: 16 | event.icon.find('.conditioned-param-1').show(); 17 | event.icon.find('.conditioned-param-2').show(); 18 | break 19 | } 20 | */ 21 | } 22 | 23 | if (event.type === 'start') { 24 | for (var i in event.ports) { 25 | if (event.ports[i].symbol === 'ModelInSize') { 26 | input_size_changed(event.ports[i].value); 27 | break; 28 | } 29 | } 30 | } 31 | else if (event.symbol === 'ModelInSize') { 32 | input_size_changed(event.value); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/knobs/rw_knob_large_dark.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/knobs/scale.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/rt-neural-generic/ttl/modgui/knobs/scale.png -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/screenshot-rt-neural-generic.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/rt-neural-generic/ttl/modgui/screenshot-rt-neural-generic.png -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/stylesheet-rt-neural-generic.css: -------------------------------------------------------------------------------- 1 | /* MAIN */ 2 | 3 | .rt-neural-ax { 4 | border-radius: 13px; 5 | background-repeat: no-repeats; 6 | color: white; 7 | width: 900px; 8 | } 9 | 10 | .rt-neural-ax .grid { 11 | display: flex; 12 | flex-direction: column; 13 | background-color: rgba(35, 35, 35, 1); 14 | border-radius: 12px; 15 | background: linear-gradient( 16 | 140deg, 17 | rgba(35, 35, 35, 1) 0%, 18 | rgba(25, 25, 25, 1) 52%, 19 | rgba(19, 19, 19, 1) 100% 20 | ); 21 | background: linear-gradient( 22 | 140deg, 23 | rgb(28, 23, 12) 0%, 24 | rgb(42, 34, 15) 52%, 25 | rgba(19, 19, 19, 1) 100% 26 | ); 27 | } 28 | 29 | .rt-neural-ax .background_head { 30 | display: flex; 31 | justify-content: space-evenly; 32 | padding: 0px; 33 | margin: 15px 15px 0px 15px; 34 | border-radius: 12px; 35 | /* border: 1spx solid #000; */ 36 | box-shadow: inset 0 0px 10px #000; 37 | background-color: #8bf700; 38 | background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='100%25'%3E%3Cdefs%3E%3ClinearGradient id='a' gradientUnits='userSpaceOnUse' x1='0' x2='0' y1='0' y2='100%25' gradientTransform='rotate(0,1280,644)'%3E%3Cstop offset='0' stop-color='%238BF700'/%3E%3Cstop offset='1' stop-color='%23CDFF05'/%3E%3C/linearGradient%3E%3Cpattern patternUnits='userSpaceOnUse' id='b' width='300' height='250' x='0' y='0' viewBox='0 0 1080 900'%3E%3Cg fill-opacity='0.04'%3E%3Cpolygon fill='%23444' points='90 150 0 300 180 300'/%3E%3Cpolygon points='90 150 180 0 0 0'/%3E%3Cpolygon fill='%23AAA' points='270 150 360 0 180 0'/%3E%3Cpolygon fill='%23DDD' points='450 150 360 300 540 300'/%3E%3Cpolygon fill='%23999' points='450 150 540 0 360 0'/%3E%3Cpolygon points='630 150 540 300 720 300'/%3E%3Cpolygon fill='%23DDD' points='630 150 720 0 540 0'/%3E%3Cpolygon fill='%23444' points='810 150 720 300 900 300'/%3E%3Cpolygon fill='%23FFF' points='810 150 900 0 720 0'/%3E%3Cpolygon fill='%23DDD' points='990 150 900 300 1080 300'/%3E%3Cpolygon fill='%23444' points='990 150 1080 0 900 0'/%3E%3Cpolygon fill='%23DDD' points='90 450 0 600 180 600'/%3E%3Cpolygon points='90 450 180 300 0 300'/%3E%3Cpolygon fill='%23666' points='270 450 180 600 360 600'/%3E%3Cpolygon fill='%23AAA' points='270 450 360 300 180 300'/%3E%3Cpolygon fill='%23DDD' points='450 450 360 600 540 600'/%3E%3Cpolygon fill='%23999' points='450 450 540 300 360 300'/%3E%3Cpolygon fill='%23999' points='630 450 540 600 720 600'/%3E%3Cpolygon fill='%23FFF' points='630 450 720 300 540 300'/%3E%3Cpolygon points='810 450 720 600 900 600'/%3E%3Cpolygon fill='%23DDD' points='810 450 900 300 720 300'/%3E%3Cpolygon fill='%23AAA' points='990 450 900 600 1080 600'/%3E%3Cpolygon fill='%23444' points='990 450 1080 300 900 300'/%3E%3Cpolygon fill='%23222' points='90 750 0 900 180 900'/%3E%3Cpolygon points='270 750 180 900 360 900'/%3E%3Cpolygon fill='%23DDD' points='270 750 360 600 180 600'/%3E%3Cpolygon points='450 750 540 600 360 600'/%3E%3Cpolygon points='630 750 540 900 720 900'/%3E%3Cpolygon fill='%23444' points='630 750 720 600 540 600'/%3E%3Cpolygon fill='%23AAA' points='810 750 720 900 900 900'/%3E%3Cpolygon fill='%23666' points='810 750 900 600 720 600'/%3E%3Cpolygon fill='%23999' points='990 750 900 900 1080 
900'/%3E%3Cpolygon fill='%23999' points='180 0 90 150 270 150'/%3E%3Cpolygon fill='%23444' points='360 0 270 150 450 150'/%3E%3Cpolygon fill='%23FFF' points='540 0 450 150 630 150'/%3E%3Cpolygon points='900 0 810 150 990 150'/%3E%3Cpolygon fill='%23222' points='0 300 -90 450 90 450'/%3E%3Cpolygon fill='%23FFF' points='0 300 90 150 -90 150'/%3E%3Cpolygon fill='%23FFF' points='180 300 90 450 270 450'/%3E%3Cpolygon fill='%23666' points='180 300 270 150 90 150'/%3E%3Cpolygon fill='%23222' points='360 300 270 450 450 450'/%3E%3Cpolygon fill='%23FFF' points='360 300 450 150 270 150'/%3E%3Cpolygon fill='%23444' points='540 300 450 450 630 450'/%3E%3Cpolygon fill='%23222' points='540 300 630 150 450 150'/%3E%3Cpolygon fill='%23AAA' points='720 300 630 450 810 450'/%3E%3Cpolygon fill='%23666' points='720 300 810 150 630 150'/%3E%3Cpolygon fill='%23FFF' points='900 300 810 450 990 450'/%3E%3Cpolygon fill='%23999' points='900 300 990 150 810 150'/%3E%3Cpolygon points='0 600 -90 750 90 750'/%3E%3Cpolygon fill='%23666' points='0 600 90 450 -90 450'/%3E%3Cpolygon fill='%23AAA' points='180 600 90 750 270 750'/%3E%3Cpolygon fill='%23444' points='180 600 270 450 90 450'/%3E%3Cpolygon fill='%23444' points='360 600 270 750 450 750'/%3E%3Cpolygon fill='%23999' points='360 600 450 450 270 450'/%3E%3Cpolygon fill='%23666' points='540 600 630 450 450 450'/%3E%3Cpolygon fill='%23222' points='720 600 630 750 810 750'/%3E%3Cpolygon fill='%23FFF' points='900 600 810 750 990 750'/%3E%3Cpolygon fill='%23222' points='900 600 990 450 810 450'/%3E%3Cpolygon fill='%23DDD' points='0 900 90 750 -90 750'/%3E%3Cpolygon fill='%23444' points='180 900 270 750 90 750'/%3E%3Cpolygon fill='%23FFF' points='360 900 450 750 270 750'/%3E%3Cpolygon fill='%23AAA' points='540 900 630 750 450 750'/%3E%3Cpolygon fill='%23FFF' points='720 900 810 750 630 750'/%3E%3Cpolygon fill='%23222' points='900 900 990 750 810 750'/%3E%3Cpolygon fill='%23222' points='1080 300 990 450 1170 450'/%3E%3Cpolygon fill='%23FFF' points='1080 300 1170 150 990 150'/%3E%3Cpolygon points='1080 600 990 750 1170 750'/%3E%3Cpolygon fill='%23666' points='1080 600 1170 450 990 450'/%3E%3Cpolygon fill='%23DDD' points='1080 900 1170 750 990 750'/%3E%3C/g%3E%3C/pattern%3E%3C/defs%3E%3Crect x='0' y='0' fill='url(%23a)' width='100%25' height='100%25'/%3E%3Crect x='0' y='0' fill='url(%23b)' width='100%25' height='100%25'/%3E%3C/svg%3E"); 39 | background-attachment: local; 40 | background-size: cover; 41 | } 42 | 43 | .rt-neural-ax .brand { 44 | display: flex; 45 | align-items: flex-end; 46 | justify-content: flex-start; 47 | margin: 1em 0.5em; 48 | width: 30%; 49 | } 50 | 51 | .rt-neural-ax .brand img { 52 | height: 25px; 53 | align-self: flex-end; 54 | filter: invert() opacity(80%); 55 | } 56 | 57 | .rt-neural-ax .plugin { 58 | display: flex; 59 | align-items: flex-end; 60 | justify-content: flex-end; 61 | color: #2727278e; 62 | margin: 1em 0em; 63 | width: 30%; 64 | } 65 | 66 | .rt-neural-ax .plugin h3 { 67 | height: 25px; 68 | align-self: flex-end; 69 | margin: 0; 70 | } 71 | 72 | .rt-neural-ax .plate { 73 | display: flex; 74 | flex-direction: column; 75 | align-items: center; 76 | justify-content: center; 77 | /* padding: 0px; */ 78 | margin: 25px 0 15px 0; 79 | /* width: 50%; */ 80 | 81 | border-radius: 8px; 82 | } 83 | 84 | .rt-neural-ax .plate-brand { 85 | margin: 0; 86 | } 87 | 88 | .rt-neural-ax .plate h3{ 89 | margin-top: 15px; 90 | color: #0c2f03af; 91 | font-weight: bolder; 92 | } 93 | .rt-neural-ax .plate-brand img { 94 | 95 | filter: drop-shadow(2px 2px 2px 
#0c2f03af); 96 | } 97 | 98 | /* CONTROLS */ 99 | 100 | .rt-neural-ax-controls { 101 | display: flex; 102 | flex-direction: row; 103 | justify-content: space-around; 104 | align-items: end; 105 | 106 | text-align: center; 107 | margin: 15px 10px; 108 | border-radius: 8px; 109 | } 110 | 111 | .rt-neural-ax .lbl { 112 | font-size: 1rem; 113 | font-weight: bolder; 114 | text-transform: uppercase; 115 | } 116 | 117 | .rt-neural-ax .seperator-grid { 118 | margin: 0 10px 10px 10px; 119 | border-left: 4px solid rgba(97, 97, 97, 0.484); 120 | box-shadow: inset 2 0px 10px #000; 121 | border-radius: 2px; 122 | height: 80px; 123 | } 124 | 125 | /* SWITCH */ 126 | 127 | .rt-neural-ax .switch { 128 | align-self: center; 129 | } 130 | 131 | .rt-neural-ax .switch-grid { 132 | display: flex; 133 | flex-direction: column; 134 | } 135 | 136 | .rt-neural-ax .switch-grid .flipsw { 137 | padding: 5px; 138 | } 139 | 140 | .rt-neural-ax .text-switch { 141 | font-size: 0.5rem; 142 | } 143 | 144 | .rt-neural-ax .on .toggle-sw, 145 | .rt-neural-ax .off .toggle-sw { 146 | cursor: pointer; 147 | text-indent: -9999px; 148 | width: 25px; 149 | height: 60px; 150 | background: rgba(129, 247, 0, 1); 151 | display: inline-block; 152 | border-radius: 100px; 153 | position: relative; 154 | } 155 | 156 | .rt-neural-ax .on .toggle-sw { 157 | background: rgb(84, 84, 84); 158 | } 159 | 160 | .rt-neural-ax .on .toggle-sw:after, 161 | .rt-neural-ax .off .toggle-sw:after { 162 | content: ""; 163 | position: absolute; 164 | top: 5px; 165 | left: 3px; 166 | width: 20px; 167 | height: 20px; 168 | background: rgb(24, 112, 4); 169 | border-radius: 90px; 170 | transition: 0.2s; 171 | } 172 | 173 | .rt-neural-ax .off .toggle-sw:after { 174 | top: calc(-80% + 35px); 175 | transform: translateY(100%); 176 | } 177 | 178 | .rt-neural-ax .on .toggle-sw:after { 179 | background: rgb(218, 214, 203); 180 | } 181 | 182 | .rt-neural-ax .on .toggle-sw:after { 183 | top: calc(100% - 10px); 184 | transform: translateY(-100%); 185 | } 186 | 187 | /* KNOBS */ 188 | .rt-neural-ax .knob-grid { 189 | display: flex; 190 | flex-direction: column; 191 | } 192 | 193 | .rt-neural-ax .knob-bg { 194 | height: 55px; 195 | width: 55px; 196 | margin: 20px auto; 197 | background: center/cover url("/resources/img/rw_knob_large_dark.svg{{{ns}}}"); 198 | } 199 | 200 | /* TODO: [AID-88]mousewheel display tooltip */ 201 | .rt-neural-ax .knob-grid span.tip { 202 | display: None; 203 | } 204 | 205 | .rt-neural-ax .knob-grid:active span.tip { 206 | display: block; 207 | position: absolute; 208 | z-index: 10000; 209 | background-color: rgba(0, 0, 0, 0.5); 210 | box-shadow: 3px 3px 3px rgba(0, 0, 0, 0.4); 211 | padding: 4px; 212 | border-radius: 2px; 213 | width: 85px; 214 | } 215 | 216 | .rt-neural-ax .knob-scale { 217 | width: 90px; 218 | height: 90px; 219 | background-position: center; 220 | background: center/cover url("/resources/img/scale.svg{{{ns}}}"); 221 | } 222 | -------------------------------------------------------------------------------- /rt-neural-generic/ttl/modgui/thumbnail-rt-neural-generic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AidaDSP/aidadsp-lv2/f86e21dcb3b481246f0406c5f003f1a0c7d93d37/rt-neural-generic/ttl/modgui/thumbnail-rt-neural-generic.png -------------------------------------------------------------------------------- /rt-neural-generic/ttl/rt-neural-generic.ttl: -------------------------------------------------------------------------------- 1 | @prefix atom: . 
2 | @prefix doap: . 3 | @prefix lv2: . 4 | @prefix patch: . 5 | @prefix rdf: . 6 | @prefix rdfs: . 7 | @prefix state: . 8 | @prefix urid: . 9 | @prefix work: . 10 | @prefix param: . 11 | @prefix foaf: . 12 | @prefix mod: . 13 | @prefix bsize: . 14 | @prefix units: . 15 | 16 | 17 | a lv2:Parameter ; 18 | mod:fileTypes "aidadspmodel" ; 19 | rdfs:label "Neural Model" ; 20 | rdfs:range atom:Path . 21 | 22 | 23 | a lv2:Plugin, lv2:SimulatorPlugin ; 24 | doap:name "AIDA-X" ; 25 | lv2:optionalFeature lv2:hardRTCapable ; 26 | 27 | doap:license ; 28 | 29 | rdfs:comment """ 30 | AIDA-X is an Amp Model Player, allowing it to load models of AI trained music gear, which you can then play through! 31 | 32 | Its main intended use is to provide high fidelity simulations of amplifiers. 33 | However, it is also possible to run entire signal chains consisting of any combination of amp, cab, dist, drive, fuzz, boost and eq. 34 | """; 35 | 36 | doap:developer [ 37 | foaf:name "Aida DSP"; 38 | foaf:homepage ; 39 | ]; 40 | 41 | doap:maintainer [ 42 | foaf:name "Aida DSP"; 43 | foaf:homepage ; 44 | ]; 45 | 46 | lv2:minorVersion 1; 47 | lv2:microVersion 1; 48 | 49 | mod:brand "Aida DSP"; 50 | mod:label "AIDA-X"; 51 | 52 | doap:license ; 53 | lv2:project ; 54 | lv2:requiredFeature urid:map , 55 | work:schedule ; 56 | lv2:optionalFeature lv2:hardRTCapable , 57 | state:loadDefaultState, state:mapPath ; 58 | lv2:extensionData state:interface , 59 | work:interface ; 60 | patch:writable ; 61 | lv2:port 62 | [ 63 | a lv2:AudioPort, lv2:InputPort; 64 | lv2:index 0; 65 | lv2:symbol "IN"; 66 | lv2:name "IN"; 67 | ], 68 | [ 69 | a lv2:AudioPort, lv2:OutputPort; 70 | lv2:index 1; 71 | lv2:symbol "OUT"; 72 | lv2:name "OUT"; 73 | ], 74 | [ 75 | a lv2:InputPort, atom:AtomPort; 76 | lv2:index 2; 77 | atom:bufferType atom:Sequence; 78 | atom:supports patch:Message; 79 | lv2:designation lv2:control; 80 | lv2:symbol "CONTROL"; 81 | lv2:name "CONTROL"; 82 | ], 83 | [ 84 | a lv2:OutputPort, atom:AtomPort; 85 | lv2:index 3; 86 | atom:bufferType atom:Sequence; 87 | atom:supports patch:Message; 88 | lv2:designation lv2:control; 89 | lv2:symbol "NOTIFY"; 90 | lv2:name "NOTIFY"; 91 | ], 92 | [ 93 | a lv2:ControlPort, lv2:InputPort; 94 | lv2:index 4; 95 | lv2:symbol "ANTIALIASING"; 96 | lv2:name "ANTIALIASING"; 97 | lv2:default 66.216; 98 | lv2:minimum 0; 99 | lv2:maximum 100.0; 100 | units:unit units:pc; 101 | lv2:scalePoint [rdfs:label "Off"; rdf:value 0]; 102 | ], 103 | [ 104 | a lv2:ControlPort, lv2:InputPort; 105 | lv2:index 5; 106 | lv2:symbol "PREGAIN"; 107 | lv2:name "INPUT"; 108 | lv2:default 0; 109 | lv2:minimum -12.0; 110 | lv2:maximum 12.0; 111 | units:unit units:db; 112 | ], 113 | [ 114 | a lv2:ControlPort, lv2:InputPort; 115 | lv2:index 6; 116 | lv2:symbol "NETBYPASS"; 117 | lv2:name "NETBYPASS"; 118 | lv2:default 0; 119 | lv2:minimum 0; 120 | lv2:maximum 1; 121 | lv2:portProperty lv2:integer; 122 | lv2:portProperty lv2:toggled; 123 | ], 124 | [ 125 | a lv2:ControlPort, lv2:InputPort; 126 | lv2:index 7; 127 | lv2:symbol "PARAM1"; 128 | lv2:name "PARAM1"; 129 | lv2:default 0; 130 | lv2:minimum 0; 131 | lv2:maximum 1.0; 132 | ], 133 | [ 134 | a lv2:ControlPort, lv2:InputPort; 135 | lv2:index 8; 136 | lv2:symbol "PARAM2"; 137 | lv2:name "PARAM2"; 138 | lv2:default 0; 139 | lv2:minimum 0; 140 | lv2:maximum 1.0; 141 | ], 142 | [ 143 | a lv2:ControlPort, lv2:InputPort; 144 | lv2:index 9; 145 | lv2:symbol "EQBYPASS"; 146 | lv2:name "EQBYPASS"; 147 | lv2:default 0; 148 | lv2:minimum 0; 149 | lv2:maximum 1; 150 | lv2:portProperty 
lv2:integer; 151 | lv2:portProperty lv2:toggled; 152 | ], 153 | [ 154 | a lv2:ControlPort, lv2:InputPort; 155 | lv2:index 10; 156 | lv2:symbol "EQPOS"; 157 | lv2:name "EQPOS"; 158 | lv2:default 0; 159 | lv2:minimum 0; 160 | lv2:maximum 1; 161 | lv2:portProperty lv2:integer; 162 | lv2:portProperty lv2:enumeration; 163 | lv2:scalePoint [rdfs:label "POST"; rdf:value 0]; 164 | lv2:scalePoint [rdfs:label "PRE"; rdf:value 1]; 165 | ], 166 | [ 167 | a lv2:ControlPort, lv2:InputPort; 168 | lv2:index 11; 169 | lv2:symbol "BASS"; 170 | lv2:name "BASS"; 171 | lv2:default 0; 172 | lv2:minimum -8.0; 173 | lv2:maximum 8; 174 | units:unit units:db; 175 | ], 176 | [ 177 | a lv2:ControlPort, lv2:InputPort; 178 | lv2:index 12; 179 | lv2:symbol "BFREQ"; 180 | lv2:name "BFREQ"; 181 | lv2:default 305.0; 182 | lv2:minimum 75.0; 183 | lv2:maximum 600.0; 184 | units:unit units:hz; 185 | ], 186 | [ 187 | a lv2:ControlPort, lv2:InputPort; 188 | lv2:index 13; 189 | lv2:symbol "MID"; 190 | lv2:name "MID"; 191 | lv2:default 0; 192 | lv2:minimum -8.0; 193 | lv2:maximum 8; 194 | units:unit units:db; 195 | ], 196 | [ 197 | a lv2:ControlPort, lv2:InputPort; 198 | lv2:index 14; 199 | lv2:symbol "MFREQ"; 200 | lv2:name "MFREQ"; 201 | lv2:default 750.0; 202 | lv2:minimum 150.0; 203 | lv2:maximum 5000.0; 204 | units:unit units:hz; 205 | ], 206 | [ 207 | a lv2:ControlPort, lv2:InputPort; 208 | lv2:index 15; 209 | lv2:symbol "MIDQ"; 210 | lv2:name "MIDQ"; 211 | lv2:default 0.707; 212 | lv2:minimum 0.2; 213 | lv2:maximum 5.0; 214 | ], 215 | [ 216 | a lv2:ControlPort, lv2:InputPort; 217 | lv2:index 16; 218 | lv2:symbol "MTYPE"; 219 | lv2:name "MTYPE"; 220 | lv2:default 0; 221 | lv2:minimum 0; 222 | lv2:maximum 1; 223 | lv2:portProperty lv2:integer; 224 | lv2:portProperty lv2:enumeration; 225 | lv2:scalePoint [rdfs:label "PEAK"; rdf:value 0]; 226 | lv2:scalePoint [rdfs:label "BANDPASS"; rdf:value 1]; 227 | ], 228 | [ 229 | a lv2:ControlPort, lv2:InputPort; 230 | lv2:index 17; 231 | lv2:symbol "TREBLE"; 232 | lv2:name "TREBLE"; 233 | lv2:default 0; 234 | lv2:minimum -8.0; 235 | lv2:maximum 8; 236 | units:unit units:db; 237 | ], 238 | [ 239 | a lv2:ControlPort, lv2:InputPort; 240 | lv2:index 18; 241 | lv2:symbol "TFREQ"; 242 | lv2:name "TFREQ"; 243 | lv2:default 2000.0; 244 | lv2:minimum 1000.0; 245 | lv2:maximum 4000.0; 246 | units:unit units:hz; 247 | ], 248 | [ 249 | a lv2:ControlPort, lv2:InputPort; 250 | lv2:index 19; 251 | lv2:symbol "DEPTH"; 252 | lv2:name "DEPTH"; 253 | lv2:default 0; 254 | lv2:minimum -8.0; 255 | lv2:maximum 8; 256 | units:unit units:db; 257 | ], 258 | [ 259 | a lv2:ControlPort, lv2:InputPort; 260 | lv2:index 20; 261 | lv2:symbol "PRESENCE"; 262 | lv2:name "PRESENCE"; 263 | lv2:default 0; 264 | lv2:minimum -8.0; 265 | lv2:maximum 8; 266 | units:unit units:db; 267 | ], 268 | [ 269 | a lv2:ControlPort, lv2:InputPort; 270 | lv2:index 21; 271 | lv2:symbol "DCBLOCKER"; 272 | lv2:name "DCBLOCKER"; 273 | lv2:default 1; 274 | lv2:minimum 0; 275 | lv2:maximum 1; 276 | lv2:portProperty lv2:integer; 277 | lv2:portProperty lv2:toggled; 278 | ], 279 | [ 280 | a lv2:ControlPort, lv2:InputPort; 281 | lv2:index 22; 282 | lv2:symbol "MASTER"; 283 | lv2:name "OUTPUT"; 284 | lv2:default 0; 285 | lv2:minimum -15.0; 286 | lv2:maximum 15; 287 | units:unit units:db; 288 | ], 289 | [ 290 | a lv2:ControlPort, lv2:OutputPort; 291 | lv2:index 23; 292 | lv2:symbol "ModelInSize"; 293 | lv2:name "Model Input Size"; 294 | lv2:default 0; 295 | lv2:minimum 0; 296 | lv2:maximum 3; 297 | lv2:portProperty lv2:integer; 298 | lv2:portProperty 
lv2:enumeration;
299 |         lv2:scalePoint [rdfs:label "ERROR"; rdf:value 0];
300 |         lv2:scalePoint [rdfs:label "SNAPSHOT"; rdf:value 1];
301 |         lv2:scalePoint [rdfs:label "WITH 1 PARAM"; rdf:value 2];
302 |         lv2:scalePoint [rdfs:label "WITH 2 PARAMS"; rdf:value 3];
303 |     ],
304 |     [
305 |         a lv2:ControlPort, lv2:InputPort;
306 |         lv2:index 24;
307 |         lv2:symbol "enabled";
308 |         lv2:name "Enabled";
309 |         lv2:default 1;
310 |         lv2:minimum 0;
311 |         lv2:maximum 1;
312 |         lv2:designation lv2:enabled;
313 |     ];
314 | 
315 | state:state [
316 | 
317 | ].
318 | 
--------------------------------------------------------------------------------
/tests/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # CMake file for rt-neural-generic plugin tests
2 | 
3 | cmake_minimum_required(VERSION 3.15)
4 | 
5 | project(tests)
6 | 
7 | # flags and definitions
8 | set(CMAKE_CXX_STANDARD 17)
9 | 
10 | set(TEST_NAME "" CACHE STRING "Which test to build")
11 | 
12 | message("Building binary test-${TEST_NAME}")
13 | 
14 | if (NOT TEST_NAME)
15 |     message(FATAL_ERROR "TEST_NAME cannot be empty")
16 | else()
17 |     if(TEST_NAME STREQUAL "rtneural")
18 |         set(RTNEURAL_XSIMD ON CACHE BOOL "Use RTNeural with this backend")
19 |         message("RTNEURAL_XSIMD in ${CMAKE_PROJECT_NAME} = ${RTNEURAL_XSIMD}")
20 | 
21 |         # add external libraries
22 |         add_subdirectory(../modules/RTNeural ${CMAKE_CURRENT_BINARY_DIR}/RTNeural)
23 | 
24 |         # configure executable
25 |         add_executable(test-rtneural
26 |             src/test_rtneural.cpp
27 |         )
28 | 
29 |         # include and link directories
30 |         include_directories(test-rtneural ./src ../modules/RTNeural ../modules/RTNeural/modules/json)
31 |         link_directories(test-rtneural ./src ../modules/RTNeural ../modules/RTNeural/modules/json)
32 | 
33 |         # configure target
34 |         target_link_libraries(test-rtneural RTNeural)
35 |         target_compile_definitions(test-rtneural PUBLIC)
36 |     elseif(TEST_NAME STREQUAL "smoothers")
37 |         # configure executable
38 |         add_executable(test-smoothers
39 |             src/test_smoothers.cpp
40 |         )
41 | 
42 |         # include and link directories
43 |         include_directories(test-smoothers ./src ../common)
44 |         link_directories(test-smoothers ./src ../common)
45 |     endif()
46 | endif()
47 | 
48 | 
--------------------------------------------------------------------------------
/tests/src/test_rtneural.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream> /* NOTE: original include targets on lines 1-7 were lost in extraction; plausible reconstruction */
2 | #include <fstream>
3 | #include <string>
4 | #include <cstdlib>
5 | 
6 | #include <RTNeural/RTNeural.h>
7 | #include <nlohmann/json.hpp>
8 | 
9 | #define JSON_MODEL_FILE_NAME "model.json"
10 | 
11 | using namespace std;
12 | 
13 | int main(void) {
14 |     RTNeural::ModelT<float, 1, 1, /* template args on lines 14-22 reconstructed (input size 1 assumed); originals lost in extraction */
15 |         RTNeural::LSTMLayerT<float, 1, 16>,
16 |         RTNeural::DenseT<float, 16, 1>> lstm_16;
17 |     RTNeural::ModelT<float, 1, 1,
18 |         RTNeural::LSTMLayerT<float, 1, 12>,
19 |         RTNeural::DenseT<float, 12, 1>> lstm_12;
20 |     RTNeural::ModelT<float, 1, 1,
21 |         RTNeural::GRULayerT<float, 1, 8>,
22 |         RTNeural::DenseT<float, 8, 1>> gru_8;
23 |     int model_index = 0;
24 |     int n_layers = 0;
25 |     int input_size = 0;
26 |     int hidden_size = 0;
27 |     std::string type;
28 |     int in_skip = 0;
29 | 
30 |     std::string filePath;
31 | 
32 |     filePath.append(JSON_MODEL_FILE_NAME);
33 | 
34 |     std::cout << "Loading json file: " << filePath << std::endl;
35 | 
36 |     try {
37 |         std::ifstream jsonStream(filePath);
38 |         std::ifstream jsonStream2(filePath);
39 |         nlohmann::json modelData;
40 |         jsonStream2 >> modelData;
41 | 
42 |         /* Understand which model type to load */
43 |         n_layers = modelData["layers"].size(); /* Get how many layers we have in this nn model */
44 | 
45 |         input_size = modelData["in_shape"].back().get<int>();
46 | 
47 |         if (modelData["in_skip"].is_number())
48 |             in_skip = modelData["in_skip"].get<int>();
49 |         else
50 |             in_skip = 0;
51 | 
52 |         type = modelData["layers"][n_layers-1-1]["type"];
53 |         hidden_size = modelData["layers"][n_layers-1-1]["shape"].back().get<int>();
54 | 
55 |         std::cout << "input_size: " << input_size << std::endl;
56 |         std::cout << "in_skip: " << in_skip << std::endl;
57 |         std::cout << "n_layers: " << n_layers << std::endl;
58 |         std::cout << "type: " << type << std::endl;
59 |         std::cout << "hidden_size: " << hidden_size << std::endl;
60 | 
61 |         if(type == std::string("lstm"))
62 |         {
63 |             if(hidden_size == 16)
64 |             {
65 |                 model_index = 0;
66 |             }
67 |             else if(hidden_size == 12)
68 |             {
69 |                 model_index = 1;
70 |             }
71 |         }
72 |         else if(type == std::string("gru"))
73 |         {
74 |             if(hidden_size == 8)
75 |             {
76 |                 model_index = 2;
77 |             }
78 |         }
79 | 
80 |         switch(model_index)
81 |         {
82 |             case 0:
83 |                 lstm_16.parseJson(jsonStream, true);
84 |                 break;
85 |             case 1:
86 |                 lstm_12.parseJson(jsonStream, true);
87 |                 break;
88 |             case 2:
89 |                 gru_8.parseJson(jsonStream, true);
90 |                 break;
91 |         }
92 |     }
93 |     catch (const std::exception& e) {
94 |         std::cout << std::endl << "Unable to load json file: " << filePath << std::endl;
95 |         std::cout << e.what() << std::endl;
96 |     }
97 | }
--------------------------------------------------------------------------------
/tests/src/test_smoothers.cpp:
--------------------------------------------------------------------------------
1 | #include <cstdio> /* NOTE: original include targets on lines 1-6 were lost in extraction; plausible reconstruction */
2 | #include <cstdlib>
3 | #include <cstring>
4 | #include <cmath>
5 | 
6 | #include <ValueSmoother.hpp>
7 | 
8 | using namespace std;
9 | 
10 | int main(void) {
11 |     LinearValueSmoother param1Coeff;
12 |     ExponentialValueSmoother param2Coeff;
13 | 
14 |     param1Coeff.setSampleRate(48000.0f);
15 |     param1Coeff.setTimeConstant(0.1f);
16 |     param1Coeff.setTargetValue(0.0f);
17 |     param1Coeff.clearToTargetValue();
18 | 
19 |     param2Coeff.setSampleRate(48000.0f);
20 |     param2Coeff.setTimeConstant(0.1f);
21 |     param2Coeff.setTargetValue(0.0f);
22 |     param2Coeff.clearToTargetValue();
23 | 
24 |     param1Coeff.setTargetValue(1.0f);
25 |     param2Coeff.setTargetValue(1.0f);
26 | 
27 |     /* 1 sec */
28 |     for(int i=0; i<48000u; i++) {
29 |         param1Coeff.next();
30 |         param2Coeff.next();
31 |         if (i%100 == 0)
32 |             printf("%d) %.02f %.02f\n", i, param1Coeff.getCurrentValue(), param2Coeff.getCurrentValue());
33 |     }
34 | }
--------------------------------------------------------------------------------
/variant/generate_variant_hpp.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | max_input_size = 3
4 | layer_types = ('GRU', 'LSTM')
5 | input_sizes = tuple(range(1, max_input_size + 1))
6 | hidden_sizes = (8, 12, 16, 20, 24, 32, 40, 64, 80)
7 | 
8 | model_variant_using_declarations = []
9 | model_variant_types = []
10 | model_type_checkers = []
11 | 
12 | def add_model(input_size, layer_type, hidden_size, model_type):
13 |     model_type_alias = f'ModelType_{layer_type}_{hidden_size}_{input_size}'
14 |     model_variant_using_declarations.append(f'using {model_type_alias} = {model_type};\n')
15 |     model_variant_types.append(model_type_alias)
16 |     model_type_checkers.append(f'''inline bool is_model_type_{model_type_alias} (const nlohmann::json& model_json) {{
17 |     const auto json_layers = model_json.at ("layers");
18 |     const auto rnn_layer_type = json_layers.at (0).at ("type").get<std::string>();
19 |     const auto is_layer_type_correct = rnn_layer_type == "{layer_type.lower()}";
20 |     const auto hidden_size = json_layers.at (0).at ("shape").back().get<int>();
21 |     const auto is_hidden_size_correct = hidden_size == {hidden_size};
22 |     const auto input_size = model_json.at ("in_shape").back().get<int>();
23 |     const auto is_input_size_correct = input_size == {input_size};
24 |     return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct;
25 | }}\n\n''')  # .get<>() template args above reconstructed; originals lost in extraction
26 | 
27 | for layer_type in layer_types:
28 |     for hidden_size in hidden_sizes:
29 |         for input_size in input_sizes:
30 |             print(f'Setting up Model: {layer_type} w/ RNN dims {input_size} / {hidden_size}, w/ I/O dims {input_size} / 1')
31 | 
32 |             if layer_type == 'GRU':
33 |                 rnn_layer_type = f'RTNeural::GRULayerT<float, {input_size}, {hidden_size}>'  # template args reconstructed; originals lost in extraction
34 |             elif layer_type == 'LSTM':
35 |                 rnn_layer_type = f'RTNeural::LSTMLayerT<float, {input_size}, {hidden_size}>'
36 | 
37 |             dense_layer_type = f'RTNeural::DenseT<float, {hidden_size}, 1>'
38 | 
39 |             model_type = f'RTNeural::ModelT<float, {input_size}, 1, {rnn_layer_type}, {dense_layer_type}>'
40 |             add_model(input_size, layer_type, hidden_size, model_type)
41 | 
42 | with open("rt-neural-generic/src/model_variant.hpp", "w") as header_file:
43 |     header_file.write('#include <variant>\n')  # include targets reconstructed; originals lost in extraction
44 |     header_file.write('#include <RTNeural/RTNeural.h>\n')
45 |     header_file.write('\n')
46 | 
47 |     header_file.write(f'#define MAX_INPUT_SIZE {max_input_size}\n')
48 | 
49 |     header_file.write('struct NullModel { static constexpr int input_size = 0; static constexpr int output_size = 0; };\n')
50 |     header_file.writelines(model_variant_using_declarations)
51 |     header_file.write(f'using ModelVariantType = std::variant<NullModel, {", ".join(model_variant_types)}>;\n')  # variant alternatives reconstructed; originals lost in extraction
52 |     header_file.write('\n')
53 | 
54 |     header_file.writelines(model_type_checkers)
55 | 
56 |     header_file.write('inline bool custom_model_creator (const nlohmann::json& model_json, ModelVariantType& model) {\n')
57 |     if_statement = 'if'
58 |     for type_checker, alias in zip(model_type_checkers, model_variant_types):
59 |         header_file.write(f' {if_statement} (is_model_type_{alias} (model_json)) {{\n')
60 |         header_file.write(f' model.emplace<{alias}>();\n')
61 |         header_file.write(f' return true;\n')
62 |         header_file.write(' }\n')
63 |         if_statement = 'else if'
64 |     header_file.write(f' model.emplace<NullModel>();\n')
65 |     header_file.write(f' return false;\n')
66 |     header_file.write('}\n')
67 | 
--------------------------------------------------------------------------------
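Example (editor's sketch, not a repository file): for the LSTM configuration with hidden size 16 and input size 1, generate_variant_hpp.py emits a type alias, a JSON checker, and a std::variant entry along the following lines into rt-neural-generic/src/model_variant.hpp. The include paths and RTNeural template signatures (ModelT / LSTMLayerT / DenseT) are assumed from the layer types referenced in the script; the real generated header contains one alias and one checker per (layer type, hidden size, input size) combination.

// illustrative excerpt of the generated header for ModelType_LSTM_16_1
#include <variant>
#include <RTNeural/RTNeural.h>          // assumed include path

#define MAX_INPUT_SIZE 3
struct NullModel { static constexpr int input_size = 0; static constexpr int output_size = 0; };
using ModelType_LSTM_16_1 = RTNeural::ModelT<float, 1, 1,
    RTNeural::LSTMLayerT<float, 1, 16>,
    RTNeural::DenseT<float, 16, 1>>;
/* ...one alias per generated combination... */
using ModelVariantType = std::variant<NullModel, ModelType_LSTM_16_1 /*, ... */>;

// checker generated by add_model(): matches the layer type, hidden size and input size found in the model JSON
inline bool is_model_type_ModelType_LSTM_16_1 (const nlohmann::json& model_json) {
    const auto json_layers = model_json.at ("layers");
    const auto rnn_layer_type = json_layers.at (0).at ("type").get<std::string>();
    const auto is_layer_type_correct = rnn_layer_type == "lstm";
    const auto hidden_size = json_layers.at (0).at ("shape").back().get<int>();
    const auto is_hidden_size_correct = hidden_size == 16;
    const auto input_size = model_json.at ("in_shape").back().get<int>();
    const auto is_input_size_correct = input_size == 1;
    return is_layer_type_correct && is_hidden_size_correct && is_input_size_correct;
}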