├── .gitignore ├── Commands └── waapi-python-tools.json ├── LICENSE ├── README.md ├── auto-adjust-sound-voice-loudness ├── README.md └── __main__.py ├── auto-create-events ├── README.md └── __main__.py ├── auto-midi-map ├── README.md ├── __main__.py └── auto-midi-map.cmd ├── auto-rename-container ├── README.md ├── __main__.py └── auto-rename-container.cmd ├── auto-trim-sources ├── README.md └── __main__.py ├── new-synth-one ├── __main__.py └── readme.md ├── report-language-duration-inconsistency ├── README.md └── __main__.py └── text-to-speech ├── __main__.py ├── readme.md └── speak.ps1 /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .vscode -------------------------------------------------------------------------------- /Commands/waapi-python-tools.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands":[ 3 | { 4 | "id":"ak.text-to-speech", 5 | "displayName":"Generate Text-to-Speech", 6 | "defaultShortcut":"", 7 | "program":"py.exe", 8 | "startMode":"MultipleSelectionSingleProcessSpaceSeparated", 9 | "args":"-3 ${CurrentCommandDirectory}\\..\\text-to-speech --original \"${WwiseProjectOriginals}\" ${id}", 10 | "cwd":"", 11 | "contextMenu":{ 12 | "basePath":"", 13 | "enabledFor":"Sound" 14 | }, 15 | "redirectOutputs":true 16 | }, 17 | { 18 | "id":"ak.auto-midi-map", 19 | "displayName":"Auto MIDI map", 20 | "defaultShortcut":"", 21 | "program":"py.exe", 22 | "startMode":"MultipleSelectionMultipleProcesses", 23 | "args":"-3 ${CurrentCommandDirectory}\\..\\auto-midi-map ${id}", 24 | "cwd":"", 25 | "contextMenu":{ 26 | "basePath":"", 27 | "enabledFor":"BlendContainer" 28 | }, 29 | "redirectOutputs":true 30 | }, 31 | { 32 | "id":"ak.auto-rename-container", 33 | "displayName":"Auto Rename Container", 34 | "defaultShortcut":"", 35 | "program":"py.exe", 36 | "startMode":"MultipleSelectionMultipleProcesses", 37 | "args":"-3 
${CurrentCommandDirectory}\\..\\auto-rename-container ${id}", 38 | "cwd":"", 39 | "contextMenu":{ 40 | "basePath":"" 41 | }, 42 | "redirectOutputs":true 43 | }, 44 | { 45 | "id":"ak.auto-trim", 46 | "displayName":"Auto Trim Sources", 47 | "defaultShortcut":"", 48 | "program":"py.exe", 49 | "startMode":"MultipleSelectionSingleProcessSpaceSeparated", 50 | "args":"-3 ${CurrentCommandDirectory}\\..\\auto-trim-sources ${id}", 51 | "cwd":"", 52 | "contextMenu":{ 53 | "basePath":"" 54 | }, 55 | "redirectOutputs":false 56 | }, 57 | { 58 | "id":"ak.auto-trim-initial-delay", 59 | "displayName":"Auto Trim Sources (with Initial Delay)", 60 | "defaultShortcut":"", 61 | "program":"py.exe", 62 | "startMode":"MultipleSelectionSingleProcessSpaceSeparated", 63 | "args":"-3 ${CurrentCommandDirectory}\\..\\auto-trim-sources ${id} --initial_delay", 64 | "cwd":"", 65 | "contextMenu":{ 66 | "basePath":"" 67 | }, 68 | "redirectOutputs":false 69 | }, 70 | { 71 | "id":"ak.new-synth-one", 72 | "displayName":"New Random Synth One (x10)", 73 | "defaultShortcut":"", 74 | "program":"py.exe", 75 | "startMode":"SingleSelectionSingleProcess", 76 | "args":"-3 ${CurrentCommandDirectory}\\..\\new-synth-one ${id} --count=10", 77 | "cwd":"", 78 | "contextMenu":{ 79 | "basePath":"" 80 | }, 81 | "redirectOutputs":true 82 | }, 83 | { 84 | "id":"ak.auto-create-events", 85 | "displayName":"Auto Create Event(s) with Work Units", 86 | "defaultShortcut":"", 87 | "program":"py.exe", 88 | "startMode":"MultipleSelectionSingleProcessSpaceSeparated", 89 | "args":"-3 ${CurrentCommandDirectory}\\..\\auto-create-events ${id}", 90 | "cwd":"", 91 | "contextMenu":{ 92 | "basePath":"" 93 | }, 94 | "redirectOutputs":false 95 | } 96 | ] 97 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2019-2020 Audiokinetic Inc. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # waapi-python-tools 2 | 3 | This repository is a collection of tools to be used inside Audiokinetic Wwise. The tools use WAAPI (Wwise Authoring API) and Python to perform several automated tasks. 4 | 5 | Refer to **General Setup Instructions** below, then find the specific instructions README.md in the sub-folders. 6 | 7 | ## Requirements 8 | * Python 3.6+ 9 | * Running instance of Wwise.exe with the Wwise Authoring API enabled (Project > User Preferences... > Enable Wwise Authoring API) 10 | * **waapi-client** python project installed 11 | 12 | ## General Setup Instructions 13 | 14 | We recommend to use the Python Launcher for Windows which is installed with Python 3 from python.org. 15 | 16 | ### Install Python 3.6 17 | 18 | * Install Python 3.6 or greater from: https://www.python.org/downloads/ 19 | 20 | ### Install waapi-client 21 | 22 | * **Windows**: `py -3 -m pip install waapi-client scipy` 23 | * **Other platforms**: `python3 -m pip install waapi-client scipy` 24 | 25 | Additional instructions can be found at: 26 | https://pypi.org/project/waapi-client/ 27 | 28 | ### Installing the Command Add-ons (2019.2.x+) 29 | 30 | 1. Ensure the folder `%APPDATA%\Audiokinetic\Wwise\Add-ons` is present. Create `Add-ons` if not present. 
31 | 2. Download this whole repository zip file from GitHub. 32 | 3. Unzip the content from `waapi-python-tools` folder inside `Add-ons`. 33 | 4. Restart Wwise or use the command **Reload Commands** 34 | 35 | At the end, the following file structure should be present: 36 | `%APPDATA%\Audiokinetic\Wwise\Add-ons\Commands\waapi-python-tools.json` 37 | 38 | ### Installing for External Editors (2018.1.x) 39 | 40 | 1. Download this whole repository zip file from GitHub. 41 | 2. Unzip the content of zip file on your computer. 42 | 3. In Wwise, open **Project > User Preferences**. 43 | 4. Add `.cmd` in the **External Editors** list. 44 | 45 | ## Running the script 46 | 47 | * **Windows**: `py -3 ` 48 | * **Other platforms**: `python3 ` 49 | 50 | Replace `` by the name of the folder you want to use. 51 | 52 | ## More information 53 | 54 | To learn more about WAAPI: 55 | https://www.audiokinetic.com/library/edge/?source=SDK&id=waapi.html 56 | 57 | To learn more about using Python with WAAPI: 58 | https://www.audiokinetic.com/library/edge/?source=SDK&id=waapi_client_python_rpc.html 59 | 60 | To learn more about Command Add-ons: 61 | https://www.audiokinetic.com/library/edge/?source=SDK&id=defining_custom_commands.html -------------------------------------------------------------------------------- /auto-adjust-sound-voice-loudness/README.md: -------------------------------------------------------------------------------- 1 | # auto-adjust-sound-voice-loudness 2 | 3 | This script is a utility tool for managing audio consistency across languages in a Wwise project. It adjusts the Make-Up Gain of Sound Voice sources in non-reference languages to align their loudness with the reference language. The tool supports both momentary max and integrated loudness measurements and applies corrections automatically. 4 | 5 | ## Features 6 | 7 | * Analyze and adjust loudness across all or specified languages in the Wwise project. 
8 | * Define custom paths for targeting specific Sound Voice objects. 9 | * Choose between momentaryMax or integrated loudness measurements for adjustments. 10 | * Automatically apply calculated Make-up Gain values to align loudness with the reference language. 11 | 12 | ## Overview 13 | ``` 14 | usage: . [-h] [--language [LANGUAGE]] [--path [PATH]] 15 | [--loudness {momentaryMax,integrated}] 16 | 17 | Automatically adjust the loudness of Sound Voice sources in your Wwise project 18 | to align with the reference language, ensuring consistent audio levels across 19 | languages. 20 | 21 | options: 22 | -h, --help show this help message and exit 23 | --language [LANGUAGE] 24 | Optional. Specify the language to validate (e.g., en, 25 | fr). If omitted, all languages will be validated 26 | against the reference language. 27 | --path [PATH] Optional. Specify the path to enumerate Sound Voice 28 | objects (e.g., "\Actor-Mixer 29 | Hierarchy\Character\Voices"). If omitted, the entire 30 | project is scanned. 31 | --loudness {momentaryMax,integrated} 32 | Optional. Specify the loudness measurement to use 33 | (momentaryMax or integrated). Default is momentaryMax. 34 | ``` 35 | 36 | ## Requirements 37 | 38 | * Wwise 2023.1.0+ 39 | * Python 3.6+ 40 | * Python packages: 41 | 42 | `py -3 -m pip install waapi-client` 43 | 44 | ## Instructions 45 | 46 | **Note**: Refer to installation instructions in [waapi-python-tools](../README.md). 47 | 48 | 1. Open a Wwise project. 49 | 2. Run the script. 
50 | 51 | 52 | ## Examples 53 | 54 | Adjust all languages using the default loudness type (momentaryMax): 55 | 56 | ``` 57 | python adjust_loudness.py 58 | ``` 59 | 60 | Adjust the French language only: 61 | 62 | ``` 63 | python adjust_loudness.py --language fr 64 | ``` 65 | 66 | Adjust languages within a specific path using integrated loudness: 67 | 68 | ``` 69 | python adjust_loudness.py --path "\Actor-Mixer Hierarchy\Characters" --loudness integrated 70 | ``` -------------------------------------------------------------------------------- /auto-adjust-sound-voice-loudness/__main__.py: -------------------------------------------------------------------------------- 1 | from waapi import WaapiClient, CannotConnectToWaapiException 2 | from pprint import pprint 3 | import argparse 4 | 5 | # Define arguments for the script 6 | parser = argparse.ArgumentParser(description="Automatically adjust the loudness of Sound Voice sources in your Wwise project to align with the reference language, ensuring consistent audio levels across languages.") 7 | parser.add_argument('--language', const=1, default=None, type=str, nargs='?', help='Optional. Specify the language to validate (e.g., en, fr). If omitted, all languages will be validated against the reference language.') 8 | parser.add_argument('--path', const=1, default=None, type=str, nargs='?', help='Optional. Specify the path to enumerate Sound Voice objects (e.g., "\Actor-Mixer Hierarchy\Character\Voices"). If omitted, the entire project is scanned.') 9 | parser.add_argument('--loudness', type=str, default='momentaryMax', choices=['momentaryMax', 'integrated'], required=False, help='Optional. Specify the loudness measurement to use (momentaryMax or integrated). 
Default is momentaryMax.') 10 | args = parser.parse_args() 11 | 12 | path = args.path 13 | 14 | def get_language_name(id, languages): 15 | language = next((language for language in languages if language["id"] == id), None) 16 | return language["name"] 17 | 18 | def get_language_id(name, languages): 19 | language = next((language for language in languages if language["name"] == name), None) 20 | return language["id"] 21 | 22 | if path is None: 23 | path = F"\Actor-Mixer Hierarchy" 24 | 25 | try: 26 | # Connecting to Waapi using default URL 27 | with WaapiClient() as client: 28 | 29 | projectInfo = client.call("ak.wwise.core.getProjectInfo") 30 | 31 | languages = [] 32 | 33 | if args.language is not None: 34 | # Validate language is valid 35 | if not any(language["name"] == args.language for language in projectInfo["languages"]): 36 | raise ValueError("The language is invalid.") 37 | languages.append(get_language_id(args.language, projectInfo["languages"])) 38 | else: 39 | # take all languages from the project 40 | languages = [language["id"] for language in projectInfo["languages"]] 41 | 42 | ref_language_name = get_language_name(projectInfo["referenceLanguageId"], projectInfo["languages"]) 43 | 44 | # Query the voices of the reference language 45 | get_args = { "waql": F"$ \"{path}\" select descendants where nodeType = \"Sound Voice\" select activeSource"} 46 | get_options = { 47 | "return": ["name", "parent.id as parentId", "id", "path", "loudness.momentaryMax as momentaryMax", "loudness.integrated as integrated", "VolumeOffset"], 48 | "language": projectInfo["referenceLanguageId"]} 49 | references_voices = client.call("ak.wwise.core.object.get", get_args, options=get_options)['return'] 50 | 51 | voices_per_id = {voice["parentId"]: voice for voice in references_voices} 52 | 53 | set_args = { 54 | "objects": [], 55 | "onNameConflict": "merge", 56 | } 57 | 58 | for language in languages: 59 | if language == projectInfo["referenceLanguageId"]: 60 | continue 61 | 62 | 
language_name = get_language_name(language, projectInfo["languages"]) 63 | print(F"Processing {language_name}...") 64 | 65 | # Use WAQL to obtain all audio sources under the specified root 66 | get_options["language"] = language 67 | voices = client.call("ak.wwise.core.object.get", get_args, options=get_options)['return'] 68 | 69 | for voice in voices: 70 | ref_voice = voices_per_id.get(voice["parentId"], None) 71 | if ref_voice is None: 72 | raise KeyError("The language is invalid.") 73 | 74 | if args.loudness not in ref_voice or args.loudness not in voice: 75 | continue 76 | 77 | offset = float(ref_voice[args.loudness]) - float(voice[args.loudness]) 78 | if abs(offset) > 0.1: 79 | print(F"{voice['path']}: '{ref_language_name}':{ref_voice[args.loudness]} '{language_name}':{voice[args.loudness]} Offset:{offset}") 80 | 81 | # Set the VolumeOffset property which is labelled as Make-up Gain in Wwise 82 | set_args["objects"].append({ 83 | "object":voice['id'], 84 | "@VolumeOffset": offset 85 | }) 86 | 87 | client.call("ak.wwise.core.undo.beginGroup") 88 | client.call("ak.wwise.core.object.set", set_args) 89 | client.call("ak.wwise.core.undo.endGroup", { 'displayName': 'Auto Adjust Volume Offset'}) 90 | 91 | except Exception as e: 92 | print(str(e)) -------------------------------------------------------------------------------- /auto-create-events/README.md: -------------------------------------------------------------------------------- 1 | # auto-create-events 2 | 3 | Automatically create play events on the selected objects and replicate the same work unit hierarchy on the event hierarchy. This is useful in the context of multiple users to avoid modifying the Default Work Unit. 4 | 5 | ## Overview 6 | ``` 7 | usage: [-h] [GUID ...] 8 | 9 | Automatically create new event from the selection and replicate the work unit hierarchy. 10 | 11 | positional arguments: 12 | GUID One or many guid of the form {01234567-89ab-cdef-0123-4567890abcde}. 
13 | The script retrieves the current selected if no GUID specified. 14 | ``` 15 | 16 | ## Requirements 17 | 18 | * Wwise 2022.1.x+ 19 | * Python 3.6+ 20 | * Python packages: 21 | 22 | `py -3 -m pip install waapi-client` 23 | 24 | 25 | 26 | ## Instructions 27 | 28 | **Note**: Refer to installation instructions in [waapi-python-tools](../README.md). 29 | 30 | 1. Select objects from the Actor-Mixer Hierarchy. 31 | 2. Right click and select **Auto Create Event(s) with Work Units**. 32 | 33 | 34 | -------------------------------------------------------------------------------- /auto-create-events/__main__.py: -------------------------------------------------------------------------------- 1 | from waapi import WaapiClient 2 | from pprint import pprint 3 | from pathlib import Path 4 | import argparse, traceback 5 | 6 | # Define arguments for the script 7 | parser = argparse.ArgumentParser(description='Automatically create new play events from the selection and replicate the work unit hierarchy.') 8 | parser.add_argument('ids', metavar='GUID', nargs='*', help='One or many guid of the form {01234567-89ab-cdef-0123-4567890abcde}. 
The script retrieves the current selected if no GUID specified.') 9 | 10 | args = parser.parse_args() 11 | 12 | def create_event(name, target): 13 | # Create an Event withe specified name and target 14 | return { 15 | "type":"Event", 16 | "name":name, 17 | "children":[ 18 | { 19 | "type":"Action", 20 | "name": "", 21 | "@Target": target 22 | } 23 | ] 24 | } 25 | 26 | def create_workunit(name): 27 | # Create a Work Unit with the specified name 28 | return { 29 | "type":"WorkUnit", 30 | "name": name 31 | } 32 | 33 | def create_virtual_folder(name): 34 | # Create a Virtual Folder with the specified name 35 | return { 36 | "type":"Folder", 37 | "name": name 38 | } 39 | 40 | def create_event_and_workunits(path, target): 41 | # Create an event and the parent work units 42 | 43 | # Create the event first 44 | current_hierarchy = create_event(str(path.stem), target) 45 | 46 | # Then create the parents in a loop 47 | current_path = path.parent 48 | while len(current_path.parents) > 1: 49 | 50 | new_parent = None 51 | if len(current_path.parents) == 2: 52 | new_parent = create_workunit(current_path.stem) 53 | else : 54 | new_parent = create_virtual_folder(current_path.stem) 55 | 56 | new_parent["children"] = [current_hierarchy] 57 | 58 | current_path = current_path.parent 59 | current_hierarchy = new_parent 60 | 61 | return current_hierarchy 62 | 63 | 64 | try: 65 | 66 | # Connecting to Waapi using default URL 67 | with WaapiClient() as client: 68 | 69 | selected = [] 70 | 71 | options = { "return" : ["path", "id", "isPlayable"] } 72 | 73 | # if no ID is passed as argument, use the selected object from the project 74 | if args.ids is None or len(args.ids) == 0: 75 | selected = client.call("ak.wwise.ui.getSelectedObjects", {}, options=options)['objects'] 76 | else: 77 | ids_list = ', '.join(f'"{item}"' for item in args.ids) 78 | selected = client.call("ak.wwise.core.object.get", { "waql":f"$ {ids_list}" }, options=options)['return'] 79 | 80 | set_args = { 81 | "objects": [] 
82 | } 83 | 84 | for obj in selected: 85 | 86 | # Skip non playable objects 87 | if not obj["isPlayable"]: 88 | continue 89 | 90 | selection_path = Path(obj["path"]) 91 | print(str(selection_path)) 92 | 93 | # Choose the new event path 94 | parts = list(selection_path.parts) 95 | parts[1] = "Events" 96 | object_name = parts.pop() 97 | parts.append("Play_" + object_name) 98 | 99 | hierarchy = create_event_and_workunits(Path(*parts), obj["id"]) 100 | 101 | set_args["objects"].append( 102 | { 103 | "object":"\\Events", 104 | "children": [ hierarchy ] 105 | }); 106 | 107 | client.call("ak.wwise.core.object.set", set_args) 108 | 109 | 110 | except Exception as e: 111 | traceback.print_exc() 112 | print(str(e)) -------------------------------------------------------------------------------- /auto-midi-map/README.md: -------------------------------------------------------------------------------- 1 | # auto-midi-map 2 | 3 | Automatically structure the children of Wwise Blend Container and set the MIDI properties based on a naming convention. 4 | 5 | **Attention**: The project works well with a specific file name convention. Please feel 6 | free to modify in order to support other conventions. 7 | 8 | ## Overview 9 | 10 | It is possible to implement a sample-based MIDI instrument in Wwise directly in the Actor-Mixer hierarchy. However, it can be a tedious task. This tool aims to ease the setup of such complex structures by setting up automatically the structure for you after you imported the sounds. 11 | 12 | ## Requirements 13 | 14 | * Wwise 2019.2.x+ for using with the **Command Add-ons** (custom menus). 15 | * Wwise 2018.1.11+ for using auto-midi-map.cmd with **External Editors**. 16 | 17 | ## Instructions 18 | 19 | **Note**: Refer to installation instructions in [waapi-python-tools](../README.md). 20 | 21 | 1. Create a **Blend container**. 22 | 2. Import sample sounds in the container. The sounds must contain a note name (C,D,E,F,G,A,B, then # or b, then the octave). 
23 | 3. Select the **Blend Container**. 24 | 4. Right click and select **Auto MIDI map** or use `auto-midi-map.cmd` as an **External Editor**. 25 | 26 | **Note**: If multiple sounds have the same note, a random container will be created. 27 | 28 | Example of file names: 29 | 30 | ```(sh) 31 | Flute_a#3.wav 32 | Flute_a#4.wav 33 | Flute_c3.wav 34 | Flute_c4.wav 35 | Flute_e3.wav 36 | Flute_e4.wav 37 | Flute_g3.wav 38 | Flute_g4.wav 39 | ``` 40 | -------------------------------------------------------------------------------- /auto-midi-map/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from waapi import WaapiClient, CannotConnectToWaapiException 4 | from pprint import pprint 5 | from collections import defaultdict 6 | import sys, re, math, os, argparse 7 | 8 | # Define arguments for the script 9 | parser = argparse.ArgumentParser(description='Generate text to speech for the specified Wwise object ID.') 10 | parser.add_argument('id', metavar='GUID', nargs='?', help='One guid of the form {01234567-89ab-cdef-0123-4567890abcde}. 
The script retrieves the current selected if no GUID specified.') 11 | 12 | args = parser.parse_args() 13 | 14 | notes = { 15 | 'c': 0, 16 | 'c#': 1, 17 | 'db': 1, 18 | 'd': 2, 19 | 'd#': 3, 20 | 'eb': 3, 21 | 'e': 4, 22 | 'f': 5, 23 | 'f#': 6, 24 | 'gb': 6, 25 | 'g': 7, 26 | 'g#': 8, 27 | 'ab': 8, 28 | 'a': 9, 29 | 'a#': 10, 30 | 'bb': 10, 31 | 'b': 11 32 | } 33 | 34 | def note_name_to_number(name): 35 | note = 60 36 | match = re.search('(?P[cdefgabCDEFGAB][#b]?)(?P[0-9]+)', name) 37 | if match is not None: 38 | letter = match.group('letter').lower() 39 | octave = int(match.group('octave')) 40 | 41 | # Find the note number 42 | note = notes[letter] 43 | note = note + (octave + 2) * 12 44 | 45 | return note 46 | 47 | try: 48 | # Connecting to Waapi using default URL 49 | with WaapiClient() as client: 50 | 51 | if args.id is None: 52 | selected = client.call("ak.wwise.ui.getSelectedObjects")['objects'] 53 | if len(selected) != 1: 54 | raise Exception('Only works with a single selection') 55 | args.id = selected[0]['id'] 56 | 57 | # Obtain more information for all objects being passed 58 | get_args = { 59 | "from": {"id": [args.id]}, 60 | "transform": [ 61 | {"select": ['children']} 62 | ] 63 | } 64 | options = { 65 | "return": ['id', 'name','type'] 66 | } 67 | sounds = client.call("ak.wwise.core.object.get", get_args, options=options)['return'] 68 | 69 | # Parse the sound names and find the MIDI notes 70 | groups = defaultdict(list) 71 | errors = [] 72 | for child in sounds: 73 | name = child['name'] 74 | 75 | match = re.search('(?P[cdefgabCDEFGAB][#b]?[0-9]+)', name) 76 | if match is None: 77 | errors.append('Could not find a note in ' + name) 78 | else: 79 | note_number = note_name_to_number(match.group('note')) 80 | child['note'] = note_number 81 | groups[note_number].append(child) 82 | 83 | if len(errors) > 0: 84 | raise Exception('\n'.join(errors)) 85 | 86 | # Start the work 87 | client.call("ak.wwise.core.undo.beginGroup") 88 | 89 | children = [] 90 | 91 | # 
For each group, create a container and move it in 92 | for note, elements in groups.items(): 93 | 94 | if len(elements) == 1: 95 | children.append(elements[0]) 96 | else: 97 | # Find common name for parent 98 | names = list(map(lambda object: object['name'], elements)) 99 | common = os.path.commonprefix(names) 100 | common = common.rstrip('_ -') 101 | 102 | if not common: 103 | common = str(note) 104 | 105 | create_args = { 106 | "parent": selected[0]['id'], 107 | "type": 'RandomSequenceContainer', 108 | "name": common, 109 | "onNameConflict": "rename" 110 | } 111 | 112 | container = client.call("ak.wwise.core.object.create", create_args) 113 | container['note'] = note 114 | 115 | # Move sounds to the new container 116 | for element in elements: 117 | move_args = { 118 | 'object': element['id'], 119 | 'parent': container['id'] 120 | } 121 | client.call("ak.wwise.core.object.move", move_args) 122 | 123 | children.append(container) 124 | 125 | # Try to fill whole between notes & prepare midi settings 126 | children.sort(key=lambda object: object['note']) 127 | 128 | i = 0 129 | for child in children: 130 | min = 0 131 | max = 127 132 | if i != 0: 133 | min = children[i - 1]['@MidiKeyFilterMax'] + 1 134 | if i != len(children) - 1: 135 | max = child['note'] + math.floor((children[i + 1]['note'] - children[i]['note']) / 2) 136 | 137 | child['@MidiKeyFilterMin'] = min 138 | child['@MidiKeyFilterMax'] = max 139 | child['@EnableMidiNoteTracking'] = 1 140 | child['@MidiTrackingRootNote'] = child['note'] 141 | child['@OverrideMidiNoteTracking'] = 1 142 | max = 127 143 | 144 | i += 1 145 | 146 | # Set properties 147 | for child in children: 148 | for key, value in child.items(): 149 | if key.startswith('@'): 150 | set_property_args = { 151 | 'object': child['id'], 152 | 'property': key[1:], 153 | 'value': value 154 | } 155 | client.call("ak.wwise.core.object.setProperty", set_property_args) 156 | 157 | client.call("ak.wwise.core.undo.endGroup", { 'displayName': 'Auto MIDI 
map'}) 158 | 159 | 160 | except CannotConnectToWaapiException: 161 | print("Could not connect to Waapi: Is Wwise running and Wwise Authoring API enabled?") 162 | 163 | except Exception as e: 164 | print(str(e)) 165 | -------------------------------------------------------------------------------- /auto-midi-map/auto-midi-map.cmd: -------------------------------------------------------------------------------- 1 | call py -3 "%~dp0\" -------------------------------------------------------------------------------- /auto-rename-container/README.md: -------------------------------------------------------------------------------- 1 | # auto-rename-container 2 | 3 | Automatically rename a container based on the name of its children. 4 | 5 | **Attention**: The project only works if all the children of a container start with the same prefix. 6 | 7 | ## Requirements 8 | 9 | * Wwise 2019.2.x+ for using with the **Command Add-ons** (custom menus). 10 | * Wwise 2018.1.11+ for using auto-rename-container.cmd with **External Editors**. 11 | 12 | ## Instructions 13 | 14 | **Note**: Refer to installation instructions in [waapi-python-tools](../README.md). 15 | 16 | 1. Create a container. 17 | 2. Import sample sounds in the container. The sounds must all start with the same prefix. 18 | 3. Select the container. 19 | 4. Right click and select **Auto Rename Container** or run the script manually. 
20 | -------------------------------------------------------------------------------- /auto-rename-container/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from waapi import WaapiClient, CannotConnectToWaapiException 4 | import sys, re, os, argparse 5 | 6 | # Define arguments for the script 7 | parser = argparse.ArgumentParser(description='Auto-rename container for the specified Wwise object ID.') 8 | parser.add_argument('id', metavar='GUID', nargs='?', help='One guid of the form {01234567-89ab-cdef-0123-4567890abcde}. The script retrieves the current selected if no GUID specified.') 9 | 10 | args = parser.parse_args() 11 | try: 12 | # Connecting to Waapi using default URL 13 | with WaapiClient() as client: 14 | 15 | if args.id is None: 16 | selected = client.call("ak.wwise.ui.getSelectedObjects")['objects'] 17 | if len(selected) != 1: 18 | raise Exception('Only works with a single selection') 19 | args.id = selected[0]['id'] 20 | 21 | # Obtain more information for all objects being passed 22 | get_args = { 23 | "from": {"id": [args.id]}, 24 | "transform": [ 25 | {"select": ['children']} 26 | ] 27 | } 28 | options = { 29 | "return": ['name'] 30 | } 31 | children = client.call("ak.wwise.core.object.get", get_args, options=options)['return'] 32 | 33 | names = list(map(lambda object: object['name'], children)) 34 | common = os.path.commonprefix(names) 35 | 36 | common = common.rstrip('_ -') 37 | 38 | if not common: 39 | raise Exception('No common prefix found') 40 | 41 | set_name_args = { 42 | "object": args.id, 43 | "value":common 44 | } 45 | client.call("ak.wwise.core.object.setName", set_name_args) 46 | 47 | 48 | except CannotConnectToWaapiException: 49 | print("Could not connect to Waapi: Is Wwise running and Wwise Authoring API enabled?") 50 | 51 | except Exception as e: 52 | print(str(e)) 53 | -------------------------------------------------------------------------------- 
/auto-rename-container/auto-rename-container.cmd: -------------------------------------------------------------------------------- 1 | call py -3 "%~dp0\" -------------------------------------------------------------------------------- /auto-trim-sources/README.md: -------------------------------------------------------------------------------- 1 | # auto-trim-sources 2 | 3 | Automatically trim sources based on the specified decibel threshold. Optionally add a fade in or fade out. 4 | 5 | ## Overview 6 | ``` 7 | usage: . [-h] [--threshold_begin [THRESHOLD_BEGIN]] [--threshold_end [THRESHOLD_END]] [--no_trim_begin] [--no_trim_end] [--fade_begin [FADE_BEGIN]] [--fade_end [FADE_END]] [GUID ...] 8 | 9 | Automatically trim the sources for specified objects (ID). 10 | 11 | positional arguments: 12 | GUID One or many guid of the form: 13 | "{01234567-89ab-cdef-0123-4567890abcde}". 14 | The script retrieves the current selected if no GUID specified. 15 | optional arguments: 16 | -h, --help show this help message and exit 17 | 18 | --threshold_begin [THRESHOLD_BEGIN] 19 | Threshold in decibels under which the begin is trimmed. (Default:-40) 20 | 21 | --threshold_end [THRESHOLD_END] 22 | Threshold in decibels under which the end is trimmed. 
(Default:-40) 23 | 24 | --no_trim_begin 25 | Do not trim the begin of the sources 26 | 27 | --no_trim_end 28 | Do not trim the end of the sources 29 | 30 | --fade_begin [FADE_BEGIN] 31 | Fade duration when trimming begin (Default:0) 32 | 33 | --fade_end [FADE_END] 34 | Fade duration when trimming end (Default:0.02) 35 | ``` 36 | 37 | Example: 38 | 39 | `py -3 .\auto-trim-sources\ "{FB573826-0E68-4129-9376-21EC85F3168B}" --no_trim_begin --threshold_end -45` 40 | 41 | 42 | ## Requirements 43 | 44 | * Wwise 2022.1.x+ 45 | * Python 3.6+ 46 | * Python packages: 47 | 48 | `py -3 -m pip install waapi-client` 49 | 50 | `py -3 -m pip install scipy` 51 | 52 | ## Instructions 53 | 54 | **Note**: Refer to installation instructions in [waapi-python-tools](../README.md). 55 | 56 | 1. Select objects from the Actor-Mixer Hierarchy. 57 | 2. Right click and select **Auto Trim Sources**. 58 | 59 | 60 | -------------------------------------------------------------------------------- /auto-trim-sources/__main__.py: -------------------------------------------------------------------------------- 1 | from waapi import WaapiClient, CannotConnectToWaapiException 2 | from pprint import pprint 3 | import argparse 4 | from scipy.io import wavfile 5 | 6 | # Define arguments for the script 7 | parser = argparse.ArgumentParser(description='Automatically trim the sources for specified objects (ID).') 8 | parser.add_argument('ids', metavar='GUID', nargs='*', help='One or many guid of the form {01234567-89ab-cdef-0123-4567890abcde}. 
# Command-line interface. Thresholds are in dBFS; fades are in seconds.
parser.add_argument('ids', metavar='GUID', nargs='*', help='One or many guid of the form {01234567-89ab-cdef-0123-4567890abcde}. The script retrieves the current selected if no GUID specified.')
parser.add_argument('--threshold_begin', const=1, default=-40, type=int, nargs='?', help='Threshold in decibels under which the begin is trimmed.')
parser.add_argument('--threshold_end', const=1, default=-40, type=int, nargs='?', help='Threshold in decibels under which the end is trimmed.')
# BUG FIX: these flags used type=bool with nargs='?', so any explicit value
# (even "False") parsed as True. store_true is the idiomatic boolean flag and
# keeps the same CLI: bare flag -> True, omitted -> False.
parser.add_argument('--no_trim_begin', action='store_true', help='Do not trim the begin of the sources')
parser.add_argument('--no_trim_end', action='store_true', help='Do not trim the end of the sources')
parser.add_argument('--fade_begin', const=1, default=0, type=float, nargs='?', help='Fade duration when trimming begin')
parser.add_argument('--fade_end', const=1, default=0.01, type=float, nargs='?', help='Fade duration when trimming end')
parser.add_argument('--initial_delay', action='store_true', help='Trimming applied on the begin will be compensated by initial delay')

args = parser.parse_args()

# Convert thresholds from decibels (dBFS) to linear amplitude: 10^(dB/20).
threshold_begin = pow(10, args.threshold_begin * 0.05)
threshold_end = pow(10, args.threshold_end * 0.05)

# Map a numpy dtype name to a function normalizing one raw sample to [-1, 1].
convert_sample_functions = {
    "int16": lambda s: s / 32767,
    "int32": lambda s: s / 2147483647,
    "float32": lambda s: s
}

def get_convert_sample_function(data):
    """Return a function converting one frame of raw WAV data to a single float.

    For mono data (1-D array) the sample itself is normalized; for
    multi-channel data (2-D array) the signed maximum across channels of each
    frame is normalized.  NOTE(review): taking the signed max means a loud
    negative peak on one channel can slip under the threshold test — confirm
    this is intended before changing it.
    """
    base_convert = convert_sample_functions[data.dtype.name]
    if len(data.shape) == 1:
        return base_convert
    return lambda a: base_convert(a.max())

try:

    # Connecting to Waapi using default URL
    with WaapiClient() as client:

        selected = []

        # If no ID is passed as argument, use the selected objects from the project
        if args.ids is None or len(args.ids) == 0:
            selected = client.call("ak.wwise.ui.getSelectedObjects")['objects']
        else:
            selected = [{"id": id} for id in args.ids]

        set_args = {
            "objects": []
        }

        for obj in selected:

            # Use WAQL to obtain all audio sources under the object
            call_args = {"waql": f"$ \"{obj['id']}\" select this, descendants where type = \"AudioFileSource\""}
            options = {"return": ["originalWavFilePath", "type", "id", "parent.id"]}

            sources = client.call("ak.wwise.core.object.get", call_args, options=options)

            for source in sources['return']:

                # Open the WAV file
                sample_rate, data = wavfile.read(source['originalWavFilePath'])
                print(f"Processing {source['originalWavFilePath']}...")

                channels = data.shape[1] if len(data.shape) == 2 else 1
                num_samples = int(data.size / channels)
                trim_end_pos = num_samples - 1
                trim_begin_pos = 0

                convert_sample = get_convert_sample_function(data)
                last_zero_crossing = 0
                last_value = 0

                # Scan forward through the PCM data for the trim-begin point:
                # the last zero crossing before the signal first exceeds the threshold.
                for i in range(0, num_samples - 1):
                    value = convert_sample(data[i])

                    # Store zero crossing
                    if (value > 0 and last_value <= 0) or (value < 0 and last_value >= 0):
                        last_zero_crossing = i

                    # Detect threshold
                    if abs(value) > threshold_begin:
                        trim_begin_pos = last_zero_crossing
                        break

                    last_value = value

                # Scan backward for the trim-end point, stopping at the
                # trim-begin position so the two can never cross.
                last_zero_crossing = num_samples - 1
                last_value = 0

                for i in range(num_samples - 1, trim_begin_pos, -1):
                    value = convert_sample(data[i])

                    # Store zero crossing
                    if (value > 0 and last_value <= 0) or (value < 0 and last_value >= 0):
                        last_zero_crossing = i

                    if abs(value) > threshold_end:
                        trim_end_pos = last_zero_crossing
                        break

                    last_value = value

                # Set the trim and fade properties on the source object.
                # Positions are sample indices; Wwise properties are in seconds.
                set_sound = {"object": source['parent.id']}
                set_source = {"object": source['id']}

                if (not args.no_trim_begin) and trim_begin_pos > 0:
                    set_source["@TrimBegin"] = trim_begin_pos / sample_rate

                if (not args.no_trim_end) and trim_end_pos < num_samples - 1:
                    set_source["@TrimEnd"] = trim_end_pos / sample_rate

                if args.initial_delay:
                    # Compensate the trimmed head with an equal delay on the parent sound
                    set_sound["@InitialDelay"] = trim_begin_pos / sample_rate

                set_source["@FadeInDuration"] = args.fade_begin
                set_source["@FadeOutDuration"] = args.fade_end

                # Store changes
                set_args["objects"].append(set_source)
                set_args["objects"].append(set_sound)

        # Apply all accumulated changes as a single undoable operation
        client.call("ak.wwise.core.undo.beginGroup")
        client.call("ak.wwise.core.object.set", set_args)
        client.call("ak.wwise.core.undo.endGroup", {'displayName': 'Auto Trim Sources'})


except Exception as e:
    print(str(e))
def RandomEffect():
    """Return the id of a randomly chosen Effect ShareSet from the project.

    Relies on the module-level `effects` list queried from WAAPI before
    generation starts.  (The dead `effect = []` global, a typo for `effects`,
    has been removed.)
    """
    return choice(effects)['id']


def ADSR(property, a, d, s, r, y_min, y_max, stop):
    """Build an RTPC entry driven by a custom ADSR envelope modulator.

    property -- name of the Wwise property the curve drives
    a, d, s, r -- attack, decay, sustain and release times (seconds)
    y_min, y_max -- property values mapped to envelope output 0 and 1
    stop -- whether the envelope stops playback when it completes
    """
    return {
        "type": "RTPC",
        "name": "",
        "@Curve": {
            "type": "Curve",
            "points": [
                {"x": 0, "y": y_min, "shape": "Linear"},
                {"x": 1, "y": y_max, "shape": "Linear"}
            ]
        },
        "@PropertyName": property,
        "@ControlInput": {
            # Envelope properties
            "type": "ModulatorEnvelope",
            "name": "ENV",
            "@EnvelopeAttackTime": a,
            "@EnvelopeAutoRelease": True,
            "@EnvelopeStopPlayback": stop,
            "@EnvelopeDecayTime": d,
            "@EnvelopeReleaseTime": r,
            "@EnvelopeSustainTime": s
        }
    }


def LFO(property, freq, y_min, y_max):
    """Build an RTPC entry driven by a randomized periodic LFO modulator.

    freq -- LFO frequency in Hz; depth, waveform and PWM are randomized.
    """
    return {
        "type": "RTPC",
        "name": "",
        "@Curve": {
            "type": "Curve",
            "points": [
                {"x": 0, "y": y_min, "shape": "Linear"},
                {"x": 1, "y": y_max, "shape": "Linear"}
            ]
        },
        "@PropertyName": property,
        "@ControlInput": {
            # LFO properties
            "type": "ModulatorLfo",
            "name": "LFO",
            "@LfoAttack": 0,
            "@LfoDepth": uniform(0, 100),
            "@LfoFrequency": freq,
            "@LfoWaveform": randrange(0, 6),
            "@LfoPWM": uniform(10, 90)
        }
    }


def Random(property, y_min, y_max):
    """Build an RTPC entry driven by a slow random LFO (waveform 5 at 0.01 Hz)."""
    return {
        "type": "RTPC",
        "name": "",
        "@Curve": {
            "type": "Curve",
            "points": [
                {"x": 0, "y": y_min, "shape": "Linear"},
                {"x": 1, "y": y_max, "shape": "Linear"}
            ]
        },
        "@PropertyName": property,
        "@ControlInput": {
            "type": "ModulatorLfo",
            "name": "RAND",
            "@LfoWaveform": 5,
            "@LfoFrequency": 0.01
        }
    }


def RandomPoints(x_min, x_max, y_min, y_max, count):
    """Return `count` curve points with random y values, sorted by x.

    The first and last points are pinned to x_min and x_max; the remaining
    count-2 points get random x positions in between.
    """
    points = [
        {"x": x_min, "y": uniform(y_min, y_max), "shape": "Linear"},
        {"x": x_max, "y": uniform(y_min, y_max), "shape": "Linear"}
    ]
    for x in range(0, count - 2):
        points.insert(1 + x, {
            "x": uniform(x_min, x_max),
            "y": uniform(y_min, y_max),
            "shape": "Linear"
        })

    points.sort(key=lambda p: p["x"])
    return points


def RandomTimeCurve(property, duration, y_min, y_max, count):
    """Build an RTPC entry with a random curve driven by a Time modulator.

    duration -- curve length in seconds (modulator duration floored at 0.1s)
    count -- number of random points on the curve
    """
    return {
        "type": "RTPC",
        "name": "",
        "@Curve": {
            "type": "Curve",
            "points": RandomPoints(0, duration, y_min, y_max, count)
        },
        "@PropertyName": property,
        "@ControlInput": {
            # Time modulator properties
            "type": "ModulatorTime",
            "name": "TimeMod",
            "@TimeModDuration": max(0.1, duration),
            "@EnvelopeStopPlayback": False
        }
    }


def Modulation(property, duration, y_min, y_max):
    """Build a randomly chosen modulation RTPC entry for `property`.

    Randomly narrows the [y_min, y_max] window, then picks one of four
    modulator flavors: random time curve, random LFO, periodic LFO, or ADSR.
    """
    pick = randrange(0, 4)
    start_ratio = uniform(0, 1)
    # Renamed from `range` to avoid shadowing the builtin.
    span = y_max - y_min
    y_min = y_min + span * start_ratio
    y_max = y_max - span * (1 - start_ratio) * uniform(0, 1)

    if pick == 0:
        return RandomTimeCurve(property, duration, y_min, y_max, randrange(2, 9))
    elif pick == 1:
        return Random(property, y_min, y_max)
    elif pick == 2:
        return LFO(property, uniform(0.01, 30), y_min, y_max)
    elif pick == 3:
        attack = duration - uniform(0, duration)
        release = duration - attack
        return ADSR(property, attack, 0, 0, release, y_min, y_max, False)


def Sound(i, average_duration):
    """Return a Sound SFX definition (dict) with a randomized Synth One source.

    i -- numeric suffix for the object name (FX<i>)
    average_duration -- target duration in seconds used to derive the envelope
    """
    attack = uniform(0.01, average_duration / 4)
    decay = uniform(0.01, average_duration / 4)
    release = uniform(0.01, average_duration / 4)
    sustain = average_duration / 4
    # Total envelope length; previously recomputed average_duration/4 inline.
    duration = attack + decay + sustain + release

    return {
        "type": "Sound",
        "name": "FX" + str(i),
        "children": [
            {
                "type": "SourcePlugin",
                "name": "WSFX",
                # Synth One class id:
                # https://www.audiokinetic.com/library/edge/?source=SDK&id=wwiseobject_source_wwise_synth_one.html
                "classId": 9699330,
                "@BaseFrequency": uniform(100, 1000),
                "@Osc1Waveform": randrange(0, 4),
                "@Osc2Waveform": randrange(0, 4),
                "@NoiseShape": randrange(0, 4),
                "@NoiseLevel": uniform(-12, 0),
                "@RTPC": [
                    Modulation("Osc1Transpose", duration, -1200, 1200),
                    Modulation("Osc2Transpose", duration, -1200, 1200),
                    Modulation("NoiseLevel", duration, -96, 6),
                    Modulation("Osc1Pwm", duration, 1, 99),
                    Modulation("Osc2Pwm", duration, 1, 99),
                    Modulation("FmAmount", duration, 0, 100),
                ],
            }
        ],
        "@Effect0": RandomEffect(),
        "@RTPC": [
            ADSR("Volume", attack, decay, sustain, release, -96, 0, True),
            Modulation("Lowpass", duration, 0, 20),
            Modulation("Highpass", duration, 0, 20),
        ],
    }


def Generate(location, num_sounds, start_index):
    """Create `num_sounds` randomized Sound SFX objects under `location`.

    Objects are named FX<start_index>..; existing children are replaced
    (listMode replaceAll) and name conflicts renamed.
    """
    set_args = {
        "objects": [
            {
                "object": location,
                "children": [Sound(i, uniform(0.4, 1.5)) for i in range(start_index, start_index + num_sounds)]
            },
        ],
        "onNameConflict": "rename",
        "listMode": "replaceAll"
    }

    # Call WAAPI to create the objects inside a single undo group.
    client.call("ak.wwise.core.undo.beginGroup")
    client.call("ak.wwise.core.object.set", set_args)
    # BUG FIX: the undo label previously said 'Auto Trim Sources' (copy-paste
    # from the other tool); label the undo entry after this tool.
    client.call("ak.wwise.core.undo.endGroup", {'displayName': 'New Synth One'})
try:

    # Connecting to Waapi using default URL
    with WaapiClient() as client:

        # Obtain all Effect ShareSets from the project (mastering suite excluded)
        effects = client.call("ak.wwise.core.object.get", {"waql": '$ from type effect where parent != null and pluginname != "mastering suite"'})["return"]

        selected = []

        # If no ID is passed as argument, use the selected object from the project
        if args.id is None or len(args.id) == 0:
            # BUG FIX: materialize the ids into a list. The original kept the
            # lazy map object, and len(selected) / selected[0] below raised
            # TypeError ('map' object has no len and is not subscriptable).
            selected = [s["id"] for s in client.call("ak.wwise.ui.getSelectedObjects")['objects']]
        else:
            selected = [args.id]

        location = "\\Actor-Mixer Hierarchy\\Default Work Unit"

        # Try to find the best location to create the new sounds
        if len(selected) > 0:
            ancestors = client.call("ak.wwise.core.object.get", {"waql": f'$ "{selected[0]}" select this, ancestors where type : "container" or type : "folder" or type : "workunit" and category = "actor-mixer hierarchy"'})["return"]

            if len(ancestors) > 0:
                location = ancestors[0]['id']

        # Count how many FX objects already exist at the location so the new
        # ones continue the numbering instead of colliding.
        siblings = client.call("ak.wwise.core.object.get", {"waql": f'$ "{location}" select children where name = /^FX\\d+/'})["return"]

        Generate(location, args.count, len(siblings))


except Exception as e:
    traceback.print_exc()
    print(str(e))
Each Sound SFX is using a Synth One source plug-in, and the objects that are generated make great use of RTPCs and random modulation. 6 | 7 | The script is a demonstration of the [ak.wwise.core.object.set]( 8 | https://www.audiokinetic.com/library/edge/?source=SDK&id=ak_wwise_core_object_set.html) function in WAAPI. 9 | 10 | ## Prerequisites 11 | 12 | * Wwise 2022.1.0 or more recent 13 | - [Python 3.6 or more recent](https://www.python.org/downloads/) 14 | - [waapi-client python library](https://pypi.org/project/waapi-client/) 15 | 16 | ## How to use 17 | 18 | * Download or clone the repository 19 | * Start Wwise and open a project 20 | * Run the script: 21 | `py -3 waapi-sfx.py` 22 | * Play the new sounds in the `\Actor-Mixer Hierarchy\Default Work Unit` 23 | 24 | ## Learn More 25 | 26 | - [WAAPI Reference](https://www.audiokinetic.com/library/edge/?source=SDK&id=waapi_functions_index.html) - Learn the details of each WAAPI functions. 27 | - [ak.wwise.core.object.set]( 28 | https://www.audiokinetic.com/library/edge/?source=SDK&id=ak_wwise_core_object_set.html) - Learn more about ak.wwise.core.object.set. 29 | - [Importing Audio Files and Creating Structures](https://www.audiokinetic.com/library/edge/?source=SDK&id=waapi_import.html) - Learn how to create Wwise structures. 30 | - [Synth One Reference](https://www.audiokinetic.com/library/edge/?source=SDK&id=wwiseobject_source_wwise_synth_one.html) - Learn about Synth One properties. 31 | - [Wwise Object Reference](https://www.audiokinetic.com/library/edge/?source=SDK&id=wobjects_index.html) - Learn about Wwise objects and their properties. -------------------------------------------------------------------------------- /report-language-duration-inconsistency/README.md: -------------------------------------------------------------------------------- 1 | # report-language-duration-inconsistency 2 | 3 | This script is a validation tool designed to work with the currently opened Wwise project. 
It analyzes Sound Voice objects in the project to determine if their playback durations fall within an acceptable ratio range compared to the reference language. The script is particularly useful for validating multi-language audio projects, ensuring consistency in playback durations across different languages. 4 | 5 | ## Features 6 | * Validate Sound Voice objects against a reference language's playback duration. 7 | * Specify a custom path within the project for validation or scan the entire project. 8 | * Define minimum and maximum acceptable duration ratio thresholds. 9 | * Validate a specific language or all languages in the project. 10 | * Report missing sources or violations of duration thresholds. 11 | 12 | ## Overview 13 | ``` 14 | usage: . [-h] [--language [LANGUAGE]] [--path [PATH]] [--min_threshold [MIN_THRESHOLD]] [--max_threshold [MAX_THRESHOLD]] 15 | 16 | Validate Sound Voice objects against the reference language's playback duration and report missing sources or violations of duration thresholds. 17 | 18 | options: 19 | -h, --help show this help message and exit 20 | --language [LANGUAGE] 21 | Optional. Specify the language to validate (e.g., en, fr). 22 | If omitted, all languages will be validated against the reference language. 23 | --path [PATH] Optional. Specify the path to enumerate Sound Voice objects 24 | If omitted, the entire project is scanned. 25 | --min_threshold [MIN_THRESHOLD] 26 | Optional. Minimum accepted duration ratio as a percentage (default: 60). 27 | --max_threshold [MAX_THRESHOLD] 28 | Optional. Maximum accepted duration ratio as a percentage (default: 140). 29 | ``` 30 | 31 | ## Requirements 32 | 33 | * Wwise 2023.1.0+ 34 | * Python 3.6+ 35 | * Python packages: 36 | 37 | `py -3 -m pip install waapi-client` 38 | 39 | ## Instructions 40 | 41 | **Note**: Refer to installation instructions in [waapi-python-tools](../README.md). 42 | 43 | 1. Open a Wwise project. 44 | 2. Run the script. 
45 | 46 | 47 | ## Examples 48 | 49 | Validate all languages in the project against the reference language: 50 | ``` 51 | python validate_audio_duration.py 52 | ``` 53 | 54 | Validate the French language: 55 | ``` 56 | python validate_audio_duration.py --language fr 57 | ``` 58 | 59 | Validate languages under a specific path: 60 | 61 | ``` 62 | python validate_audio_duration.py --path "\Actor-Mixer Hierarchy\Characters" 63 | ``` 64 | 65 | Use custom thresholds for duration ratios: 66 | ``` 67 | python validate_audio_duration.py --min_threshold 80 --max_threshold 120 68 | ``` 69 | -------------------------------------------------------------------------------- /report-language-duration-inconsistency/__main__.py: -------------------------------------------------------------------------------- 1 | from waapi import WaapiClient, CannotConnectToWaapiException 2 | from pprint import pprint 3 | import argparse 4 | 5 | # Define arguments for the script 6 | parser = argparse.ArgumentParser(description="Validate Sound Voice objects against the reference language's playback duration and report missing sources or violations of duration thresholds.") 7 | parser.add_argument('--language', const=1, default=None, type=str, nargs='?', help='Optional. Specify the language to validate (e.g., en, fr). If omitted, all languages will be validated against the reference language.') 8 | parser.add_argument('--path', const=1, default=None, type=str, nargs='?', help='Optional. Specify the path to enumerate Sound Voice objects (e.g., "\Actor-Mixer Hierarchy\Character\Voices"). If omitted, the entire project is scanned.') 9 | parser.add_argument('--min_threshold', const=1, default=60, type=float, nargs='?', help='Optional. Minimum accepted duration ratio as a percentage (default: 60).') 10 | parser.add_argument('--max_threshold', const=1, default=140, type=float, nargs='?', help='Optional. 
def get_language_name(id, languages):
    """Return the name of the language with the given id, or None if not found.

    ROBUSTNESS FIX: the original indexed the next() result directly and raised
    TypeError when the id was absent.
    """
    language = next((language for language in languages if language["id"] == id), None)
    return language["name"] if language is not None else None


def get_language_id(name, languages):
    """Return the id of the language with the given name, or None if not found."""
    language = next((language for language in languages if language["name"] == name), None)
    return language["id"] if language is not None else None


def main():
    """Parse CLI options, connect to WAAPI and report duration inconsistencies."""
    parser.add_argument('--max_threshold', const=1, default=140, type=float, nargs='?', help='Optional. Maximum accepted duration ratio as a percentage (default: 140).')

    args = parser.parse_args()

    # Convert percentages to ratios
    min_threshold = args.min_threshold / 100
    max_threshold = args.max_threshold / 100

    # Default to scanning the whole Actor-Mixer Hierarchy.
    # (Previously F"\Actor-Mixer Hierarchy": a pointless f-string with an
    # invalid "\A" escape sequence.)
    path = args.path if args.path is not None else "\\Actor-Mixer Hierarchy"

    try:
        # Connecting to Waapi using default URL
        with WaapiClient() as client:

            projectInfo = client.call("ak.wwise.core.getProjectInfo")

            languages = []

            if args.language is not None:
                # Validate the requested language exists in the project
                if not any(language["name"] == args.language for language in projectInfo["languages"]):
                    raise ValueError("The language is invalid.")
                languages.append(get_language_id(args.language, projectInfo["languages"]))
            else:
                # Take all languages from the project
                languages = [language["id"] for language in projectInfo["languages"]]

            ref_language_name = get_language_name(projectInfo["referenceLanguageId"], projectInfo["languages"])

            # Query the voices of the reference language
            get_args = {"waql": f"$ \"{path}\" select descendants where nodeType = \"Sound Voice\""}
            get_options = {
                "return": ["name", "id", "path", "(activeSource != null) as hasSource", "duration.max as duration"],
                "language": projectInfo["referenceLanguageId"]}
            references_voices = client.call("ak.wwise.core.object.get", get_args, options=get_options)['return']

            voices_per_id = {voice["id"]: voice for voice in references_voices}

            print(f"Checking {ref_language_name}...")
            for ref_voice in references_voices:
                if not ref_voice["hasSource"]:
                    print(f"{ref_voice['path']}: Reference language '{ref_language_name}' does not have a source.")

            for language in languages:
                if language == projectInfo["referenceLanguageId"]:
                    continue

                language_name = get_language_name(language, projectInfo["languages"])
                print(f"Processing {language_name}...")

                # Re-query the same voices in the target language
                get_options["language"] = language
                voices = client.call("ak.wwise.core.object.get", get_args, options=get_options)['return']

                for voice in voices:
                    ref_voice = voices_per_id.get(voice["id"], None)
                    if ref_voice is None:
                        # BUG FIX: the original message said "The language is
                        # invalid", which does not describe this failure.
                        raise KeyError(f"Voice '{voice['id']}' was not found in the reference language query.")

                    if not ref_voice["hasSource"]:
                        continue

                    if not voice["hasSource"]:
                        print(f"{voice['path']}: Language '{language_name}' does not have a source.")
                        continue

                    if not ref_voice["duration"]:
                        # Guard against division by zero for an empty reference source
                        print(f"{voice['path']}: Reference duration is zero; cannot compute ratio.")
                        continue

                    ratio = voice["duration"] / ref_voice["duration"]
                    if ratio < min_threshold or ratio > max_threshold:
                        print(f"{voice['path']}: Voice duration of {voice['duration']} for language '{language_name}' exceeds threshold ratio from reference duration {ref_voice['duration']} with ratio {ratio * 100:.0f}%.")


    except Exception as e:
        print(str(e))


if __name__ == "__main__":
    # Import-safe entry point: running as a script behaves exactly as before.
    main()
args = parser.parse_args()

try:
    # Connecting to Waapi using default URL
    with WaapiClient() as client:

        # Obtain more information for all objects being passed
        get_args = {
            "from": {"id": args.id},
        }
        options = {
            "return": ['name', 'notes', 'type', '@IsVoice', 'path']
        }
        get_result = client.call("ak.wwise.core.object.get", get_args, options=options)

        # speak.ps1 lives next to this script
        script_dir = os.path.dirname(os.path.realpath(__file__))
        speak_script_path = os.path.join(script_dir, 'speak.ps1')

        imports = []

        for obj in get_result['return']:

            # ROBUSTNESS FIX: speak.ps1 declares $text as a mandatory
            # parameter, so an object with an empty Notes field made the whole
            # batch fail via CalledProcessError. Skip those objects instead.
            if not obj['notes']:
                print('Skipping {0}: the Notes field is empty.'.format(obj['name']))
                continue

            # Voice objects go to the language folder, SFX objects to SFX
            language_dir = "Voices/English(US)" if obj['@IsVoice'] else "SFX"
            wav_file = os.path.join(args.original, language_dir, obj['name']) + '.wav'

            print('Generating {0}...'.format(wav_file))

            # Execute the powershell script with the Speech Synthesizer from .NET
            subprocess.check_output(["powershell.exe", '-executionpolicy', 'bypass', '-File', speak_script_path, wav_file, obj['notes']])

            imports.append({
                "audioFile": wav_file,
                "objectPath": obj['path'] + '\\' + obj['name'],
                "importLanguage": "English(US)" if obj['@IsVoice'] else "SFX"
            })

        # Import the generated wav files into Wwise (skip the call entirely
        # when nothing was generated)
        if imports:
            import_args = {
                "importOperation": "useExisting",
                "default": {},
                "imports": imports
            }
            client.call("ak.wwise.core.audio.import", import_args)


except CannotConnectToWaapiException:
    print("Could not connect to Waapi: Is Wwise running and Wwise Authoring API enabled?")
6 | 7 | Demonstrates: 8 | 9 | - WAAPI [ak.wwise.core.object.get](https://www.audiokinetic.com/library/edge/?source=SDK&id=ak__wwise__core__object__get.html) 10 | - WAAPI [ak.wwise.core.audio.import](https://www.audiokinetic.com/library/edge/?source=SDK&id=ak__wwise__core__audio__import.html) 11 | - Text to Speech using Windows Powershell and SpeechSynthesizer 12 | - Wwise [Command Add-ons](https://www.audiokinetic.com/fr/library/edge/?source=SDK&id=defining_custom_commands.html) 13 | 14 | ## Requirements 15 | 16 | - Wwise 2019.2.x or more recent 17 | - [Python 3.6 or more recent](https://www.python.org/downloads/) 18 | - [waapi-client python library](https://pypi.org/project/waapi-client/) 19 | - Windows 10 and Windows Powershell 20 | 21 | ## Setup 22 | 23 | 1. Install python 3.6 or more recent 24 | 2. Install python dependencies: 25 | 26 | `py -3 -m pip install waapi-client` 27 | 3. Create the `Add-ons` folder under `%APPDATA%\Audiokinetic\Wwise` 28 | 4. Unzip the git repository under: `%APPDATA%\Audiokinetic\Wwise\Add-ons` 29 | 30 | **Note**: ensure the `Commands` and `waapi-text-to-speech` folders are directly located under the `Add-ons` folder. 31 | 32 | 5. Restart Wwise or run the command **Command Add-ons/Reload** 33 | 34 | ## How to use 35 | 36 | 1. Create a **Sound SFX** or **Sound Voice** object in the project. 37 | 2. Type some text in the **Notes** field. 38 | 3. Right-click the object, and select **Generate Text-to-Speech**. 39 | 40 | ## How it works 41 | 42 | The source code is located in [main.py](waapi-text-to-speech/main.py). 43 | 44 | This script is using WAAPI and the Command Add-ons system. It will retrieve the selection from executed command and generate a WAV file for each selected Sound objects using Windows text to speech. The WAV files will be automatically imported in the project with WAAPI. 45 | 46 | Refer to this [blog article](https://blog.audiokinetic.com/waapi-three-open-source-projects-for-wwise-authoring-api/) for more information. 
param (
    [Parameter(Mandatory=$true)][string]$path,
    [Parameter(Mandatory=$true)][string]$text
)

# Render $text to a WAV file at $path using the .NET SpeechSynthesizer.
Add-Type -AssemblyName System.Speech;
$synth = New-Object System.Speech.Synthesis.SpeechSynthesizer;
try {
    $synth.SetOutputToWaveFile($path);
    $synth.Speak($text);
}
finally {
    # ROBUSTNESS FIX: Dispose in a finally block so the synthesizer (and the
    # open WAV file handle) is released even if Speak throws.
    $synth.Dispose();
}