├── .devcontainer ├── Dockerfile ├── devcontainer.json └── on_create.sh ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── dam_song_tools_cli └── cli.py ├── midi ├── event.py ├── time_converter.py └── utils.py ├── mtf └── saiten_ref_conversion.py ├── okd ├── __init__.py ├── adpcm.py ├── chunks │ ├── __init__.py │ ├── adpcm_chunk.py │ ├── chunk_base.py │ ├── extended_p_track_info_chunk.py │ ├── generic_chunk.py │ ├── m_track_chunk.py │ ├── okd_chunk.py │ ├── p3_track_info_chunk.py │ ├── p_track_chunk.py │ ├── p_track_info_chunk.py │ └── utils.py ├── dump_binary.py ├── m_track_conversion.py ├── mmt_tg │ ├── __init__.py │ ├── midi_parameter_change_table.py │ └── mmt_tg.py ├── oka_file.py ├── okd_file.py ├── okd_file_scramble.py ├── okd_midi.py ├── okd_scramble_pattern.py ├── p_track_conversion.py └── utils.py ├── poetry.lock ├── pyproject.toml ├── sprc └── header.py └── test ├── data └── p_track.mid └── test_okd_midi.py /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM python:3.13-bookworm 4 | 5 | ARG UID=1000 6 | ARG GID=1000 7 | 8 | RUN --mount=type=cache,target=/var/lib/apt/,sharing=locked \ 9 | --mount=type=cache,target=/var/cache/apt/,sharing=locked \ 10 | apt-get update && apt-get install -y --no-install-recommends \ 11 | # For development 12 | sudo 13 | 14 | RUN groupadd -g $GID python \ 15 | && useradd -m -s /bin/bash -u $UID -g $GID python \ 16 | && echo 'python ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 17 | 18 | USER python 19 | 20 | # Install Poetry 21 | RUN curl -sSL https://install.python-poetry.org | python3 - 22 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dam-song-tools", 3 | "build": { 4 | "dockerfile": "Dockerfile" 5 | }, 6 | "workspaceFolder": "/workspace/", 7 | "remoteUser": "python", 8 | "mounts": [ 9 | "source=${localWorkspaceFolder},target=/workspace/,type=bind,consistency=cached" 10 | ], 11 | "onCreateCommand": ".devcontainer/on_create.sh", 12 | "customizations": { 13 | "vscode": { 14 | "extensions": [ 15 | "ms-azuretools.vscode-docker", 16 | "esbenp.prettier-vscode", 17 | "ms-python.python", 18 | "ms-python.black-formatter", 19 | "njpwerner.autodocstring" 20 | ] 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /.devcontainer/on_create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export POETRY_VIRTUALENVS_IN_PROJECT=1 4 | poetry install 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM python:3.13-slim 4 | 5 | ARG UID=1000 6 | ARG GID=1000 7 | 8 | RUN --mount=type=cache,target=/var/lib/apt/,sharing=locked \ 9 | --mount=type=cache,target=/var/cache/apt/,sharing=locked \ 10 | apt-get update && apt-get install -y --no-install-recommends \ 11 | curl 12 | 13 | RUN groupadd -g $GID python \ 14 | && useradd -m -s /bin/bash -u $UID -g $GID python 15 | 16 | USER python 17 | 18 | # Install Poetry 19 | RUN curl -sSL https://install.python-poetry.org | python - 20 | ENV PATH=/home/python/.local/bin:$PATH 21 | 22 | WORKDIR /app 23 | 24 | COPY pyproject.toml poetry.lock README.md /app/ 25 | COPY okd/ okd/ 26 | COPY sprc_header/ sprc_header/ 27 | COPY dam_song_tools_cli/ dam_song_tools_cli/ 28 | 29 | RUN poetry install 30 | 31 | ENTRYPOINT [ "poetry", "run", "dam-song-tools" ] 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024-2025 soltia48 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # dam-song-tools 2 | 3 | Tools for DAM Karaoke Song data 4 | 5 | ## !! Important notes !! 6 | 7 | This software is developed for technical research on DAM Karaoke machines. 8 | 9 | The Karaoke song data normally recorded on DAM Karaoke machines is protected by copyright. You must handle it in accordance with your local laws and regulations. 10 | 11 | ## [Demonstration video](https://twitter.com/soltia48/status/1620095004374093824) 12 | 13 | In this video, a song not normally included in DAM Karaoke machines, "This is an Attack," is played and scored on that machine. 14 | 15 | ## Summary 16 | 17 | This software reads and writes karaOKe Data (OKD) files compatible with DAM Karaoke machines.
18 | 19 | ## Usage 20 | 21 | ### dump-okd 22 | 23 | Dump chunks of an OKD 24 | 25 | ``` 26 | $ dam-song-tools dump-okd --help 27 | NAME 28 | dam-song-tools dump-okd - Dump chunks of a OKD 29 | 30 | SYNOPSIS 31 | dam-song-tools dump-okd OKD_PATH OUTPUT_DIR_PATH 32 | 33 | DESCRIPTION 34 | Dump OKD 35 | 36 | POSITIONAL ARGUMENTS 37 | OKD_PATH 38 | Input OKD path 39 | OUTPUT_DIR_PATH 40 | Output directory path 41 | 42 | NOTES 43 | You can also use flags syntax for POSITIONAL ARGUMENTS 44 | ``` 45 | 46 | ### pack-okd 47 | 48 | Pack an OKD by directly providing the required data for each chunk 49 | 50 | ``` 51 | $ dam-song-tools pack-okd --help 52 | NAME 53 | dam-song-tools pack-okd - Pack a OKD by directly inputting a required data in each chunk 54 | 55 | SYNOPSIS 56 | dam-song-tools pack-okd OKD_PATH [CHUNK_PATHS]... 57 | 58 | DESCRIPTION 59 | Pack OKD 60 | 61 | POSITIONAL ARGUMENTS 62 | OKD_PATH 63 | Output OKD path 64 | CHUNK_PATHS 65 | Input chunk paths 66 | 67 | FLAGS 68 | -s, --scramble=SCRAMBLE 69 | Default: False 70 | Scramble. Defaults to False. 71 | 72 | NOTES 73 | You can also use flags syntax for POSITIONAL ARGUMENTS 74 | ``` 75 | 76 | ### okd-to-midi 77 | 78 | Convert an OKD to a Standard MIDI File 79 | 80 | ``` 81 | $ dam-song-tools okd-to-midi --help 82 | NAME 83 | dam-song-tools okd-to-midi - Convert a OKD to a Standard MIDI File 84 | 85 | SYNOPSIS 86 | dam-song-tools okd-to-midi OKD_PATH MIDI_PATH 87 | 88 | DESCRIPTION 89 | Convert a OKD to a Standard MIDI File 90 | 91 | POSITIONAL ARGUMENTS 92 | OKD_PATH 93 | Input OKD path 94 | MIDI_PATH 95 | Output MIDI path 96 | 97 | FLAGS 98 | -s, --sysex_to_text=SYSEX_TO_TEXT 99 | Default: True 100 | Convert SysEx Messages to Text Meta Messages 101 | 102 | NOTES 103 | You can also use flags syntax for POSITIONAL ARGUMENTS 104 | ``` 105 | 106 | ### midi-to-okd 107 | 108 | Convert a Standard MIDI File to an OKD 109 | 110 | ``` 111 | $ dam-song-tools midi-to-okd --help 112 | NAME 113 | dam-song-tools midi-to-okd - Convert a Standard MIDI File to a OKD 114 | 115 | SYNOPSIS 116 | dam-song-tools midi-to-okd MIDI_PATH PLAYING_OKD_PATH P3_OKD_PATH 117 | 118 | DESCRIPTION 119 | Convert a Standard MIDI File to a OKD 120 | 121 | POSITIONAL ARGUMENTS 122 | MIDI_PATH 123 | Type: str 124 | Input MIDI file path 125 | PLAYING_OKD_PATH 126 | Type: str 127 | Output Playing OKD path 128 | P3_OKD_PATH 129 | Type: str 130 | Output P3 OKD path 131 | 132 | FLAGS 133 | -s, --scramble=SCRAMBLE 134 | Default: False 135 | Scramble. Defaults to False. 136 | 137 | NOTES 138 | You can also use flags syntax for POSITIONAL ARGUMENTS 139 | ``` 140 | 141 | ## How to create MIDI data for composing 142 | 143 | ### MIDI port and track map 144 | 145 | - Port 0, Track 0-15: Instrument 146 | - Port 1, Track 0-7,9-15: Instrument 147 | - Port 1, Track 8: Guide melody 148 | - Port 15, Track 0: M-Track 149 | 150 | ### P-Track 151 | 152 | The P(laying)-Track holds the performance data of a song. 153 | 154 | ### M-Track 155 | 156 | The M(arking)-Track contains the list of hook sections, the two-chorus fadeout position, and other markers. 157 | The note map used in the MIDI file for composing is as follows. 158 | 159 | - Hook section: C3 160 | - Two-chorus fadeout position: C5 (Note on alone is sufficient) 161 | 162 | Please check [the test data](test/data/p_track.mid) for a complete example; a minimal programmatic sketch follows below.
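The following is a minimal sketch (not part of this project's tooling) of how such a MIDI file might be laid out with [mido](https://mido.readthedocs.io/), following the port/track map and note map above. The file name, tempo, program number, and note lengths are placeholders, and the note numbers 48 for C3 and 72 for C5 assume the "middle C = C4 = 60" convention used by the bundled test data.

```python
import mido

midi = mido.MidiFile(ticks_per_beat=480)
beat = midi.ticks_per_beat

# Conductor track: tempo and time signature meta messages
meta = mido.MidiTrack()
meta.append(mido.MetaMessage("set_tempo", tempo=mido.bpm2tempo(120), time=0))
meta.append(mido.MetaMessage("time_signature", numerator=4, denominator=4, time=0))
midi.tracks.append(meta)

# Instrument track: Port 0, Track 0
instrument = mido.MidiTrack()
instrument.append(mido.MetaMessage("midi_port", port=0, time=0))
instrument.append(mido.Message("program_change", channel=0, program=0, time=0))
instrument.append(mido.Message("note_on", channel=0, note=64, velocity=100, time=0))
instrument.append(mido.Message("note_off", channel=0, note=64, velocity=0, time=beat))
midi.tracks.append(instrument)

# Guide melody track: Port 1, Track 8
melody = mido.MidiTrack()
melody.append(mido.MetaMessage("midi_port", port=1, time=0))
melody.append(mido.Message("note_on", channel=8, note=60, velocity=100, time=0))
melody.append(mido.Message("note_off", channel=8, note=60, velocity=0, time=beat))
midi.tracks.append(melody)

# M-Track: Port 15, Track 0
m_track = mido.MidiTrack()
m_track.append(mido.MetaMessage("midi_port", port=15, time=0))
# Hook section: a held C3 (note 48) spanning the section
m_track.append(mido.Message("note_on", channel=0, note=48, velocity=100, time=0))
m_track.append(mido.Message("note_off", channel=0, note=48, velocity=0, time=8 * beat))
# Two-chorus fadeout position: C5 (note 72); note on alone is sufficient
m_track.append(mido.Message("note_on", channel=0, note=72, velocity=100, time=4 * beat))
midi.tracks.append(m_track)

midi.save("my_song.mid")
```

The resulting file can then be converted with `dam-song-tools midi-to-okd my_song.mid playing.okd p3.okd` (output file names are up to you), as described in the `midi-to-okd` section above.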
163 | 164 | ## List of verified DAM Karaoke machine 165 | 166 | - DAM-XG5000[G,R] (LIVE DAM [(GOLD EDITION|RED TUNE)]) 167 | - DAM-XG7000[Ⅱ] (LIVE DAM STADIUM [STAGE]) 168 | - DAM-XG8000[R] (LIVE DAM Ai[R]) 169 | 170 | ## Authors 171 | 172 | - KIRISHIKI Yudai 173 | 174 | ## Thanks 175 | 176 | - [Nurupo](https://github.com/gta191977649) - Author of the MIDI file ["This is an Attack"](https://github.com/gta191977649/midi_godekisenda) from which [the test data](test/data/p_track.mid) was derived 177 | 178 | ## License 179 | 180 | [MIT](https://opensource.org/licenses/MIT) 181 | 182 | Copyright (c) 2024-2025 KIRISHIKI Yudai 183 | -------------------------------------------------------------------------------- /dam_song_tools_cli/cli.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import fire 3 | import logging 4 | import mido 5 | import numpy as np 6 | import os 7 | import json 8 | import soundfile as sf 9 | from typing import Any 10 | 11 | from okd import ( 12 | YksOkdHeader, 13 | OkdFile, 14 | GenericChunk, 15 | MTrackInterpretation, 16 | MTrackChunk, 17 | PTrackInfoChunk, 18 | ExtendedPTrackInfoChunk, 19 | P3TrackInfoChunk, 20 | PTrackChunk, 21 | AdpcmChunk, 22 | okd_to_midi, 23 | midi_to_okds, 24 | ) 25 | 26 | 27 | def default(item: Any): 28 | match item: 29 | case bytes(): 30 | return item.hex(" ").upper() 31 | case _ if dataclasses.is_dataclass(item): 32 | return dataclasses.asdict(item) 33 | case _: 34 | raise TypeError(type(item)) 35 | 36 | 37 | class Cli: 38 | """DAM OKD Tools CLI 39 | 40 | Args: 41 | log_level (str, optional): Log level. Defaults to "INFO". {CRITICAL|FATAL|ERROR|WARN|WARNING|INFO|DEBUG|NOTSET} 42 | """ 43 | 44 | @staticmethod 45 | def __config_logger(level: str) -> None: 46 | """Config logger 47 | 48 | Args: 49 | level (str): Log level 50 | """ 51 | 52 | logging.basicConfig( 53 | level=level, 54 | format="[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s", 55 | ) 56 | 57 | def __init__(self, log_level="INFO"): 58 | """DAM OKD Tools CLI 59 | 60 | Args: 61 | log_level (str, optional): Log level. Defaults to "INFO". {CRITICAL|FATAL|ERROR|WARN|WARNING|INFO|DEBUG|NOTSET} 62 | """ 63 | 64 | Cli.__config_logger(log_level) 65 | self.__logger = logging.getLogger(__name__) 66 | 67 | def dump_okd(self, okd_path, output_dir_path) -> None: 68 | """Dump chunks of a OKD 69 | 70 | Args: 71 | okd_path (str): Input OKD path 72 | output_dir_path (str): Output directory path 73 | 74 | Raises: 75 | ValueError: Argument `okd_path` must be str. 76 | ValueError: Argument `output_directory_path` must be str. 77 | """ 78 | 79 | if not isinstance(okd_path, str): 80 | raise ValueError("Argument `okd_path` must be str.") 81 | if not isinstance(output_dir_path, str): 82 | raise ValueError("Argument `output_directory_path` must be str.") 83 | 84 | os.makedirs(output_dir_path, exist_ok=True) 85 | 86 | with open(okd_path, "rb") as okd_file: 87 | okd_file = OkdFile.read(okd_file) 88 | self.__logger.info(f"OKD loaded. header={okd_file.header}") 89 | 90 | for chunk in okd_file.chunks: 91 | chunk_id_hex = chunk.id.hex().upper() 92 | self.__logger.info( 93 | f"{type(chunk).__name__} found. 
id={chunk.id} (0x{chunk_id_hex})" 94 | ) 95 | if isinstance(chunk, GenericChunk): 96 | output_path = os.path.join( 97 | output_dir_path, 98 | "chunk_0x" + chunk.id.hex().upper() + ".bin", 99 | ) 100 | with open(output_path, "wb") as output_file: 101 | output_file.write(chunk.id) 102 | output_file.write(chunk.payload) 103 | elif isinstance(chunk, MTrackChunk): 104 | m_track_interpritation = MTrackInterpretation.from_track(chunk) 105 | 106 | track_number = chunk.id[3] 107 | output_path = os.path.join( 108 | output_dir_path, "m_track_" + str(track_number) + ".json" 109 | ) 110 | output_json = json.dumps( 111 | chunk.to_json_serializable(), 112 | indent=2, 113 | ) 114 | with open(output_path, "w") as output_file: 115 | output_file.write(output_json) 116 | 117 | output_path = os.path.join( 118 | output_dir_path, 119 | "m_track_interpretation_" + str(track_number) + ".json", 120 | ) 121 | output_json = json.dumps( 122 | m_track_interpritation, 123 | indent=2, 124 | default=default, 125 | ) 126 | with open(output_path, "w") as output_file: 127 | output_file.write(output_json) 128 | elif isinstance(chunk, PTrackInfoChunk): 129 | output_path = os.path.join(output_dir_path, "p_track_info.json") 130 | output_json = json.dumps( 131 | chunk, 132 | indent=2, 133 | default=default, 134 | ) 135 | with open(output_path, "w") as output_file: 136 | output_file.write(output_json) 137 | elif isinstance(chunk, ExtendedPTrackInfoChunk): 138 | output_path = os.path.join( 139 | output_dir_path, "extended_p_track_info.json" 140 | ) 141 | output_json = json.dumps( 142 | chunk, 143 | indent=2, 144 | default=default, 145 | ) 146 | with open(output_path, "w") as output_file: 147 | output_file.write(output_json) 148 | elif isinstance(chunk, P3TrackInfoChunk): 149 | output_path = os.path.join(output_dir_path, "p3_track_info.json") 150 | output_json = json.dumps( 151 | chunk, 152 | indent=2, 153 | default=default, 154 | ) 155 | with open(output_path, "w") as output_file: 156 | output_file.write(output_json) 157 | elif isinstance(chunk, PTrackChunk): 158 | track_number = chunk.id[3] 159 | output_path = os.path.join( 160 | output_dir_path, "p_track_" + str(track_number) + ".json" 161 | ) 162 | output_json = json.dumps( 163 | chunk.to_json_serializable(), 164 | indent=2, 165 | ) 166 | with open(output_path, "w") as output_file: 167 | output_file.write(output_json) 168 | elif isinstance(chunk, AdpcmChunk): 169 | for i, track in enumerate(chunk.tracks): 170 | output_path = os.path.join(output_dir_path, f"adpcm_{i}.wav") 171 | samples = track.decode() 172 | samples = np.array(samples, "int16") 173 | sf.write(output_path, samples, 22050) 174 | else: 175 | self.__logger.error("Unknown chunk type detected.") 176 | 177 | def pack_okd(self, okd_path, *chunk_paths, scramble=False): 178 | """Pack a OKD by directly inputting the required data in chunks 179 | 180 | Args: 181 | okd_path (str): Output OKD path 182 | chunk_paths (*str): Input chunk paths 183 | scramble (bool, optional): Scramble. Defaults to False. 184 | 185 | Raises: 186 | ValueError: Argument `okd_path` must be str. 187 | ValueError: Argument `chunk_paths` must be *str. 188 | ValueError: Argument `scramble` must be bool. 
189 | """ 190 | 191 | if not isinstance(okd_path, str): 192 | raise ValueError("Argument `output` must be str.") 193 | if not isinstance(scramble, bool): 194 | raise ValueError("Argument `scramble` must be bool.") 195 | 196 | for chunk_path in chunk_paths: 197 | if not isinstance(chunk_path, str): 198 | raise ValueError("Argument `input` must be *str.") 199 | 200 | chunks: list[GenericChunk] = [] 201 | with open(chunk_path, "rb") as input_file: 202 | chunk = GenericChunk.read(input_file) 203 | chunk_id_hex = chunk.id.hex().upper() 204 | self.__logger.info(f"Add chunk {id} (0x{chunk_id_hex}).") 205 | chunks.append(chunk) 206 | 207 | header = YksOkdHeader(0, "YKS-1 v6.0v110", 0, 0, 0) 208 | self.__logger.info(f"Set header. header={header}") 209 | okd = OkdFile(header, chunks) 210 | with open(okd_path, "wb") as output_file: 211 | okd.write(output_file, scramble) 212 | 213 | def okd_to_midi(self, okd_path, midi_path, sysex_to_text=True) -> None: 214 | """Convert a OKD to a Standard MIDI File 215 | 216 | Args: 217 | okd_path (str): Input OKD path 218 | midi_path (str): Output MIDI path 219 | sysex_to_text (bool): Convert SysEx Messages to Text Meta Messages 220 | 221 | Raises: 222 | ValueError: Argument `okd_path` must be str. 223 | ValueError: Argument `midi_path` must be str. 224 | """ 225 | 226 | if not isinstance(okd_path, str): 227 | raise ValueError("Argument `okd_path` must be str.") 228 | if not isinstance(midi_path, str): 229 | raise ValueError("Argument `midi_path` must be str.") 230 | if not isinstance(sysex_to_text, bool): 231 | raise ValueError("Argument `sysex_to_text` must be bool.") 232 | 233 | with open(okd_path, "rb") as okd_file: 234 | okd = OkdFile.read(okd_file) 235 | midi = okd_to_midi(okd, sysex_to_text) 236 | midi.save(midi_path) 237 | 238 | def midi_to_okd( 239 | self, midi_path: str, playing_okd_path: str, p3_okd_path: str, scramble=False 240 | ) -> None: 241 | """Convert a Standard MIDI File to a OKD 242 | 243 | Args: 244 | midi_path (str): Input MIDI file path 245 | playing_okd_path (str): Output Playing OKD path 246 | p3_okd_path (str): Output P3 OKD path 247 | scramble (bool, optional): Scramble. Defaults to False. 248 | 249 | Raises: 250 | ValueError: Argument `midi_path` must be str. 251 | ValueError: Argument `playing_okd_path` must be str. 252 | ValueError: Argument `p3_okd_path` must be str. 253 | ValueError: Argument `scramble` must be bool. 254 | """ 255 | 256 | if not isinstance(midi_path, str): 257 | raise ValueError("Argument `midi_path` must be str.") 258 | if not isinstance(playing_okd_path, str): 259 | raise ValueError("Argument `playing_okd_path` must be str.") 260 | if not isinstance(p3_okd_path, str): 261 | raise ValueError("Argument `p3_okd_path` must be str.") 262 | if not isinstance(scramble, bool): 263 | raise ValueError("Argument `scramble` must be bool.") 264 | 265 | midi = mido.MidiFile(midi_path) 266 | header = YksOkdHeader(0, "YKS-1 v6.0v110", 0, 0, 0) 267 | self.__logger.info(f"Set header. 
header={header}") 268 | playing_okd, p3_okd = midi_to_okds(midi, header) 269 | with open(playing_okd_path, "wb") as playing_okd_file: 270 | self.__logger.info("Write Playing OKD.") 271 | playing_okd.write(playing_okd_file, scramble) 272 | with open(p3_okd_path, "wb") as p3_okd_file: 273 | self.__logger.info("Write P3 OKD.") 274 | p3_okd.write(p3_okd_file, scramble) 275 | 276 | 277 | def main() -> None: 278 | fire.Fire(Cli) 279 | 280 | 281 | if __name__ == "__main__": 282 | main() 283 | -------------------------------------------------------------------------------- /midi/event.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BufferedWriter 3 | 4 | 5 | @dataclass 6 | class MidiEvent: 7 | """MIDI Event""" 8 | 9 | status_byte: int 10 | data_bytes: bytes 11 | 12 | def status_byte_type(self) -> int: 13 | """Get Status Byte type 14 | 15 | Returns: 16 | int: Status Byte type 17 | """ 18 | return self.status_byte & 0xF0 19 | 20 | def channel(self) -> int: 21 | """Get channel 22 | 23 | Returns: 24 | int: Channel 25 | """ 26 | return self.status_byte & 0x0F 27 | 28 | def write(self, stream: BufferedWriter) -> None: 29 | """Write 30 | 31 | Args: 32 | stream (BufferedReader): Output stream 33 | """ 34 | stream.write(self.status_byte.to_bytes()) 35 | stream.write(self.data_bytes) 36 | 37 | def to_bytes(self) -> bytes: 38 | """To bytes 39 | 40 | Returns: 41 | bytes: This instance as bytes 42 | """ 43 | return self.status_byte.to_bytes() + self.data_bytes 44 | 45 | 46 | @dataclass 47 | class MidiTrackEvent(MidiEvent): 48 | """MIDI Track Event""" 49 | 50 | delta_time: int 51 | -------------------------------------------------------------------------------- /midi/time_converter.py: -------------------------------------------------------------------------------- 1 | import mido 2 | 3 | 4 | class MidiTimeConverter: 5 | """MIDI time converter""" 6 | 7 | def __init__(self): 8 | self.ppqn = 480 9 | # Tempo changes (position_ms, tempo_bpm) 10 | self.tempo_changes: list[tuple[int, float]] = [] 11 | 12 | def add_tempo_change(self, position_ms: int, tempo_bpm: float): 13 | """Add a tempo change event at the specified position.""" 14 | self.tempo_changes.append((position_ms, tempo_bpm)) 15 | # Keep tempo changes sorted by position 16 | self.tempo_changes.sort(key=lambda x: x[0]) 17 | 18 | def load_from_midi(self, midi: mido.MidiFile): 19 | """Load tempo changes from a MIDI file 20 | 21 | Parameters: 22 | midi (MidiFile): MIDI file 23 | """ 24 | self.ppqn: int = midi.ticks_per_beat 25 | 26 | current_time_ticks = 0 27 | current_time_ms = 0.0 28 | current_tempo = 500000 # 120 BPM 29 | 30 | # Clear existing tempo changes 31 | self.tempo_changes = [(0, mido.tempo2bpm(current_tempo))] 32 | 33 | # Find the first track with tempo changes 34 | tempo_track = None 35 | for track in midi.tracks: 36 | if any(message.type == "set_tempo" for message in track): 37 | tempo_track = track 38 | break 39 | 40 | if tempo_track: 41 | for message in tempo_track: 42 | current_time_ticks += message.time 43 | 44 | # Convert current position to milliseconds 45 | if message.time > 0: 46 | ms_per_tick = current_tempo / (self.ppqn * 1000) 47 | current_time_ms += message.time * ms_per_tick 48 | 49 | if message.type == "set_tempo": 50 | current_tempo = message.tempo 51 | self.add_tempo_change( 52 | round(current_time_ms), mido.tempo2bpm(current_tempo) 53 | ) 54 | 55 | def ms_to_ticks(self, time_ms: int) -> int: 56 | """Convert milliseconds to MIDI ticks""" 
57 | if not self.tempo_changes: 58 | raise ValueError("No tempo information available") 59 | 60 | total_ticks = 0.0 61 | 62 | # Handle time before first tempo change 63 | if time_ms < self.tempo_changes[0][0]: 64 | return self._calculate_ticks_at_tempo(time_ms, self.tempo_changes[0][1]) 65 | 66 | # Process each tempo section 67 | for i in range(len(self.tempo_changes)): 68 | current_tempo = self.tempo_changes[i][1] 69 | 70 | # Calculate end of current tempo section 71 | section_end = ( 72 | self.tempo_changes[i + 1][0] 73 | if i < len(self.tempo_changes) - 1 74 | else time_ms 75 | ) 76 | section_end = min(section_end, time_ms) 77 | 78 | # Calculate ticks for this section 79 | section_duration = section_end - self.tempo_changes[i][0] 80 | if section_duration > 0: 81 | total_ticks += self._calculate_ticks_at_tempo( 82 | section_duration, current_tempo 83 | ) 84 | 85 | if section_end == time_ms: 86 | break 87 | 88 | return round(total_ticks) 89 | 90 | def _calculate_ticks_at_tempo(self, duration_ms, tempo_bpm) -> float: 91 | """Calculate ticks for a duration at a constant tempo.""" 92 | microseconds_per_beat = 60_000_000 / tempo_bpm 93 | microseconds = duration_ms * 1000 94 | return (microseconds / microseconds_per_beat) * self.ppqn 95 | 96 | def ticks_to_ms(self, ticks: int): 97 | """Convert MIDI ticks to milliseconds""" 98 | if not self.tempo_changes: 99 | raise ValueError("No tempo information available") 100 | 101 | remaining_ticks = ticks 102 | current_time = 0 103 | 104 | for i in range(len(self.tempo_changes)): 105 | current_tempo = self.tempo_changes[i][1] 106 | 107 | # Calculate how many ticks until next tempo change 108 | if i < len(self.tempo_changes) - 1: 109 | section_duration = ( 110 | self.tempo_changes[i + 1][0] - self.tempo_changes[i][0] 111 | ) 112 | section_ticks = self._calculate_ticks_at_tempo( 113 | section_duration, current_tempo 114 | ) 115 | else: 116 | section_ticks = remaining_ticks 117 | 118 | if remaining_ticks <= section_ticks: 119 | # Convert remaining ticks to ms at current tempo 120 | microseconds_per_beat = 60_000_000 / current_tempo 121 | ms = (remaining_ticks * microseconds_per_beat) / (self.ppqn * 1000) 122 | return round(current_time + ms) 123 | 124 | remaining_ticks -= section_ticks 125 | current_time = self.tempo_changes[i + 1][0] 126 | 127 | return round(current_time) 128 | -------------------------------------------------------------------------------- /midi/utils.py: -------------------------------------------------------------------------------- 1 | import mido 2 | 3 | 4 | def is_meta_track(track: mido.MidiTrack) -> bool: 5 | """Check if a MIDI track contains any meta messages. 6 | 7 | Args: 8 | track (mido.MidiTrack): MIDI track to check 9 | 10 | Returns: 11 | bool: True if track contains at least one meta message, False otherwise 12 | """ 13 | return any(isinstance(message, mido.MetaMessage) for message in track) 14 | 15 | 16 | def get_meta_track(tracks: list[mido.MidiTrack]) -> mido.MidiTrack | None: 17 | """Find and return the first meta track from a list of MIDI tracks. 18 | 19 | Args: 20 | tracks: List of MIDI tracks to search through. 21 | 22 | Returns: 23 | mido.MidiTrack | None: The first meta track found, or None if no meta track exists. 24 | """ 25 | return next( 26 | (track for track in tracks if is_meta_track(track)), 27 | None, 28 | ) 29 | 30 | 31 | def get_track_port(track: mido.MidiTrack) -> int | None: 32 | """Retrieves the MIDI port number from a MIDI track. 
33 | 34 | Args: 35 | track (mido.MidiTrack): A MIDI track object to extract port information from. 36 | 37 | Returns: 38 | int | None: The port number if a midi_port message exists, None otherwise. 39 | """ 40 | return next( 41 | (message.port for message in track if message.type == "midi_port"), None 42 | ) 43 | 44 | 45 | def get_track_by_port_channel( 46 | tracks: list[mido.MidiTrack], port: int, channel: int 47 | ) -> mido.MidiTrack | None: 48 | """Find the first MIDI track that matches the specified port and channel numbers. 49 | 50 | Args: 51 | tracks (list[mido.MidiTrack]): List of MIDI tracks to search through 52 | port (int): Target MIDI port number 53 | channel (int): Target MIDI channel number 54 | 55 | Returns: 56 | mido.MidiTrack | None: The first matching MIDI track, or None if no match is found 57 | """ 58 | for track in tracks: 59 | has_matching_port = False 60 | has_matching_channel = False 61 | 62 | has_matching_port = any( 63 | message.type == "midi_port" and message.port == port for message in track 64 | ) 65 | 66 | if has_matching_port: 67 | has_matching_channel = any( 68 | message.type == "note_on" and message.channel == channel 69 | for message in track 70 | ) 71 | 72 | if has_matching_channel: 73 | return track 74 | 75 | 76 | def get_first_and_last_note_times(tracks: list[mido.MidiTrack]): 77 | """Get the absolute time of the first and last notes in a MIDI tracks 78 | 79 | Args: 80 | tracks (list[mido.MidiTrack]): MIDI tracks 81 | 82 | Returns: 83 | tuple: (first_note_time, last_note_time) in seconds. Returns (None, None) if no notes are found 84 | """ 85 | first_note_time = 0xFFFFFFFF 86 | last_note_time = 0 87 | for track in tracks: 88 | absolute_time = 0 89 | for message in track: 90 | absolute_time += message.time 91 | 92 | if message.type == "note_on": 93 | if absolute_time < first_note_time: 94 | first_note_time = absolute_time 95 | if message.type == "note_off": 96 | if absolute_time > last_note_time: 97 | last_note_time = absolute_time 98 | 99 | return first_note_time, last_note_time 100 | 101 | 102 | def get_time_signatures(tracks: list[mido.MidiTrack]) -> list[tuple[int, int, int]]: 103 | """Get time signatures from MIDI tracks 104 | 105 | Args: 106 | tracks: MIDI tracks 107 | 108 | Returns: 109 | list[tuple[int, int, int]]: List of (tick, numerator, denominator) 110 | """ 111 | time_signatures: list[tuple[int, int, int]] = [] 112 | for track in tracks: 113 | absolute_tick = 0 114 | for message in track: 115 | absolute_tick += message.time 116 | if message.type == "time_signature": 117 | time_signatures.append( 118 | (absolute_tick, message.numerator, message.denominator) 119 | ) 120 | return sorted(time_signatures, key=lambda x: x[0]) 121 | -------------------------------------------------------------------------------- /mtf/saiten_ref_conversion.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from enum import Enum 3 | 4 | import mido 5 | 6 | from midi.time_converter import MidiTimeConverter 7 | from midi.utils import ( 8 | get_track_by_port_channel, 9 | get_first_and_last_note_times, 10 | get_time_signatures, 11 | ) 12 | 13 | 14 | class SaitenRefEventType(Enum): 15 | OLD_HAMORUN_OFF_1 = 0x8B 16 | OLD_HAMORUN_ON_1 = 0x9B 17 | OLDHAMORUN_OFF_2 = 0x8C 18 | OLD_HAMORUN_ON_2 = 0x9C 19 | HAMORUN_OFF = 0x8D 20 | HAMORUN_ON = 0x9D 21 | NOTE_OFF = 0x8E 22 | NOTE_ON = 0x9E 23 | PLAY_MARK = 0xFF 24 | 25 | 26 | class PlayMarkType(Enum): 27 | START_OF_SONG = 0x00 28 | END_OF_SONG = 0x01 29 
| BEATMARK_ACCENT = 0x02 30 | BEATMARK_NOT_ACCENT = 0x03 31 | START_OF_VOCAL = 0x04 32 | START_OF_BRIDGE = 0x05 33 | START_OF_ENDING = 0x06 34 | START_OF_LYRICS_PAGE = 0x07 35 | START_OF_SABI = 0x08 36 | END_OF_SABI = 0x09 37 | START_OF_CLIMAX = 0x0A 38 | END_OF_CLIMAX = 0x0B 39 | SECOND_CHORUS_FADEOUT = 0x0C 40 | NOT_PLAY_MARK = 0x7F 41 | 42 | 43 | @dataclass 44 | class SaitenRefEvent: 45 | time: int 46 | event_type: SaitenRefEventType 47 | note_number: int 48 | value: int 49 | 50 | def to_dict(self): 51 | return { 52 | "Clock": self.time, 53 | "msg": [self.event_type.value, self.note_number, self.value], 54 | } 55 | 56 | 57 | MIDI_M_TRACK_PORT = 16 58 | 59 | 60 | def midi_to_saiten_ref(midi: mido.MidiFile) -> list[SaitenRefEvent]: 61 | absolute_time_track: list[SaitenRefEvent] = [] 62 | 63 | midi_time_converter = MidiTimeConverter() 64 | midi_time_converter.load_from_midi(midi) 65 | 66 | melody_track = get_track_by_port_channel(midi.tracks, 1, 8) 67 | if melody_track is None: 68 | raise ValueError("Melody track not found.") 69 | 70 | melody_notes: list[tuple[int, int]] = [] 71 | track_time = 0 72 | for midi_message in melody_track: 73 | track_time += midi_message.time 74 | absolute_time = midi_time_converter.ticks_to_ms(track_time) 75 | 76 | if not isinstance(midi_message, mido.Message): 77 | continue 78 | 79 | if midi_message.type == "note_on": # type: ignore 80 | absolute_time_track.append( 81 | SaitenRefEvent( 82 | absolute_time, SaitenRefEventType.NOTE_ON, midi_message.note, 100 # type: ignore 83 | ) 84 | ) 85 | elif midi_message.type == "note_off": # type: ignore 86 | absolute_time_track.append( 87 | SaitenRefEvent( 88 | absolute_time, SaitenRefEventType.NOTE_OFF, midi_message.note, 100 # type: ignore 89 | ) 90 | ) 91 | 92 | if midi_message.type == "note_on": # type: ignore 93 | current_melody_note_start = absolute_time 94 | current_melody_node_number = midi_message.note # type: ignore 95 | elif ( 96 | midi_message.type == "note_off" # type: ignore 97 | and midi_message.note == current_melody_node_number # type: ignore 98 | ): 99 | melody_notes.append((current_melody_note_start, absolute_time)) # type: ignore 100 | 101 | if len(melody_notes) < 1: 102 | raise ValueError("Melody note not found.") 103 | 104 | m_track = get_track_by_port_channel(midi.tracks, MIDI_M_TRACK_PORT, 0) 105 | 106 | hooks: list[tuple[int, int]] = [] 107 | 108 | two_chorus_fadeout_time = -1 109 | 110 | if m_track is not None: 111 | current_hook_start = -1 112 | track_time = 0 113 | for midi_message in midi.tracks[1]: 114 | track_time += midi_message.time 115 | absolute_time = midi_time_converter.ticks_to_ms(track_time) 116 | 117 | if not isinstance(midi_message, mido.Message): 118 | continue 119 | 120 | if midi_message.type == "note_on": # type: ignore 121 | if midi_message.note == 48: # type: ignore 122 | current_hook_start = absolute_time 123 | elif midi_message.note == 72: # type: ignore 124 | two_chorus_fadeout_time = absolute_time 125 | elif midi_message.type == "note_off": # type: ignore 126 | if midi_message.note == 48: # type: ignore 127 | hooks.append((current_hook_start, absolute_time)) 128 | 129 | first_note_on_tick, last_note_off_tick = get_first_and_last_note_times(midi.tracks) 130 | first_note_on_time = midi_time_converter.ticks_to_ms(first_note_on_tick) 131 | last_note_off_time = midi_time_converter.ticks_to_ms(last_note_off_tick) 132 | 133 | time_signatures = get_time_signatures(midi.tracks) 134 | 135 | if len(time_signatures) > 0: 136 | current_beat_time = 0 137 | current_beat_count = 
time_signatures[0][1] 138 | while current_beat_time < last_note_off_time + 1: 139 | time_signature_time = current_beat_time 140 | time_signature = next( 141 | ( 142 | time_signature 143 | for time_signature in reversed(time_signatures) 144 | if time_signature[0] <= time_signature_time 145 | ), 146 | None, 147 | ) 148 | if time_signature is None: 149 | raise ValueError("Time signature not found.") 150 | 151 | if current_beat_count < time_signature[1]: 152 | absolute_time_track.append( 153 | SaitenRefEvent( 154 | midi_time_converter.ticks_to_ms(current_beat_time), 155 | SaitenRefEventType.PLAY_MARK, 156 | 0x30, 157 | PlayMarkType.BEATMARK_NOT_ACCENT.value, 158 | ) 159 | ) 160 | current_beat_count += 1 161 | else: 162 | absolute_time_track.append( 163 | SaitenRefEvent( 164 | midi_time_converter.ticks_to_ms(current_beat_time), 165 | SaitenRefEventType.PLAY_MARK, 166 | 0x30, 167 | PlayMarkType.BEATMARK_ACCENT.value, 168 | ) 169 | ) 170 | current_beat_count = 1 171 | 172 | current_beat_time += midi.ticks_per_beat 173 | 174 | absolute_time_track.append( 175 | SaitenRefEvent( 176 | midi_time_converter.ticks_to_ms(first_note_on_time), 177 | SaitenRefEventType.PLAY_MARK, 178 | 0x30, 179 | PlayMarkType.START_OF_SONG.value, 180 | ) 181 | ) 182 | absolute_time_track.append( 183 | SaitenRefEvent( 184 | midi_time_converter.ticks_to_ms(last_note_off_time), 185 | SaitenRefEventType.PLAY_MARK, 186 | 0x30, 187 | PlayMarkType.END_OF_SONG.value, 188 | ) 189 | ) 190 | 191 | for hook_start, hook_end in hooks[:-1]: 192 | absolute_time_track.append( 193 | SaitenRefEvent( 194 | midi_time_converter.ticks_to_ms(hook_start), 195 | SaitenRefEventType.PLAY_MARK, 196 | 0x30, 197 | PlayMarkType.START_OF_SABI.value, 198 | ) 199 | ) 200 | absolute_time_track.append( 201 | SaitenRefEvent( 202 | midi_time_converter.ticks_to_ms(hook_end), 203 | SaitenRefEventType.PLAY_MARK, 204 | 0x30, 205 | PlayMarkType.END_OF_SABI.value, 206 | ) 207 | ) 208 | 209 | if len(hooks) > 0: 210 | last_hook_start, last_hook_end = hooks[-1] 211 | absolute_time_track.append( 212 | SaitenRefEvent( 213 | midi_time_converter.ticks_to_ms(last_hook_start), 214 | SaitenRefEventType.PLAY_MARK, 215 | 0x30, 216 | PlayMarkType.START_OF_CLIMAX.value, 217 | ) 218 | ) 219 | absolute_time_track.append( 220 | SaitenRefEvent( 221 | midi_time_converter.ticks_to_ms(last_hook_end), 222 | SaitenRefEventType.PLAY_MARK, 223 | 0x30, 224 | PlayMarkType.END_OF_CLIMAX.value, 225 | ) 226 | ) 227 | 228 | if two_chorus_fadeout_time != -1: 229 | absolute_time_track.append( 230 | SaitenRefEvent( 231 | midi_time_converter.ticks_to_ms(two_chorus_fadeout_time), 232 | SaitenRefEventType.PLAY_MARK, 233 | 0x30, 234 | PlayMarkType.SECOND_CHORUS_FADEOUT.value, 235 | ) 236 | ) 237 | 238 | absolute_time_track.sort(key=lambda absolute_time_event: absolute_time_event.time) 239 | 240 | return absolute_time_track 241 | -------------------------------------------------------------------------------- /okd/__init__.py: -------------------------------------------------------------------------------- 1 | from .okd_file import * 2 | from .oka_file import * 3 | from .chunks import * 4 | 5 | from .utils import okd_to_midi, midi_to_okds 6 | -------------------------------------------------------------------------------- /okd/adpcm.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | import os 3 | from typing import BinaryIO, Self 4 | 5 | FRAMES_PER_FRAME_GROUP = 18 6 | 7 | SUB_FRAMES = 4 8 | SUB_FRAME_NIBBLES = 28 9 | 
SAMPLES_PER_FRAME = SUB_FRAME_NIBBLES * SUB_FRAMES 10 | 11 | SHIFT_LIMIT = 12 12 | INDEX_LIMIT = 3 13 | 14 | K0 = [0.0, 0.9375, 1.796875, 1.53125] 15 | K1 = [0.0, 0.0, -0.8125, -0.859375] 16 | SIGNED_NIBBLES = [0, 1, 2, 3, 4, 5, 6, 7, -8, -7, -6, -5, -4, -3, -2, -1] 17 | 18 | 19 | @dataclass 20 | class AdpcmFrame: 21 | """ADPCM Frame""" 22 | 23 | parameters: bytes 24 | samples: bytes 25 | 26 | @classmethod 27 | def read(cls, stream: BinaryIO) -> Self: 28 | """Read 29 | 30 | Args: 31 | stream (BinaryIO): Input stream 32 | 33 | Returns: 34 | Self: Instance of this class 35 | """ 36 | buffer = stream.read(128) 37 | if len(buffer) < 128: 38 | raise ValueError("Too less read bytes.") 39 | 40 | parameters = buffer[0:16] 41 | samples = buffer[16:128] 42 | 43 | return cls(parameters, samples) 44 | 45 | 46 | class AdpcmDecoder: 47 | """ADPCM Decoder""" 48 | 49 | def __init__(self): 50 | """Constructor""" 51 | self.prev1 = 0 52 | self.prev2 = 0 53 | 54 | @staticmethod 55 | def __clamp16(value: float) -> int: 56 | """Clamp float to signed 16 bit int 57 | 58 | Args: 59 | value (float): float value 60 | 61 | Returns: 62 | int: int value 63 | """ 64 | if value > 32767.0: 65 | return 32767 66 | elif value < -32768.0: 67 | return -32768 68 | else: 69 | return round(value) 70 | 71 | def __decode_sample(self, sp: int, su: int) -> int: 72 | """Decode Sample 73 | 74 | Args: 75 | sp (int): Parameter 76 | su (int): Sample 77 | 78 | Raises: 79 | ValueError: Parameter `shift` out of range. 80 | ValueError: Parameter `index` out of range. 81 | 82 | Returns: 83 | int: Decoded sample 84 | """ 85 | shift = sp & 0x0F 86 | if SHIFT_LIMIT < shift: 87 | raise ValueError("Parameter `shift` out of range.") 88 | index = sp >> 4 89 | if INDEX_LIMIT < index: 90 | raise ValueError("Parameter `index` out of range.") 91 | 92 | sample = SIGNED_NIBBLES[su] << (12 - (shift & 0x1F)) 93 | sample += K0[index] * self.prev1 + K1[index] * self.prev2 94 | sample = AdpcmDecoder.__clamp16(sample) 95 | 96 | self.prev2 = self.prev1 97 | self.prev1 = sample 98 | 99 | return sample 100 | 101 | def __decode_subframe( 102 | self, sp: int, samples: bytes, subframe_index: int, nibble: int 103 | ) -> list[int]: 104 | """Decode Subframe 105 | 106 | Args: 107 | sp (int): Parameter 108 | samples (bytes): Samples 109 | subframe_index (int): Subframe index 110 | nibble (int): Nibble (0: High, 1: Low) 111 | 112 | Returns: 113 | list[int]: Decoded subframe 114 | """ 115 | decoded = [0] * SUB_FRAME_NIBBLES 116 | for i in range(SUB_FRAME_NIBBLES): 117 | su_index = i * SUB_FRAMES + subframe_index 118 | su = samples[su_index] 119 | su = su >> 4 if nibble != 0 else su & 0x0F 120 | decoded[i] = self.__decode_sample(sp, su) 121 | return decoded 122 | 123 | def __decode_frame(self, frame: AdpcmFrame) -> list[int]: 124 | """Decode Frame 125 | 126 | Args: 127 | frame (AdpcmFrame): Frame 128 | 129 | Returns: 130 | list[int]: Decoded Frame 131 | """ 132 | decoded: list[int] = [] 133 | for i in range(SUB_FRAMES): 134 | for j in range(2): 135 | sp_index = j + i * 2 136 | if 2 <= i: 137 | sp_index += 4 138 | sp = frame.parameters[sp_index] 139 | decoded += self.__decode_subframe(sp, frame.samples, i, j) 140 | return decoded 141 | 142 | def __decode_frame_group(self, stream: BinaryIO) -> list[int]: 143 | """Decode Frame Group 144 | 145 | Args: 146 | stream (BinaryIO): Input stream 147 | 148 | Returns: 149 | list[int]: Decoded Frame Group 150 | """ 151 | decoded: list[int] = [] 152 | for _ in range(FRAMES_PER_FRAME_GROUP): 153 | frame = AdpcmFrame.read(stream) 154 | 
decoded += self.__decode_frame(frame) 155 | return decoded 156 | 157 | def decode(self, stream: BinaryIO) -> list[int]: 158 | """Decode 159 | 160 | Args: 161 | stream (BinaryIO): Input stream 162 | 163 | Returns: 164 | list[int]: Decoded samples 165 | """ 166 | decoded: list[int] = [] 167 | while True: 168 | try: 169 | decoded += self.__decode_frame_group(stream) 170 | except ValueError: 171 | break 172 | # Skip null bytes 173 | stream.seek(20, os.SEEK_CUR) 174 | return decoded 175 | -------------------------------------------------------------------------------- /okd/chunks/__init__.py: -------------------------------------------------------------------------------- 1 | from .chunk_base import ChunkBase 2 | from .generic_chunk import GenericChunk 3 | from .p_track_info_chunk import ( 4 | PTrackInfoChannelInfoEntry, 5 | PTrackInfoEntry, 6 | PTrackInfoChunk, 7 | ) 8 | from .p3_track_info_chunk import P3TrackInfoChannelInfoEntry, P3TrackInfoChunk 9 | from .extended_p_track_info_chunk import ( 10 | ExtendedPTrackInfoChannelInfoEntry, 11 | ExtendedPTrackInfoEntry, 12 | ExtendedPTrackInfoChunk, 13 | ) 14 | from .m_track_chunk import ( 15 | MTrackEvent, 16 | MTrackAbsoluteTimeEvent, 17 | MTrackInterpretation, 18 | MTrackChunk, 19 | ) 20 | from .p_track_chunk import PTrackEvent, PTrackAbsoluteTimeEvent, PTrackChunk 21 | from .adpcm_chunk import AdpcmChunk 22 | 23 | from .okd_chunk import OkdChunk 24 | 25 | from .utils import ( 26 | read_chunk, 27 | p_track_info_chunk_by_p_track_chunks, 28 | p3_track_info_chunk_by_p_track_chunks, 29 | ) 30 | -------------------------------------------------------------------------------- /okd/chunks/adpcm_chunk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BytesIO 3 | from typing import BinaryIO, Self 4 | 5 | from ..adpcm import AdpcmDecoder 6 | 7 | from .chunk_base import ChunkBase 8 | from .generic_chunk import GenericChunk 9 | 10 | 11 | @dataclass 12 | class AdpcmChunkTrack: 13 | """ADPCM Chunk""" 14 | 15 | TRACK_ID = b"YAWV" 16 | 17 | data: bytes 18 | 19 | def decode(self) -> list[int]: 20 | """Decode 21 | 22 | Returns: 23 | list[int]: Decoded samples 24 | """ 25 | stream = BytesIO(self.data) 26 | decoder = AdpcmDecoder() 27 | return decoder.decode(stream) 28 | 29 | def write(self, stream: BinaryIO) -> None: 30 | """Write 31 | 32 | Args: 33 | stream (BufferedReader): Output stream 34 | """ 35 | stream.write(AdpcmChunkTrack.TRACK_ID) 36 | stream.write(len(self.data).to_bytes(4, "big")) 37 | stream.write(self.data) 38 | 39 | 40 | @dataclass 41 | class AdpcmChunk(ChunkBase): 42 | """ADPCM Chunk""" 43 | 44 | tracks: list[AdpcmChunkTrack] 45 | 46 | @classmethod 47 | def from_generic(cls, generic: GenericChunk) -> Self: 48 | """From Generic Chunk 49 | 50 | Args: 51 | generic (GenericChunk): Generic Chunk 52 | 53 | Returns: 54 | Self: Instance of this class 55 | """ 56 | stream = BytesIO(generic.payload) 57 | tracks: list[AdpcmChunkTrack] = [] 58 | while True: 59 | buffer = stream.read(8) 60 | if len(buffer) < 8: 61 | break 62 | 63 | chunk_id = buffer[0:4] 64 | if chunk_id == AdpcmChunkTrack.TRACK_ID: 65 | chunk_size = int.from_bytes(buffer[4:8], "big") 66 | chunk_data = stream.read(chunk_size) 67 | if len(chunk_data) < chunk_size: 68 | raise ValueError("Too less read bytes.") 69 | tracks.append(AdpcmChunkTrack(chunk_data)) 70 | else: 71 | raise ValueError(f"Unknown Chunk ID detected. 
chunk_id=`{chunk_id}`") 72 | 73 | return cls(generic.id, tracks) 74 | 75 | def _payload_buffer(self) -> bytes: 76 | buffer = b"" 77 | for track in self.tracks: 78 | buffer += track.data 79 | return buffer 80 | -------------------------------------------------------------------------------- /okd/chunks/chunk_base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from dataclasses import dataclass 3 | import os 4 | from typing import BinaryIO 5 | 6 | 7 | @dataclass 8 | class ChunkBase(ABC): 9 | """Chunk Base Class""" 10 | 11 | END_OF_FILE_MARK = b"\x00\x00\x00\x00" 12 | 13 | id: bytes 14 | 15 | @staticmethod 16 | def __descramble_header(id: bytes, size: int) -> tuple[bytes, int]: 17 | """Descramble Chunk Header 18 | 19 | Args: 20 | id (bytes): ID 21 | size (int): Size 22 | 23 | Returns: 24 | tuple[int, bytes]: ID and Size 25 | """ 26 | # Scrambled YADD chunk header 27 | if id == b"\x4e\x96\x53\x93": 28 | id = b"YADD" 29 | size ^= 0x17D717D7 30 | return id, size 31 | 32 | @staticmethod 33 | def _read_common(stream: BinaryIO) -> tuple[bytes, bytes]: 34 | """Read Common Part 35 | 36 | Args: 37 | stream (BinaryIO): Input stream 38 | 39 | Returns: 40 | tuple[int, bytes]: Chunk ID and Payload 41 | """ 42 | buffer = stream.read(8) 43 | if len(buffer) == 0 or buffer == ChunkBase.END_OF_FILE_MARK: 44 | # End of File 45 | raise ValueError("Reached to End of File.") 46 | if len(buffer) != 8: 47 | stream.seek(-len(buffer), os.SEEK_CUR) 48 | raise ValueError("Reached to End of File.") 49 | id = buffer[0:4] 50 | size = int.from_bytes(buffer[4:8], "big") 51 | id, size = ChunkBase.__descramble_header(id, size) 52 | payload = stream.read(size) 53 | return id, payload 54 | 55 | @staticmethod 56 | def peek_header(stream: BinaryIO) -> tuple[bytes, int] | None: 57 | """Peek Header 58 | 59 | Args: 60 | stream (BinaryIO): Input stream 61 | 62 | Returns: 63 | bytes: ID and Size 64 | """ 65 | buffer = stream.read(8) 66 | stream.seek(-len(buffer), os.SEEK_CUR) 67 | if len(buffer) == 0 or buffer == ChunkBase.END_OF_FILE_MARK: 68 | # End of File 69 | return 70 | if len(buffer) != 8: 71 | # End of File 72 | return 73 | id = buffer[0:4] 74 | size = int.from_bytes(buffer[4:8], "big") 75 | return ChunkBase.__descramble_header(id, size) 76 | 77 | @staticmethod 78 | def __seek_header( 79 | stream: BinaryIO, id: bytes | None = None 80 | ) -> tuple[bytes, int] | None: 81 | """Seek header 82 | 83 | Args: 84 | stream (BinaryIO): Input stream 85 | id (bytes | None, optional): Target ID. Defaults to None. 
86 | 87 | Returns: 88 | tuple[int, int] | None: If ID and size found, else not found 89 | """ 90 | while True: 91 | header = ChunkBase.peek_header(stream) 92 | if header is None: 93 | return 94 | current_id, current_size = header 95 | if id is None: 96 | return (current_id, current_size) 97 | else: 98 | if current_id == id: 99 | return (current_id, current_size) 100 | stream.seek(8 + current_size, os.SEEK_CUR) 101 | 102 | @staticmethod 103 | def index_chunk(stream: BinaryIO) -> list[tuple[int, int, bytes]]: 104 | """Index Chunk 105 | 106 | Args: 107 | stream (BinaryIO): Input stream 108 | 109 | Returns: 110 | list[tuple[int, int, bytes]]: List of offset, size and ID 111 | """ 112 | index: list[tuple[int, int, bytes]] = [] 113 | 114 | id = b"" 115 | last_position = -1 116 | while True: 117 | header = ChunkBase.__seek_header(stream) 118 | if header is None: 119 | break 120 | id, size = header 121 | position = stream.tell() 122 | if last_position != -1: 123 | index.append((last_position, position - last_position, id)) 124 | last_position = position 125 | stream.seek(8 + size, os.SEEK_CUR) 126 | 127 | if last_position != -1: 128 | position = stream.tell() 129 | index.append((last_position, position - last_position, id)) 130 | 131 | return index 132 | 133 | @abstractmethod 134 | def _payload_buffer(self) -> bytes: 135 | """Payload Buffer 136 | 137 | Returns: 138 | bytes: Payload Buffer 139 | """ 140 | pass 141 | 142 | def write(self, stream: BinaryIO) -> None: 143 | """Write 144 | 145 | Args: 146 | stream (BinaryIO): Output stream 147 | """ 148 | payload_buffer = self._payload_buffer() 149 | stream.write(self.id) 150 | if len(payload_buffer) % 2 != 0: 151 | payload_buffer += b"\x00" 152 | stream.write(len(payload_buffer).to_bytes(4, "big")) 153 | stream.write(payload_buffer) 154 | -------------------------------------------------------------------------------- /okd/chunks/extended_p_track_info_chunk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BytesIO 3 | from typing import BinaryIO, Self 4 | 5 | from .chunk_base import ChunkBase 6 | from .generic_chunk import GenericChunk 7 | 8 | 9 | @dataclass 10 | class ExtendedPTrackInfoChannelInfoEntry: 11 | """Extended P-Track Information Channel Information Entry""" 12 | 13 | attribute: int 14 | ports: int 15 | unknown_0: int 16 | control_change_ax: int 17 | control_change_cx: int 18 | 19 | @classmethod 20 | def read(cls, stream: BinaryIO) -> Self: 21 | """Read 22 | 23 | Args: 24 | stream (BinaryIO): Input stream 25 | 26 | Returns: 27 | Self: Instance of this class 28 | """ 29 | buffer = stream.read(8) 30 | if len(buffer) < 8: 31 | raise ValueError("Too less read bytes.") 32 | 33 | attribute = int.from_bytes(buffer[0:2], "little") 34 | ports = int.from_bytes(buffer[2:4], "big") 35 | unknown_0 = int.from_bytes(buffer[4:6], "big") 36 | control_change_ax = buffer[6] 37 | control_change_cx = buffer[7] 38 | return cls(attribute, ports, unknown_0, control_change_ax, control_change_cx) 39 | 40 | def is_chorus(self) -> bool: 41 | """Is Chorus 42 | 43 | Returns: 44 | bool: True if Chorus, else False 45 | """ 46 | return self.attribute & 0x0080 != 0x0080 47 | 48 | def is_guide_melody(self) -> bool: 49 | """Is Guide Melody 50 | 51 | Returns: 52 | bool: True if Guide Melody, else False 53 | """ 54 | return self.attribute & 0x0100 == 0x0100 55 | 56 | def write(self, stream: BinaryIO) -> None: 57 | """Write 58 | 59 | Args: 60 | stream (BinaryIO): Output stream 61 | """ 62 
| stream.write(self.attribute.to_bytes(2, "little")) 63 | stream.write(self.ports.to_bytes(2, "big")) 64 | stream.write(self.unknown_0.to_bytes(2, "big")) 65 | stream.write(self.control_change_ax.to_bytes()) 66 | stream.write(self.control_change_cx.to_bytes()) 67 | 68 | 69 | @dataclass 70 | class ExtendedPTrackInfoEntry: 71 | """Extended P-Track Information Entry""" 72 | 73 | track_number: int 74 | track_status: int 75 | unused_0: int 76 | default_channel_groups: list[int] 77 | channel_groups: list[int] 78 | channel_info: list[ExtendedPTrackInfoChannelInfoEntry] 79 | system_ex_ports: int 80 | unknown_0: int 81 | 82 | @classmethod 83 | def read(cls, stream: BinaryIO) -> Self: 84 | """Read 85 | 86 | Args: 87 | stream (BinaryIO): Input stream 88 | 89 | Returns: 90 | Self: Instance of this class 91 | """ 92 | buffer = stream.read(68) 93 | if len(buffer) < 68: 94 | raise ValueError("Too less read bytes.") 95 | 96 | track_number = buffer[0] 97 | track_status = buffer[1] 98 | unused_0 = int.from_bytes(buffer[2:4], "big") 99 | 100 | default_channel_groups: list[int] = [] 101 | for channel in range(16): 102 | offset = 4 + 2 * channel 103 | default_channel_groups.append( 104 | int.from_bytes(buffer[offset : offset + 2], "big") 105 | ) 106 | 107 | channel_groups: list[int] = [] 108 | for channel in range(16): 109 | offset = 36 + 2 * channel 110 | channel_groups.append(int.from_bytes(buffer[offset : offset + 2], "big")) 111 | 112 | channel_info: list[ExtendedPTrackInfoChannelInfoEntry] = [] 113 | for _ in range(16): 114 | channel_info.append(ExtendedPTrackInfoChannelInfoEntry.read(stream)) 115 | 116 | buffer = stream.read(4) 117 | if len(buffer) < 4: 118 | raise ValueError("Too less read bytes.") 119 | 120 | system_ex_ports = int.from_bytes(buffer[0:2], "big") 121 | unknown_0 = int.from_bytes(buffer[2:4], "big") 122 | 123 | return cls( 124 | track_number, 125 | track_status, 126 | unused_0, 127 | default_channel_groups, 128 | channel_groups, 129 | channel_info, 130 | system_ex_ports, 131 | unknown_0, 132 | ) 133 | 134 | def is_lossless_track(self) -> bool: 135 | return self.track_status & 0x80 == 0x80 136 | 137 | def write(self, stream: BinaryIO) -> None: 138 | """Write 139 | 140 | Args: 141 | stream (BinaryIO): Output stream 142 | """ 143 | stream.write(self.track_number.to_bytes()) 144 | stream.write(self.track_status.to_bytes()) 145 | stream.write(self.unused_0.to_bytes(2, "big")) 146 | for default_channel_group in self.default_channel_groups: 147 | stream.write(default_channel_group.to_bytes(2, "big")) 148 | for channel_group in self.channel_groups: 149 | stream.write(channel_group.to_bytes(2, "big")) 150 | for channel_info_entry in self.channel_info: 151 | channel_info_entry.write(stream) 152 | stream.write(self.system_ex_ports.to_bytes(2, "big")) 153 | stream.write(self.unknown_0.to_bytes(2, "big")) 154 | 155 | 156 | @dataclass 157 | class ExtendedPTrackInfoChunk(ChunkBase): 158 | """Extended P-Track Information Chunk""" 159 | 160 | unknown_0: bytes 161 | tg_mode: int 162 | data: list[ExtendedPTrackInfoEntry] 163 | 164 | @classmethod 165 | def from_generic(cls, generic: GenericChunk) -> Self: 166 | """From Generic Chunk 167 | 168 | Args: 169 | generic (GenericChunk): Generic Chunk 170 | 171 | Returns: 172 | Self: ExtendedPTrackInfoChunk 173 | """ 174 | unknown_0 = generic.payload[0:8] 175 | tg_mode = int.from_bytes(generic.payload[8:10:3], "big") 176 | entry_count = int.from_bytes(generic.payload[10:12], "big") 177 | data: list[ExtendedPTrackInfoEntry] = [] 178 | stream = 
BytesIO(generic.payload[12:]) 179 | for _ in range(entry_count): 180 | entry = ExtendedPTrackInfoEntry.read(stream) 181 | data.append(entry) 182 | return cls(generic.id, unknown_0, tg_mode, data) 183 | 184 | def _payload_buffer(self) -> bytes: 185 | buffer = self.unknown_0 186 | buffer += self.tg_mode.to_bytes(2, "big") 187 | buffer += len(self.data).to_bytes(2, "big") 188 | 189 | stream = BytesIO() 190 | for entry in self.data: 191 | entry.write(stream) 192 | stream.seek(0) 193 | buffer += stream.read() 194 | 195 | return buffer 196 | -------------------------------------------------------------------------------- /okd/chunks/generic_chunk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import BinaryIO, Self 3 | 4 | from .chunk_base import ChunkBase 5 | 6 | 7 | @dataclass 8 | class GenericChunk(ChunkBase): 9 | """Generic Chunk""" 10 | 11 | payload: bytes 12 | 13 | @classmethod 14 | def read(cls, stream: BinaryIO) -> Self: 15 | """Read 16 | 17 | Args: 18 | stream (BinaryIOBufferedReader): Input stream 19 | 20 | Returns: 21 | Self: Generic Chunk 22 | """ 23 | id, payload = ChunkBase._read_common(stream) 24 | return cls(id, payload) 25 | 26 | def _payload_buffer(self) -> bytes: 27 | return self.payload 28 | -------------------------------------------------------------------------------- /okd/chunks/m_track_chunk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BytesIO 3 | import os 4 | from typing import BinaryIO, Self 5 | 6 | from midi.event import MidiEvent, MidiTrackEvent 7 | from ..okd_midi import ( 8 | read_status_byte, 9 | is_data_bytes, 10 | read_extended_variable_int, 11 | write_extended_variable_int, 12 | ) 13 | 14 | from .chunk_base import ChunkBase 15 | from .generic_chunk import GenericChunk 16 | 17 | 18 | @dataclass 19 | class MTrackEvent(MidiTrackEvent): 20 | """M-Track Event""" 21 | 22 | __END_OF_TRACK_MARK = b"\x00\x00\x00\x00" 23 | 24 | @staticmethod 25 | def read_sysex_data_bytes(stream: BinaryIO) -> bytes: 26 | """Read Data Bytes of SysEx Message 27 | 28 | Args: 29 | stream (BinaryIO): Input stream 30 | 31 | Raises: 32 | ValueError: Unterminated SysEx message detected 33 | 34 | Returns: 35 | bytes: Data Bytes 36 | """ 37 | data_bytes = b"" 38 | while True: 39 | byte = stream.read(1) 40 | if len(byte) < 1: 41 | raise ValueError("Too less read bytes.") 42 | data_bytes += byte 43 | byte = byte[0] 44 | if byte & 0x80 == 0x80: 45 | if byte != 0xFE: 46 | raise ValueError( 47 | f"Unterminated SysEx message detected. 
stop_byte={hex(byte)}" 48 | ) 49 | break 50 | return data_bytes 51 | 52 | @classmethod 53 | def read(cls, stream: BinaryIO) -> Self | None: 54 | """Read 55 | 56 | Args: 57 | stream (BinaryIO): Input stream 58 | 59 | Raises: 60 | ValueError: Unknown Status Byte detected 61 | 62 | Returns: 63 | Self: Instance of this class 64 | """ 65 | delta_time = read_extended_variable_int(stream) 66 | 67 | end_of_track = stream.read(4) 68 | if end_of_track == MTrackEvent.__END_OF_TRACK_MARK: 69 | return None 70 | stream.seek(-4, os.SEEK_CUR) 71 | 72 | status_byte = read_status_byte(stream) 73 | 74 | # System messages 75 | if status_byte == 0xFF: 76 | # SysEx message 77 | data_bytes = MTrackEvent.read_sysex_data_bytes(stream) 78 | return cls(status_byte, data_bytes, delta_time) 79 | elif status_byte == 0xF1: 80 | # Strong beat 81 | data_bytes_length = 0 82 | elif status_byte == 0xF2: 83 | # Weak beat 84 | data_bytes_length = 0 85 | elif status_byte == 0xF3: 86 | # Hook section 87 | data_bytes_length = 1 88 | elif status_byte == 0xF4: 89 | # Visible Guide Melody page delimiter 90 | data_bytes_length = 1 91 | elif status_byte == 0xF5: 92 | # Two chorus fadeout position 93 | data_bytes_length = 0 94 | elif status_byte == 0xF6: 95 | # Playing section 96 | data_bytes_length = 1 97 | elif status_byte == 0xF8: 98 | # ADPCM playing section 99 | data_bytes_length = 1 100 | else: 101 | raise ValueError( 102 | f"Unknown Status Byte detected. status_byte={hex(status_byte)}" 103 | ) 104 | 105 | data_bytes = stream.read(data_bytes_length) 106 | if not is_data_bytes(data_bytes): 107 | raise ValueError( 108 | f"Invalid Data Byte detected. data_bytes=`{data_bytes.hex(" ").upper()}`" 109 | ) 110 | 111 | return cls(status_byte, data_bytes, delta_time) 112 | 113 | def write(self, stream: BinaryIO) -> None: 114 | """Write 115 | 116 | Args: 117 | stream (BinaryIO): Output stream 118 | """ 119 | write_extended_variable_int(stream, self.delta_time) 120 | stream.write(self.status_byte.to_bytes()) 121 | stream.write(self.data_bytes) 122 | 123 | 124 | @dataclass 125 | class MTrackAbsoluteTimeEvent(MidiEvent): 126 | """M-Track Absolute Time Event""" 127 | 128 | time: int 129 | 130 | 131 | @dataclass 132 | class MTrackChunk(ChunkBase): 133 | """M-Track Chunk""" 134 | 135 | events: list[MTrackEvent] 136 | 137 | @classmethod 138 | def from_generic(cls, generic: GenericChunk) -> Self: 139 | """From Generic Chunk 140 | 141 | Args: 142 | generic (GenericChunk): Generic Chunk 143 | 144 | Returns: 145 | Self: Instance of this class 146 | """ 147 | stream = BytesIO(generic.payload) 148 | events: list[MTrackEvent] = [] 149 | while True: 150 | event = MTrackEvent.read(stream) 151 | if event is None: 152 | # End of Track 153 | break 154 | events.append(event) 155 | return cls(generic.id, events) 156 | 157 | def track_number(self) -> int: 158 | """Track Number 159 | 160 | Returns: 161 | int: Track Number 162 | """ 163 | return self.id[3] 164 | 165 | def _payload_buffer(self) -> bytes: 166 | stream = BytesIO() 167 | for event in self.events: 168 | event.write(stream) 169 | stream.seek(0) 170 | return stream.read() 171 | 172 | def to_json_serializable(self): 173 | json_events = [] 174 | for event in self.events: 175 | json_events.append( 176 | { 177 | "delta_time": event.delta_time, 178 | "status_byte": format(event.status_byte, "02X"), 179 | "data": event.data_bytes.hex(" ").upper(), 180 | } 181 | ) 182 | return {"events": json_events} 183 | 184 | def absolute_time_track( 185 | self, 186 | ) -> list[MTrackAbsoluteTimeEvent]: 187 | 
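        # Fold the per-event delta times into running absolute times so callers
        # (e.g. MTrackInterpretation.from_track below) can work with absolute
        # positions; judging by the 60000 / beat_length tempo math there, the
        # M-Track time base appears to be milliseconds.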
absolute_time_track: list[MTrackAbsoluteTimeEvent] = [] 188 | absolute_time = 0 189 | for event in self.events: 190 | absolute_time += event.delta_time 191 | absolute_time_track.append( 192 | MTrackAbsoluteTimeEvent( 193 | event.status_byte, event.data_bytes, absolute_time 194 | ) 195 | ) 196 | return absolute_time_track 197 | 198 | 199 | @dataclass 200 | class MTrackInterpretation: 201 | tempos: list[tuple[int, int]] 202 | time_signatures: list[tuple[int, int, int]] 203 | hooks: list[tuple[int, int]] 204 | visible_guide_melody_delimiters: list[tuple[int, int]] 205 | two_chorus_fadeout_time: int 206 | song_section: tuple[int, int] 207 | adpcm_sections: list[tuple[int, int]] 208 | 209 | @classmethod 210 | def from_track(cls, track: MTrackChunk): 211 | tempos: list[tuple[int, int]] = [] 212 | time_signatures: list[tuple[int, int, int]] = [] 213 | hooks: list[tuple[int, int]] = [] 214 | visible_guide_melody_delimiters: list[tuple[int, int]] = [] 215 | two_chorus_fadeout_time = -1 216 | song_section: tuple[int, int] = (-1, -1) 217 | adpcm_sections: list[tuple[int, int]] = [] 218 | 219 | absolute_time_track = track.absolute_time_track() 220 | 221 | beats = 1 222 | current_beat_start = next( 223 | ( 224 | event.time 225 | for event in absolute_time_track 226 | if event.status_byte == 0xF1 or event.status_byte == 0xF2 227 | ), 228 | -1, 229 | ) 230 | current_bpm = 125 231 | current_hook_start_time = 0 232 | song_section_start = -1 233 | current_adpcm_section_start = -1 234 | 235 | for event in absolute_time_track: 236 | if event.status_byte == 0xF1: 237 | if current_beat_start != -1: 238 | beat_length = event.time - current_beat_start 239 | if beat_length == 0: 240 | continue 241 | bpm = round(60000 / beat_length) 242 | if bpm != current_bpm: 243 | tempos.append( 244 | ( 245 | current_beat_start, 246 | bpm, 247 | ) 248 | ) 249 | current_bpm = bpm 250 | beats = 1 251 | current_beat_start = event.time 252 | elif event.status_byte == 0xF2: 253 | if current_beat_start != -1: 254 | beat_length = event.time - current_beat_start 255 | if beat_length == 0: 256 | continue 257 | bpm = round(60000 / beat_length) 258 | if bpm != current_bpm: 259 | tempos.append( 260 | ( 261 | current_beat_start, 262 | bpm, 263 | ) 264 | ) 265 | current_bpm = bpm 266 | beats += 1 267 | current_beat_start = event.time 268 | elif event.status_byte == 0xF3: 269 | mark_type = event.data_bytes[0] 270 | if mark_type == 0x00 or mark_type == 0x02: 271 | current_hook_start_time = event.time 272 | elif mark_type == 0x01 or mark_type == 0x03: 273 | hooks.append((current_hook_start_time, event.time)) 274 | elif event.status_byte == 0xF4: 275 | visible_guide_melody_delimiters.append( 276 | (event.time, event.data_bytes[0]) 277 | ) 278 | pass 279 | elif event.status_byte == 0xF5: 280 | two_chorus_fadeout_time = event.time 281 | elif event.status_byte == 0xF6: 282 | mark_type = event.data_bytes[0] 283 | if mark_type == 0x00: 284 | song_section_start = event.time 285 | elif mark_type == 0x01: 286 | song_section = (song_section_start, event.time) 287 | elif event.status_byte == 0xF8: 288 | mark_type = event.data_bytes[0] 289 | if mark_type == 0x00: 290 | current_adpcm_section_start = event.time 291 | elif mark_type == 0x01: 292 | adpcm_sections.append((current_adpcm_section_start, event.time)) 293 | elif event.status_byte == 0xFF: 294 | time_signatures.append( 295 | (event.time, event.data_bytes[1], 2 ** event.data_bytes[2]) 296 | ) 297 | 298 | return cls( 299 | tempos, 300 | time_signatures, 301 | hooks, 302 | 
visible_guide_melody_delimiters, 303 | two_chorus_fadeout_time, 304 | song_section, 305 | adpcm_sections, 306 | ) 307 | -------------------------------------------------------------------------------- /okd/chunks/okd_chunk.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | from .chunk_base import ChunkBase 4 | from .generic_chunk import GenericChunk 5 | from .p_track_info_chunk import PTrackInfoChunk 6 | 7 | from .p3_track_info_chunk import P3TrackInfoChunk 8 | from .extended_p_track_info_chunk import ExtendedPTrackInfoChunk 9 | from .m_track_chunk import MTrackChunk 10 | from .p_track_chunk import PTrackChunk 11 | from .adpcm_chunk import AdpcmChunk 12 | 13 | OkdChunk = Union[ 14 | ChunkBase, 15 | GenericChunk, 16 | PTrackInfoChunk, 17 | P3TrackInfoChunk, 18 | ExtendedPTrackInfoChunk, 19 | MTrackChunk, 20 | PTrackChunk, 21 | AdpcmChunk, 22 | ] 23 | -------------------------------------------------------------------------------- /okd/chunks/p3_track_info_chunk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BytesIO 3 | from typing import Self 4 | 5 | from .chunk_base import ChunkBase 6 | from .generic_chunk import GenericChunk 7 | from .p_track_info_chunk import PTrackInfoChannelInfoEntry 8 | 9 | 10 | @dataclass 11 | class P3TrackInfoChannelInfoEntry(PTrackInfoChannelInfoEntry): 12 | """P3-Track Information Channel Information Entry""" 13 | 14 | 15 | @dataclass 16 | class P3TrackInfoChunk(ChunkBase): 17 | """P3-Track Information Chunk""" 18 | 19 | track_number: int 20 | track_status: int 21 | use_channel_group_flag: int 22 | default_channel_groups: list[int] 23 | channel_groups: list[int] 24 | channel_info: list[PTrackInfoChannelInfoEntry] 25 | system_ex_ports: int 26 | 27 | @classmethod 28 | def from_generic(cls, generic: GenericChunk) -> Self: 29 | """From Generic Chunk 30 | 31 | Args: 32 | generic (GenericChunk): Generic Chunk 33 | 34 | Returns: 35 | Self: Instance of this class 36 | """ 37 | stream = BytesIO(generic.payload) 38 | 39 | buffer = stream.read(4) 40 | if len(buffer) < 4: 41 | raise ValueError("Too less read bytes.") 42 | 43 | track_number = buffer[0] 44 | track_status = buffer[1] 45 | use_channel_group_flag = int.from_bytes(buffer[2:4], "big") 46 | 47 | default_channel_groups: list[int] = [] 48 | for channel in range(16): 49 | if (use_channel_group_flag >> channel) & 0x0001 == 0x0001: 50 | buffer = stream.read(2) 51 | if len(buffer) < 2: 52 | raise ValueError("Too less read bytes.") 53 | 54 | default_channel_groups.append(int.from_bytes(buffer, "big")) 55 | else: 56 | default_channel_groups.append(0x0000) 57 | 58 | buffer = stream.read(32) 59 | if len(buffer) < 32: 60 | raise ValueError("Too less read bytes.") 61 | 62 | channel_groups: list[int] = [] 63 | for channel in range(16): 64 | offset = 2 * channel 65 | channel_groups.append(int.from_bytes(buffer[offset : offset + 2], "big")) 66 | 67 | channel_info: list[PTrackInfoChannelInfoEntry] = [] 68 | for channel in range(16): 69 | channel_info.append(PTrackInfoChannelInfoEntry.read(stream)) 70 | 71 | buffer = stream.read(2) 72 | if len(buffer) < 2: 73 | raise ValueError("Too less read bytes.") 74 | 75 | system_ex_ports = int.from_bytes(buffer[0:2], "big") 76 | 77 | return cls( 78 | generic.id, 79 | track_number, 80 | track_status, 81 | use_channel_group_flag, 82 | default_channel_groups, 83 | channel_groups, 84 | channel_info, 85 | system_ex_ports, 86 | ) 87 | 88 
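    # Bit 7 of track_status flags a "lossless" track; PTrackChunk.absolute_time_track()
    # only applies the `duration <<= 2` note-length scaling when this bit is clear.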
| def is_lossless_track(self) -> bool: 89 | return self.track_status & 0x80 == 0x80 90 | 91 | def _payload_buffer(self) -> bytes: 92 | """Write 93 | 94 | Args: 95 | stream (BufferedReader): Output stream 96 | """ 97 | stream = BytesIO() 98 | 99 | stream.write(self.track_number.to_bytes()) 100 | stream.write(self.track_status.to_bytes()) 101 | stream.write(self.use_channel_group_flag.to_bytes(2, "big")) 102 | for channel, default_channel_group in enumerate(self.default_channel_groups): 103 | if (self.use_channel_group_flag >> channel) & 0x0001 != 0x0001: 104 | continue 105 | stream.write(default_channel_group.to_bytes(2, "big")) 106 | for channel_group in self.channel_groups: 107 | stream.write(channel_group.to_bytes(2, "big")) 108 | for channel_info_entry in self.channel_info: 109 | channel_info_entry.write(stream) 110 | stream.write(self.system_ex_ports.to_bytes(2, "little")) 111 | 112 | stream.seek(0) 113 | return stream.read() 114 | -------------------------------------------------------------------------------- /okd/chunks/p_track_chunk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BytesIO 3 | import os 4 | from typing import BinaryIO, Self 5 | 6 | from midi.event import MidiEvent, MidiTrackEvent 7 | from ..okd_midi import ( 8 | read_status_byte, 9 | is_data_bytes, 10 | read_variable_int, 11 | read_extended_variable_int, 12 | write_variable_int, 13 | write_extended_variable_int, 14 | ) 15 | 16 | from .chunk_base import ChunkBase 17 | from .generic_chunk import GenericChunk 18 | from .p_track_info_chunk import PTrackInfoEntry, PTrackInfoChunk 19 | from .extended_p_track_info_chunk import ( 20 | ExtendedPTrackInfoEntry, 21 | ExtendedPTrackInfoChunk, 22 | ) 23 | from .p3_track_info_chunk import P3TrackInfoChunk 24 | 25 | 26 | @dataclass 27 | class PTrackEvent(MidiTrackEvent): 28 | """P-Track Event""" 29 | 30 | __END_OF_TRACK_MARK = b"\x00\x00\x00\x00" 31 | 32 | duration: int | None = None 33 | 34 | @staticmethod 35 | def read_sysex_data_bytes(stream: BinaryIO) -> bytes: 36 | """Read Data Bytes of SysEx Message 37 | 38 | Args: 39 | stream (BinaryIO): Input stream 40 | 41 | Raises: 42 | ValueError: Unterminated SysEx message detected 43 | 44 | Returns: 45 | bytes: Data Bytes 46 | """ 47 | data_bytes = b"" 48 | while True: 49 | byte = stream.read(1) 50 | if len(byte) < 1: 51 | raise ValueError("Too less read bytes.") 52 | data_bytes += byte 53 | byte = byte[0] 54 | if byte & 0x80 == 0x80: 55 | if byte != 0xF7: 56 | raise ValueError( 57 | f"Unterminated SysEx message detected. 
stop_byte={hex(byte)}" 58 | ) 59 | break 60 | return data_bytes 61 | 62 | @classmethod 63 | def read(cls, stream: BinaryIO) -> Self | None: 64 | delta_time = read_extended_variable_int(stream) 65 | 66 | end_of_track = stream.read(4) 67 | if end_of_track == PTrackEvent.__END_OF_TRACK_MARK: 68 | return None 69 | stream.seek(-4, os.SEEK_CUR) 70 | 71 | status_byte = read_status_byte(stream) 72 | status_type = status_byte & 0xF0 73 | 74 | # Channel voice messages 75 | if status_type == 0x80: 76 | # Note off 77 | data_bytes_length = 3 78 | elif status_type == 0x90: 79 | # Note on 80 | data_bytes_length = 2 81 | elif status_type == 0xA0: 82 | # Alternative CC AX 83 | data_bytes_length = 1 84 | elif status_type == 0xB0: 85 | # Control change 86 | data_bytes_length = 2 87 | elif status_type == 0xC0: 88 | # Alternative CC CX 89 | data_bytes_length = 1 90 | elif status_type == 0xD0: 91 | # Channel pressure 92 | data_bytes_length = 1 93 | elif status_type == 0xE0: 94 | # Pitch bend 95 | data_bytes_length = 2 96 | # System messages 97 | elif status_byte == 0xF0: 98 | # SysEx message 99 | data_bytes = PTrackEvent.read_sysex_data_bytes(stream) 100 | return cls(status_byte, data_bytes, delta_time) 101 | elif status_byte == 0xF8: 102 | # ADPCM note on 103 | data_bytes_length = 3 104 | elif status_byte == 0xF9: 105 | # Unknown 106 | data_bytes_length = 1 107 | elif status_byte == 0xFA: 108 | # ADPCM channel volume 109 | data_bytes_length = 1 110 | elif status_byte == 0xFD: 111 | # Enable channel grouping 112 | data_bytes_length = 0 113 | elif status_byte == 0xFE: 114 | # Compensation of Alternative CC 115 | byte = stream.read(1) 116 | if len(byte) < 1: 117 | raise ValueError("Too less read bytes.") 118 | stream.seek(-1, os.SEEK_CUR) 119 | byte = byte[0] 120 | if byte & 0xF0 == 0xA0: 121 | # Polyphonic key pressure 122 | data_bytes_length = 3 123 | elif byte & 0xF0 == 0xC0: 124 | # Program change 125 | data_bytes_length = 2 126 | else: 127 | raise ValueError( 128 | f"Unknown Compensation of Alternative CC detected. data_bytes[0]={format(byte, "02X")}" 129 | ) 130 | else: 131 | raise ValueError( 132 | f"Unknown Status byte detected. status_byte={format(status_byte, "02X")}" 133 | ) 134 | 135 | data_bytes: bytes = stream.read(data_bytes_length) 136 | data_bytes_validate = data_bytes[1:] if status_byte == 0xFE else data_bytes 137 | if not is_data_bytes(data_bytes_validate): 138 | raise ValueError( 139 | f"Invalid Data Byte detected. 
data_bytes=`{data_bytes.hex(" ").upper()}`" 140 | ) 141 | 142 | duration = None 143 | if status_type == 0x80 or status_type == 0x90: 144 | duration = read_variable_int(stream) 145 | 146 | return cls(status_byte, data_bytes, delta_time, duration) 147 | 148 | def write(self, stream: BinaryIO) -> None: 149 | """Write 150 | 151 | Args: 152 | stream (BinaryIO): Output stream 153 | """ 154 | write_extended_variable_int(stream, self.delta_time) 155 | stream.write(self.status_byte.to_bytes()) 156 | stream.write(self.data_bytes) 157 | if self.duration is not None: 158 | write_variable_int(stream, self.duration) 159 | 160 | 161 | @dataclass 162 | class PTrackAbsoluteTimeEvent(MidiEvent): 163 | """P-Track Absolute Time Event""" 164 | 165 | port: int 166 | track: int 167 | time: int 168 | 169 | 170 | @dataclass 171 | class PTrackChunk(ChunkBase): 172 | """P-Track Chunk""" 173 | 174 | PORTS = 4 175 | CHANNELS_PER_PORT = 16 176 | TOTAL_CHANNELS = CHANNELS_PER_PORT * PORTS 177 | 178 | CHUNK_NUMBER_PORT_MAP = [0, 1, 2, 2, 3] 179 | 180 | events: list[PTrackEvent] 181 | 182 | @classmethod 183 | def from_generic(cls, generic: GenericChunk) -> Self: 184 | """From Generic Chunk 185 | 186 | Args: 187 | generic (GenericChunk): Generic Chunk 188 | 189 | Returns: 190 | Self: Instance of this class 191 | """ 192 | stream = BytesIO(generic.payload) 193 | events: list[PTrackEvent] = [] 194 | while True: 195 | message = PTrackEvent.read(stream) 196 | if message is None: 197 | # End of Track 198 | break 199 | events.append(message) 200 | return cls(generic.id, events) 201 | 202 | @staticmethod 203 | def __relocate_event( 204 | track_info_entry: PTrackInfoEntry | ExtendedPTrackInfoEntry | P3TrackInfoChunk, 205 | status_byte: int, 206 | data_bytes: bytes, 207 | time: int, 208 | group_channel: bool, 209 | ) -> list[PTrackAbsoluteTimeEvent]: 210 | status_type = status_byte & 0xF0 211 | 212 | if status_byte == 0xFE: 213 | # Compensation of Alternative CC 214 | status_byte = data_bytes[0] 215 | status_type = status_byte & 0xF0 216 | data_bytes = data_bytes[1:] 217 | 218 | relocated_events: list[PTrackAbsoluteTimeEvent] = [] 219 | 220 | if status_type == 0xF0: 221 | # System messages 222 | for port in range(PTrackChunk.PORTS): 223 | if (track_info_entry.system_ex_ports >> port) & 0x0001 != 0x0001: 224 | continue 225 | 226 | track = port * PTrackChunk.CHANNELS_PER_PORT 227 | relocated_events.append( 228 | PTrackAbsoluteTimeEvent( 229 | status_byte, 230 | data_bytes, 231 | port, 232 | track, 233 | time, 234 | ) 235 | ) 236 | return relocated_events 237 | 238 | channel = status_byte & 0x0F 239 | channel_info_entry = track_info_entry.channel_info[channel] 240 | 241 | default_channel_group = track_info_entry.default_channel_groups[channel] 242 | # Fill default channel group 243 | if default_channel_group == 0x0000: 244 | default_channel_group = 0x0001 << channel 245 | 246 | for port in range(PTrackChunk.PORTS): 247 | if (channel_info_entry.ports >> port) & 0x0001 != 0x0001: 248 | continue 249 | 250 | for grouped_channel in range(PTrackChunk.CHANNELS_PER_PORT): 251 | if group_channel: 252 | if ( 253 | track_info_entry.channel_groups[channel] >> grouped_channel 254 | ) & 0x0001 != 0x0001: 255 | continue 256 | else: 257 | if (default_channel_group >> grouped_channel) & 0x0001 != 0x0001: 258 | continue 259 | 260 | track = (port * PTrackChunk.CHANNELS_PER_PORT) + grouped_channel 261 | relocated_status_byte = status_type | grouped_channel 262 | relocated_events.append( 263 | PTrackAbsoluteTimeEvent( 264 | relocated_status_byte, 265 | 
data_bytes, 266 | port, 267 | track, 268 | time, 269 | ) 270 | ) 271 | 272 | return relocated_events 273 | 274 | def track_number(self) -> int: 275 | """Track Number 276 | 277 | Returns: 278 | int: Track Number 279 | """ 280 | return self.id[3] 281 | 282 | def exists_channel_message(self, channel: int) -> bool: 283 | """Check if there exists a message for a specific channel in the P-Track chunk 284 | 285 | Args: 286 | channel: Channel number 287 | 288 | Returns: 289 | bool: True if a message exists for the specified channel, False otherwise 290 | """ 291 | return any( 292 | (event.status_byte & 0xF0) != 0xF0 and (event.status_byte & 0x0F) == channel 293 | for event in self.events 294 | ) 295 | 296 | def _payload_buffer(self) -> bytes: 297 | stream = BytesIO() 298 | for message in self.events: 299 | message.write(stream) 300 | stream.seek(0) 301 | return stream.read() 302 | 303 | def to_json_serializable(self): 304 | json_events = [] 305 | for message in self.events: 306 | json_events.append( 307 | { 308 | "delta_time": message.delta_time, 309 | "status_byte": format(message.status_byte, "02X"), 310 | "data": message.data_bytes.hex(" ").upper(), 311 | "duration": message.duration, 312 | } 313 | ) 314 | return {"events": json_events} 315 | 316 | def absolute_time_track( 317 | self, 318 | track_info: PTrackInfoChunk | ExtendedPTrackInfoChunk | P3TrackInfoChunk, 319 | ) -> list[PTrackAbsoluteTimeEvent]: 320 | if isinstance(track_info, (PTrackInfoChunk, ExtendedPTrackInfoChunk)): 321 | track_info_list = track_info.data 322 | elif isinstance(track_info, P3TrackInfoChunk): 323 | track_info_list = [track_info] 324 | else: 325 | raise ValueError( 326 | "Argument `track_info` must be PTrackInfoChunk, ExtendedPTrackInfoChunk or P3TrackInfoChunk." 327 | ) 328 | 329 | absolute_time_track: list[PTrackAbsoluteTimeEvent] = [] 330 | track_info_entry = next( 331 | ( 332 | entry 333 | for entry in track_info_list 334 | if entry.track_number == self.track_number() 335 | ), 336 | None, 337 | ) 338 | if track_info_entry is None: 339 | raise ValueError(f"P-Track Info for track {self.track_number()} not found.") 340 | 341 | is_lossless_track = track_info_entry.is_lossless_track() 342 | 343 | absolute_time_track: list[PTrackAbsoluteTimeEvent] = [] 344 | absolute_time = 0 345 | channel_grouping_enabled = False 346 | for event in self.events: 347 | absolute_time += event.delta_time 348 | 349 | status_type = event.status_byte_type() 350 | if status_type == 0x80: 351 | duration = event.duration 352 | if duration is None: 353 | continue 354 | 355 | channel = event.channel() 356 | note_number = event.data_bytes[0] 357 | note_on_velocity = event.data_bytes[1] 358 | note_off_velocity = event.data_bytes[2] 359 | if not is_lossless_track: 360 | duration <<= 2 361 | # Note on 362 | absolute_time_track += PTrackChunk.__relocate_event( 363 | track_info_entry, 364 | 0x90 | channel, 365 | bytes([note_number, note_on_velocity]), 366 | absolute_time, 367 | channel_grouping_enabled, 368 | ) 369 | # Note off 370 | absolute_time_track += PTrackChunk.__relocate_event( 371 | track_info_entry, 372 | 0x80 | channel, 373 | bytes([note_number, note_off_velocity]), 374 | absolute_time + duration, 375 | channel_grouping_enabled, 376 | ) 377 | elif status_type == 0x90: 378 | duration = event.duration 379 | if duration is None: 380 | continue 381 | 382 | channel = event.channel() 383 | note_number = event.data_bytes[0] 384 | note_on_velocity = event.data_bytes[1] 385 | if not is_lossless_track: 386 | duration <<= 2 387 | # Note on 388 | 
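                # A stored 0x9n event carries only the note-on data; the matching
                # note off is synthesized immediately below at absolute_time + duration
                # with a default release velocity of 0x40.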
absolute_time_track += PTrackChunk.__relocate_event( 389 | track_info_entry, 390 | event.status_byte, 391 | event.data_bytes, 392 | absolute_time, 393 | channel_grouping_enabled, 394 | ) 395 | # Note off 396 | absolute_time_track += PTrackChunk.__relocate_event( 397 | track_info_entry, 398 | 0x80 | channel, 399 | bytes([note_number, 0x40]), 400 | absolute_time + duration, 401 | channel_grouping_enabled, 402 | ) 403 | elif status_type == 0xA0: 404 | # CC: channel_info_entry.control_change_ax 405 | channel = event.channel() 406 | channel_info_entry = track_info_entry.channel_info[channel] 407 | absolute_time_track += PTrackChunk.__relocate_event( 408 | track_info_entry, 409 | 0xB0 | channel, 410 | bytes([channel_info_entry.control_change_ax, event.data_bytes[0]]), 411 | absolute_time, 412 | channel_grouping_enabled, 413 | ) 414 | elif status_type == 0xC0: 415 | # CC: channel_info_entry.control_change_cx 416 | channel = event.channel() 417 | channel_info_entry = track_info_entry.channel_info[channel] 418 | absolute_time_track += PTrackChunk.__relocate_event( 419 | track_info_entry, 420 | 0xB0 | channel, 421 | bytes([channel_info_entry.control_change_cx, event.data_bytes[0]]), 422 | absolute_time, 423 | channel_grouping_enabled, 424 | ) 425 | else: 426 | absolute_time_track += PTrackChunk.__relocate_event( 427 | track_info_entry, 428 | event.status_byte, 429 | event.data_bytes, 430 | absolute_time, 431 | channel_grouping_enabled, 432 | ) 433 | 434 | channel_grouping_enabled = event.status_byte == 0xFD 435 | 436 | absolute_time_track.sort( 437 | key=lambda absolute_time_event: absolute_time_event.time 438 | ) 439 | 440 | return absolute_time_track 441 | -------------------------------------------------------------------------------- /okd/chunks/p_track_info_chunk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BytesIO 3 | from typing import BinaryIO, Self 4 | 5 | from .chunk_base import ChunkBase 6 | from .generic_chunk import GenericChunk 7 | 8 | 9 | @dataclass 10 | class PTrackInfoChannelInfoEntry: 11 | """P-Track Information Channel Information Entry""" 12 | 13 | attribute: int 14 | ports: int 15 | control_change_ax: int 16 | control_change_cx: int 17 | 18 | @classmethod 19 | def read(cls, stream: BinaryIO) -> Self: 20 | """Read 21 | 22 | Args: 23 | stream (BinaryIO): Input stream 24 | 25 | Returns: 26 | Self: Instance of this class 27 | """ 28 | buffer = stream.read(4) 29 | if len(buffer) < 4: 30 | raise ValueError("Too less read bytes.") 31 | 32 | attribute = buffer[0] 33 | ports = buffer[1] 34 | control_change_ax = buffer[2] 35 | control_change_cx = buffer[3] 36 | return cls(attribute, ports, control_change_ax, control_change_cx) 37 | 38 | def is_chorus(self) -> bool: 39 | """Is Chorus 40 | 41 | Returns: 42 | bool: True if Chorus, else False 43 | """ 44 | return self.attribute & 0x01 != 0x01 45 | 46 | def is_guide_melody(self) -> bool: 47 | """Is Guide Melody 48 | 49 | Returns: 50 | bool: True if Guide Melody, else False 51 | """ 52 | 53 | return self.attribute & 0x80 != 0x80 54 | 55 | def write(self, stream: BinaryIO) -> None: 56 | """Write 57 | 58 | Args: 59 | stream (BinaryIO): Output stream 60 | """ 61 | stream.write(self.attribute.to_bytes()) 62 | stream.write(self.ports.to_bytes()) 63 | stream.write(self.control_change_ax.to_bytes()) 64 | stream.write(self.control_change_cx.to_bytes()) 65 | 66 | 67 | @dataclass 68 | class PTrackInfoEntry: 69 | """P-Track Information Entry""" 70 | 71 | 
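    # On disk: a 4-byte header (track_number, track_status, 16-bit
    # use_channel_group_flag), then one 16-bit default channel group for each
    # channel whose flag bit is set, 16 x 16-bit channel groups, 16 channel info
    # entries and a 16-bit system_ex_ports word (see read()/write() below).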
track_number: int 72 | track_status: int 73 | use_channel_group_flag: int 74 | default_channel_groups: list[int] 75 | channel_groups: list[int] 76 | channel_info: list[PTrackInfoChannelInfoEntry] 77 | system_ex_ports: int 78 | 79 | @classmethod 80 | def read(cls, stream: BinaryIO) -> Self: 81 | """Read 82 | 83 | Args: 84 | stream (BinaryIO): Input stream 85 | 86 | Returns: 87 | Self: Instance of this class 88 | """ 89 | buffer = stream.read(4) 90 | if len(buffer) < 4: 91 | raise ValueError("Too less read bytes.") 92 | 93 | track_number = buffer[0] 94 | track_status = buffer[1] 95 | use_channel_group_flag = int.from_bytes(buffer[2:4], "big") 96 | 97 | default_channel_groups: list[int] = [] 98 | for channel in range(16): 99 | if (use_channel_group_flag >> channel) & 0x0001 == 0x0001: 100 | buffer = stream.read(2) 101 | if len(buffer) < 2: 102 | raise ValueError("Too less read bytes.") 103 | 104 | default_channel_groups.append(int.from_bytes(buffer, "big")) 105 | else: 106 | default_channel_groups.append(0x0000) 107 | 108 | buffer = stream.read(32) 109 | if len(buffer) < 32: 110 | raise ValueError("Too less read bytes.") 111 | 112 | channel_groups: list[int] = [] 113 | for channel in range(16): 114 | offset = 2 * channel 115 | channel_groups.append(int.from_bytes(buffer[offset : offset + 2], "big")) 116 | 117 | channel_info: list[PTrackInfoChannelInfoEntry] = [] 118 | for channel in range(16): 119 | channel_info.append(PTrackInfoChannelInfoEntry.read(stream)) 120 | 121 | buffer = stream.read(2) 122 | if len(buffer) < 2: 123 | raise ValueError("Too less read bytes.") 124 | 125 | system_ex_ports = int.from_bytes(buffer[0:2], "little") 126 | 127 | return cls( 128 | track_number, 129 | track_status, 130 | use_channel_group_flag, 131 | default_channel_groups, 132 | channel_groups, 133 | channel_info, 134 | system_ex_ports, 135 | ) 136 | 137 | def is_lossless_track(self) -> bool: 138 | return self.track_status & 0x80 == 0x80 139 | 140 | def write(self, stream: BinaryIO) -> None: 141 | """Write 142 | 143 | Args: 144 | stream (BinaryIO): Output stream 145 | """ 146 | stream.write(self.track_number.to_bytes()) 147 | stream.write(self.track_status.to_bytes()) 148 | stream.write(self.use_channel_group_flag.to_bytes(2, "big")) 149 | for channel, default_channel_group in enumerate(self.default_channel_groups): 150 | if (self.use_channel_group_flag >> channel) & 0x0001 != 0x0001: 151 | continue 152 | stream.write(default_channel_group.to_bytes(2, "big")) 153 | for channel_group in self.channel_groups: 154 | stream.write(channel_group.to_bytes(2, "big")) 155 | for channel_info_entry in self.channel_info: 156 | channel_info_entry.write(stream) 157 | stream.write(self.system_ex_ports.to_bytes(2, "little")) 158 | 159 | 160 | @dataclass 161 | class PTrackInfoChunk(ChunkBase): 162 | """P-Track Information Chunk""" 163 | 164 | data: list[PTrackInfoEntry] 165 | 166 | @classmethod 167 | def from_generic(cls, generic: GenericChunk) -> Self: 168 | """From Generic Chunk 169 | 170 | Args: 171 | generic (GenericChunk): Generic Chunk 172 | 173 | Returns: 174 | Self: Instance of this class 175 | """ 176 | p_track_info: list[PTrackInfoEntry] = [] 177 | entry_count = int.from_bytes(generic.payload[0:2], "big") 178 | stream = BytesIO(generic.payload[2:]) 179 | for _ in range(entry_count): 180 | entry = PTrackInfoEntry.read(stream) 181 | p_track_info.append(entry) 182 | return cls(generic.id, p_track_info) 183 | 184 | def _payload_buffer(self) -> bytes: 185 | buffer = len(self.data).to_bytes(2, "big") 186 | 187 | stream = 
BytesIO() 188 | for entry in self.data: 189 | entry.write(stream) 190 | stream.seek(0) 191 | buffer += stream.read() 192 | 193 | return buffer 194 | -------------------------------------------------------------------------------- /okd/chunks/utils.py: -------------------------------------------------------------------------------- 1 | from typing import BinaryIO 2 | 3 | from .generic_chunk import GenericChunk 4 | from .p_track_info_chunk import ( 5 | PTrackInfoChannelInfoEntry, 6 | PTrackInfoEntry, 7 | PTrackInfoChunk, 8 | ) 9 | from .p3_track_info_chunk import P3TrackInfoChunk 10 | from .extended_p_track_info_chunk import ( 11 | ExtendedPTrackInfoChannelInfoEntry, 12 | ExtendedPTrackInfoEntry, 13 | ExtendedPTrackInfoChunk, 14 | ) 15 | from .m_track_chunk import MTrackChunk 16 | from .p_track_chunk import PTrackChunk 17 | from .adpcm_chunk import AdpcmChunk 18 | from .okd_chunk import OkdChunk 19 | 20 | 21 | def read_chunk(stream: BinaryIO) -> OkdChunk: 22 | """Read Chunk 23 | 24 | Args: 25 | stream (BufferedReader): Input stream 26 | 27 | Returns: 28 | OkdChunk: OKD Chunk 29 | """ 30 | generic = GenericChunk.read(stream) 31 | 32 | if generic.id == b"YPTI": 33 | return PTrackInfoChunk.from_generic(generic) 34 | elif generic.id == b"YP3I": 35 | return P3TrackInfoChunk.from_generic(generic) 36 | elif generic.id == b"YPXI": 37 | return ExtendedPTrackInfoChunk.from_generic(generic) 38 | elif generic.id[0:3] == b"\xffMR": 39 | return MTrackChunk.from_generic(generic) 40 | elif generic.id[0:3] == b"\xffPR": 41 | return PTrackChunk.from_generic(generic) 42 | elif generic.id == b"YADD": 43 | return AdpcmChunk.from_generic(generic) 44 | 45 | return generic 46 | 47 | 48 | def p_track_info_chunk_by_p_track_chunks( 49 | p_track_chunks: list[PTrackChunk], 50 | ) -> PTrackInfoChunk | ExtendedPTrackInfoChunk: 51 | if len(p_track_chunks) <= 2: 52 | p_track_info_entries_1: list[PTrackInfoEntry] = [] 53 | for p_track_chunk in p_track_chunks: 54 | ports = ( 55 | 0x0001 56 | << PTrackChunk.CHUNK_NUMBER_PORT_MAP[p_track_chunk.track_number()] 57 | ) 58 | sysex_ports = 4 if p_track_chunk.track_number() >= 2 else 1 59 | 60 | track_info_channel_info_entries_1: list[PTrackInfoChannelInfoEntry] = [] 61 | for channel in range(16): 62 | exists_message = p_track_chunk.exists_channel_message(channel) 63 | channel_attribute = ( 64 | 127 if p_track_chunk.track_number() == 1 and channel == 9 else 255 65 | ) 66 | track_info_channel_info_entries_1.append( 67 | PTrackInfoChannelInfoEntry( 68 | channel_attribute if exists_message else 0, 69 | ports, 70 | 0x00, 71 | 0x00, 72 | ) 73 | ) 74 | 75 | p_track_info_entries_1.append( 76 | PTrackInfoEntry( 77 | p_track_chunk.track_number(), 78 | 0x40, 79 | 0x0000, 80 | [0] * 16, 81 | [0] * 16, 82 | track_info_channel_info_entries_1, 83 | sysex_ports, 84 | ) 85 | ) 86 | 87 | return PTrackInfoChunk(b"YPTI", p_track_info_entries_1) 88 | 89 | else: 90 | p_track_info_entries_2: list[ExtendedPTrackInfoEntry] = [] 91 | for p_track_chunk in p_track_chunks: 92 | ports = ( 93 | 0x0001 94 | << PTrackChunk.CHUNK_NUMBER_PORT_MAP[p_track_chunk.track_number()] 95 | ) 96 | sysex_ports = 4 if p_track_chunk.track_number() >= 2 else 1 97 | 98 | track_info_channel_info_entries_2: list[ 99 | ExtendedPTrackInfoChannelInfoEntry 100 | ] = [] 101 | for channel in range(16): 102 | exists_message = p_track_chunk.exists_channel_message(channel) 103 | channel_attribute = ( 104 | 127 if p_track_chunk.track_number() == 1 and channel == 9 else 255 105 | ) 106 | track_info_channel_info_entries_2.append( 107 | 
ExtendedPTrackInfoChannelInfoEntry( 108 | channel_attribute if exists_message else 0, 109 | ports, 110 | 0x00, 111 | 0x00, 112 | 0x00, 113 | ) 114 | ) 115 | 116 | p_track_info_entries_2.append( 117 | ExtendedPTrackInfoEntry( 118 | p_track_chunk.track_number(), 119 | 0x40, 120 | 0x00, 121 | [0] * 16, 122 | [0] * 16, 123 | track_info_channel_info_entries_2, 124 | sysex_ports, 125 | 0x00, 126 | ) 127 | ) 128 | 129 | return ExtendedPTrackInfoChunk( 130 | b"YPXI", b"\x00\x00\x00\x00\x00\x00\x00\x00", 0, p_track_info_entries_2 131 | ) 132 | 133 | 134 | def p3_track_info_chunk_by_p_track_chunks( 135 | p_track_chunk: PTrackChunk, 136 | ) -> P3TrackInfoChunk: 137 | track_info_channel_info_entries: list[PTrackInfoChannelInfoEntry] = [] 138 | for channel in range(16): 139 | exists_message = p_track_chunk.exists_channel_message(channel) 140 | track_info_channel_info_entries.append( 141 | PTrackInfoChannelInfoEntry( 142 | 255 if exists_message else 0, 143 | 0x0004, 144 | 0x00, 145 | 0x00, 146 | ) 147 | ) 148 | 149 | return P3TrackInfoChunk( 150 | b"YP3I", 151 | 0x02, 152 | 0x40, 153 | 0x0000, 154 | [0] * 16, 155 | [0] * 16, 156 | track_info_channel_info_entries, 157 | 0x0004, 158 | ) 159 | -------------------------------------------------------------------------------- /okd/dump_binary.py: -------------------------------------------------------------------------------- 1 | """Dump binary with HEX""" 2 | 3 | import math 4 | 5 | 6 | def __dump_binary_line(address: int, chunk: bytes, chunk_size: int): 7 | BYTES_PER_SEP = " " 8 | 9 | line = "0x" 10 | line += format(address, "08X") 11 | line += " " 12 | line += chunk.hex(BYTES_PER_SEP).upper() 13 | padding = chunk_size - len(chunk) 14 | line += " " * padding 15 | line += " " 16 | for byte in chunk: 17 | if byte < 0x20 or 0x7E < byte: 18 | # Control character 19 | line += "." 20 | continue 21 | line += chr(byte) 22 | line += "\n" 23 | return line 24 | 25 | 26 | def dump_binary(data: bytes, chunk_size=16) -> str: 27 | """Dump binary with HEX 28 | 29 | Args: 30 | data (bytes): Data 31 | chunk_size (int, optional): Chunk size. Defaults to 16. 
32 | 33 | Returns: 34 | str: Binary dumped string 35 | """ 36 | output = "" 37 | 38 | chunk_count = math.floor(len(data) / chunk_size) 39 | fraction_length = len(data) % chunk_size 40 | 41 | for i in range(chunk_count): 42 | address = chunk_size * i 43 | chunk: bytes = data[address : address + chunk_size] 44 | output += __dump_binary_line(address, chunk, chunk_size) 45 | 46 | if fraction_length == 0: 47 | return output[:-1] 48 | 49 | address = chunk_size * chunk_count 50 | fraction: bytes = data[address : address + fraction_length] 51 | output += __dump_binary_line(address, fraction, chunk_size) 52 | 53 | return output[:-1] 54 | -------------------------------------------------------------------------------- /okd/m_track_conversion.py: -------------------------------------------------------------------------------- 1 | import math 2 | import mido 3 | 4 | from .chunks import MTrackEvent, MTrackAbsoluteTimeEvent, MTrackChunk 5 | from midi.time_converter import MidiTimeConverter 6 | from midi.utils import ( 7 | get_track_by_port_channel, 8 | get_first_and_last_note_times, 9 | get_time_signatures, 10 | ) 11 | 12 | MIDI_M_TRACK_PORT = 16 13 | 14 | 15 | def __midi_to_absolute_time_track(midi: mido.MidiFile) -> list[MTrackAbsoluteTimeEvent]: 16 | midi_time_converter = MidiTimeConverter() 17 | midi_time_converter.load_from_midi(midi) 18 | 19 | melody_track = get_track_by_port_channel(midi.tracks, 1, 8) 20 | if melody_track is None: 21 | raise ValueError("Melody track not found.") 22 | 23 | melody_notes: list[tuple[int, int]] = [] 24 | current_melody_note_start = -1 25 | current_melody_node_number = -1 26 | track_time = 0 27 | for midi_message in melody_track: 28 | track_time += midi_message.time 29 | absolute_time = midi_time_converter.ticks_to_ms(track_time) 30 | 31 | if not isinstance(midi_message, mido.Message): 32 | continue 33 | 34 | if midi_message.type == "note_on": # type: ignore 35 | current_melody_note_start = absolute_time 36 | current_melody_node_number = midi_message.note # type: ignore 37 | elif ( 38 | midi_message.type == "note_off" # type: ignore 39 | and midi_message.note == current_melody_node_number # type: ignore 40 | ): 41 | melody_notes.append((current_melody_note_start, absolute_time)) 42 | 43 | if len(melody_notes) < 1: 44 | raise ValueError("Melody note not found.") 45 | 46 | m_track = get_track_by_port_channel(midi.tracks, MIDI_M_TRACK_PORT, 0) 47 | 48 | hooks: list[tuple[int, int]] = [] 49 | 50 | two_chorus_fadeout_time = -1 51 | 52 | if m_track is not None: 53 | current_hook_start = -1 54 | track_time = 0 55 | for midi_message in midi.tracks[1]: 56 | track_time += midi_message.time 57 | absolute_time = midi_time_converter.ticks_to_ms(track_time) 58 | 59 | if not isinstance(midi_message, mido.Message): 60 | continue 61 | 62 | if midi_message.type == "note_on": # type: ignore 63 | if midi_message.note == 48: # type: ignore 64 | current_hook_start = absolute_time 65 | elif midi_message.note == 72: # type: ignore 66 | two_chorus_fadeout_time = absolute_time 67 | elif midi_message.type == "note_off": # type: ignore 68 | if midi_message.note == 48: # type: ignore 69 | hooks.append((current_hook_start, absolute_time)) 70 | 71 | first_note_on_tick, last_note_off_tick = get_first_and_last_note_times(midi.tracks) 72 | first_note_on_time = midi_time_converter.ticks_to_ms(first_note_on_tick) 73 | last_note_off_time = midi_time_converter.ticks_to_ms(last_note_off_tick) 74 | 75 | absolute_time_track: list[MTrackAbsoluteTimeEvent] = [] 76 | 77 | time_signatures = 
get_time_signatures(midi.tracks) 78 | 79 | visible_guide_melody_delimiters: list[tuple[int, int]] = [] 80 | for tick, numerator, denominator in time_signatures: 81 | absolute_time_track.append( 82 | MTrackAbsoluteTimeEvent( 83 | 0xFF, 84 | bytes(bytearray([0x00, numerator, int(math.log2(denominator)), 0xFE])), 85 | midi_time_converter.ticks_to_ms(tick), 86 | ) 87 | ) 88 | 89 | melody_notes_copy = melody_notes.copy() 90 | current_page_start = -1 91 | while True: 92 | melody_note: tuple[int, int] 93 | try: 94 | melody_note = melody_notes_copy.pop(0) 95 | except IndexError: 96 | break 97 | melody_note_start, melody_note_end = melody_note 98 | 99 | if current_page_start == -1: 100 | current_page_start = melody_note_start 101 | visible_guide_melody_delimiters.append((melody_note_start, 0)) 102 | continue 103 | 104 | next_melody_note: tuple[int, int] 105 | try: 106 | next_melody_note = melody_notes_copy[0] 107 | except IndexError: 108 | visible_guide_melody_delimiters.append((melody_note_end + 1, 2)) 109 | break 110 | next_melody_note_start, next_melody_note_end = next_melody_note 111 | 112 | page_length = melody_note_end - current_page_start 113 | if 7000 < page_length: 114 | void_length = next_melody_note_start - melody_note_end 115 | if 7000 < void_length: 116 | melody_notes_copy.pop(0) 117 | visible_guide_melody_delimiters.append((melody_note_end + 1, 1)) 118 | current_page_start = -1 119 | else: 120 | visible_guide_melody_delimiters.append((next_melody_note_start, 3)) 121 | current_page_start = next_melody_note_start 122 | 123 | if len(time_signatures) > 0: 124 | current_beat_time = 0 125 | current_beat_count = time_signatures[0][1] 126 | while current_beat_time < last_note_off_time + 1: 127 | time_signature_time = current_beat_time 128 | time_signature = next( 129 | ( 130 | time_signature 131 | for time_signature in reversed(time_signatures) 132 | if time_signature[0] <= time_signature_time 133 | ), 134 | None, 135 | ) 136 | if time_signature is None: 137 | raise ValueError("Time signature not found.") 138 | 139 | if current_beat_count < time_signature[1]: 140 | absolute_time_track.append( 141 | MTrackAbsoluteTimeEvent( 142 | 0xF2, b"", midi_time_converter.ticks_to_ms(current_beat_time) 143 | ) 144 | ) 145 | current_beat_count += 1 146 | else: 147 | absolute_time_track.append( 148 | MTrackAbsoluteTimeEvent( 149 | 0xF1, b"", midi_time_converter.ticks_to_ms(current_beat_time) 150 | ) 151 | ) 152 | current_beat_count = 1 153 | 154 | current_beat_time += midi.ticks_per_beat 155 | 156 | absolute_time_track.append( 157 | MTrackAbsoluteTimeEvent( 158 | 0xF6, b"\x00", midi_time_converter.ticks_to_ms(first_note_on_time) 159 | ) 160 | ) 161 | absolute_time_track.append( 162 | MTrackAbsoluteTimeEvent( 163 | 0xF6, b"\x01", midi_time_converter.ticks_to_ms(last_note_off_time) 164 | ) 165 | ) 166 | 167 | for hook_start, hook_end in hooks[:-1]: 168 | absolute_time_track.append( 169 | MTrackAbsoluteTimeEvent( 170 | 0xF3, b"\x00", midi_time_converter.ticks_to_ms(hook_start) 171 | ) 172 | ) 173 | absolute_time_track.append( 174 | MTrackAbsoluteTimeEvent( 175 | 0xF3, b"\x01", midi_time_converter.ticks_to_ms(hook_end) 176 | ) 177 | ) 178 | 179 | if len(hooks) > 0: 180 | last_hook_start, last_hook_end = hooks[-1] 181 | absolute_time_track.append( 182 | MTrackAbsoluteTimeEvent( 183 | 0xF3, b"\x02", midi_time_converter.ticks_to_ms(last_hook_start) 184 | ) 185 | ) 186 | absolute_time_track.append( 187 | MTrackAbsoluteTimeEvent( 188 | 0xF3, b"\x03", midi_time_converter.ticks_to_ms(last_hook_end) 189 | ) 190 | ) 191 
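    # Emit one 0xF4 (visible guide melody page delimiter) M-Track event per entry
    # produced by the pagination pass above; the single data byte is the delimiter
    # type (0-3) chosen there.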
| 192 | for ( 193 | visible_guide_melody_delimiter_time, 194 | visible_guide_melody_delimiter_type, 195 | ) in visible_guide_melody_delimiters: 196 | absolute_time_track.append( 197 | MTrackAbsoluteTimeEvent( 198 | 0xF4, 199 | visible_guide_melody_delimiter_type.to_bytes(), 200 | midi_time_converter.ticks_to_ms(visible_guide_melody_delimiter_time), 201 | ) 202 | ) 203 | 204 | if two_chorus_fadeout_time != -1: 205 | absolute_time_track.append( 206 | MTrackAbsoluteTimeEvent( 207 | 0xF5, b"", midi_time_converter.ticks_to_ms(two_chorus_fadeout_time) 208 | ) 209 | ) 210 | 211 | absolute_time_track.sort(key=lambda absolute_time_event: absolute_time_event.time) 212 | 213 | return absolute_time_track 214 | 215 | 216 | def midi_to_m_track( 217 | midi: mido.MidiFile, 218 | ) -> MTrackChunk: 219 | absolute_time_track = __midi_to_absolute_time_track(midi) 220 | events: list[MTrackEvent] = [] 221 | current_time = 0 222 | for event in absolute_time_track: 223 | delta_time = event.time - current_time 224 | events.append(MTrackEvent(event.status_byte, event.data_bytes, delta_time)) 225 | current_time = event.time 226 | # End of Track 227 | events.append(MTrackEvent(0x00, b"\x00\x00\x00", 0)) 228 | return MTrackChunk(b"\xffMR\x00", events) 229 | -------------------------------------------------------------------------------- /okd/mmt_tg/__init__.py: -------------------------------------------------------------------------------- 1 | from .midi_parameter_change_table import System, MultiEffect, MultiPartEntry 2 | from .mmt_tg import MmtTg 3 | -------------------------------------------------------------------------------- /okd/mmt_tg/midi_parameter_change_table.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, asdict 2 | from typing import Union 3 | 4 | import mido 5 | 6 | 7 | @dataclass 8 | class System: 9 | master_tune: int 10 | master_volume: int 11 | transpose: int 12 | master_pan: int 13 | master_cutoff: int 14 | master_pitch_modulation_depth: int 15 | variation_effect_send_control_change_number: int 16 | 17 | @classmethod 18 | def from_memory(cls, memory: list[int]): 19 | master_tune = ( 20 | ((memory[0x000000] & 0x0F) << 12) 21 | | ((memory[0x000001] & 0x0F) << 8) 22 | | ((memory[0x000002] & 0x0F) << 4) 23 | | (memory[0x000003] & 0x0F) 24 | ) 25 | master_volume = memory[0x000004] 26 | transpose = memory[0x000005] 27 | master_pan = memory[0x000006] 28 | master_cutoff = memory[0x000007] 29 | master_pitch_modulation_depth = memory[0x000008] 30 | variation_effect_send_control_change_number = memory[0x000009] 31 | 32 | return cls( 33 | master_tune, 34 | master_volume, 35 | transpose, 36 | master_pan, 37 | master_cutoff, 38 | master_pitch_modulation_depth, 39 | variation_effect_send_control_change_number, 40 | ) 41 | 42 | 43 | @dataclass 44 | class MultiEffect: 45 | chorus_type: int 46 | variation_type: int 47 | pre_variation_type: int 48 | pre_reverb_type: int 49 | reverb_input: int 50 | chorus_input: int 51 | variation_input: int 52 | dry_level: int 53 | reverb_return: int 54 | chorus_return: int 55 | variation_return: int 56 | send_variation_to_chorus: int 57 | send_variation_to_reverb: int 58 | send_chorus_to_reverb: int 59 | 60 | chorus_param_1: int 61 | chorus_param_2: int 62 | chorus_param_3: int 63 | chorus_param_4: int 64 | chorus_param_5: int 65 | chorus_param_6: int 66 | chorus_param_7: int 67 | chorus_param_8: int 68 | chorus_param_9: int 69 | chorus_param_10: int 70 | 71 | variation_param_1_msb: int 72 | 
variation_param_1_lsb: int 73 | variation_param_2_msb: int 74 | variation_param_2_lsb: int 75 | variation_param_3_msb: int 76 | variation_param_3_lsb: int 77 | variation_param_4_msb: int 78 | variation_param_4_lsb: int 79 | variation_param_5_msb: int 80 | variation_param_5_lsb: int 81 | variation_param_6: int 82 | variation_param_7: int 83 | variation_param_8: int 84 | variation_param_9: int 85 | variation_param_10: int 86 | 87 | pre_variation_param_1: int 88 | pre_variation_param_2: int 89 | pre_variation_param_3: int 90 | pre_variation_param_4: int 91 | pre_variation_param_5: int 92 | pre_variation_param_6: int 93 | pre_variation_param_7: int 94 | pre_variation_param_8: int 95 | 96 | pre_reverb_param_1: int 97 | pre_reverb_param_2: int 98 | pre_reverb_param_3: int 99 | pre_reverb_param_4: int 100 | pre_reverb_param_5: int 101 | pre_reverb_param_6: int 102 | pre_reverb_param_7: int 103 | pre_reverb_param_8: int 104 | pre_reverb_param_9: int 105 | 106 | reverb_param_1: int 107 | reverb_param_2: int 108 | reverb_param_3: int 109 | reverb_param_4: int 110 | reverb_param_5: int 111 | reverb_param_6: int 112 | reverb_param_7: int 113 | reverb_param_8: int 114 | reverb_param_9: int 115 | reverb_param_10: int 116 | 117 | 118 | @dataclass 119 | class MultiPartEntry: 120 | PART_NUMBER_TO_ENTRY_INDEX_TABLE = [ 121 | 0x01, 122 | 0x02, 123 | 0x03, 124 | 0x04, 125 | 0x05, 126 | 0x06, 127 | 0x07, 128 | 0x08, 129 | 0x09, 130 | 0x00, 131 | 0x0A, 132 | 0x0B, 133 | 0x0C, 134 | 0x0D, 135 | 0x0E, 136 | 0x0F, 137 | 0x11, 138 | 0x12, 139 | 0x13, 140 | 0x14, 141 | 0x15, 142 | 0x16, 143 | 0x17, 144 | 0x18, 145 | 0x19, 146 | 0x10, 147 | 0x1A, 148 | 0x1B, 149 | 0x1C, 150 | 0x1D, 151 | 0x1E, 152 | 0x1F, 153 | ] 154 | 155 | ENTRY_INDEX_TO_PART_NUMBER_TABLE = [ 156 | 0x09, 157 | 0x00, 158 | 0x01, 159 | 0x02, 160 | 0x03, 161 | 0x04, 162 | 0x05, 163 | 0x06, 164 | 0x07, 165 | 0x08, 166 | 0x0A, 167 | 0x0B, 168 | 0x0C, 169 | 0x0D, 170 | 0x0E, 171 | 0x0F, 172 | 0x19, 173 | 0x10, 174 | 0x11, 175 | 0x12, 176 | 0x13, 177 | 0x14, 178 | 0x15, 179 | 0x16, 180 | 0x17, 181 | 0x18, 182 | 0x1A, 183 | 0x1B, 184 | 0x1C, 185 | 0x1D, 186 | 0x1E, 187 | 0x1F, 188 | ] 189 | 190 | bank_select_msb: int 191 | bank_select_lsb: int 192 | program_number: int 193 | rcv_channel: int 194 | rcv_pitch_bend: int 195 | rcv_ch_after_touch: int 196 | rcv_program_change: int 197 | rcv_control_change: int 198 | rcv_poly_after_touch: int 199 | rcv_note_message: int 200 | rcv_rpn: int 201 | rcv_nrpn: int 202 | rcv_modulation: int 203 | rcv_volume: int 204 | rcv_pan: int 205 | rcv_expression: int 206 | rcv_hold_1: int 207 | rcv_portamento: int 208 | rcv_sostenuto: int 209 | rcv_soft_pedal: int 210 | 211 | mono_poly_mode: int 212 | same_note_number_key_on_assign: int 213 | part_mode: int 214 | note_shift: int 215 | detune: int 216 | volume: int 217 | velocity_sense_depth: int 218 | velocity_sense_offset: int 219 | pan: int 220 | note_limit_low: int 221 | note_limit_high: int 222 | ac_1_controller_number: int 223 | ac_2_controller_number: int 224 | dry_level: int 225 | chorus_send: int 226 | reverb_send: int 227 | variation_send: int 228 | 229 | vibrato_rate: int 230 | vibrato_depth: int 231 | filter_cutoff_frequency: int 232 | filter_resonance: int 233 | eg_attack_time: int 234 | eg_decay_time: int 235 | eg_release_time: int 236 | vibrato_delay: int 237 | 238 | scale_tuning_c: int 239 | scale_tuning_c_sharp: int 240 | scale_tuning_d: int 241 | scale_tuning_d_sharp: int 242 | scale_tuning_e: int 243 | scale_tuning_f: int 244 | scale_tuning_f_sharp: int 245 | 
scale_tuning_g: int 246 | scale_tuning_g_sharp: int 247 | scale_tuning_a: int 248 | scale_tuning_a_sharp: int 249 | scale_tuning_b: int 250 | 251 | mw_pitch_control: int 252 | mw_filter_control: int 253 | mw_amplitude_control: int 254 | mw_lfo_pmod_depth: int 255 | mw_lfo_fmod_depth: int 256 | 257 | bend_pitch_control: int 258 | bend_filter_control: int 259 | bend_amplitude_control: int 260 | bend_lfo_pmod_depth: int 261 | bend_lfo_fmod_depth: int 262 | 263 | cat_pitch_control: int 264 | cat_filter_control: int 265 | cat_amplitude_control: int 266 | cat_lfo_pmod_depth: int 267 | cat_lfo_fmod_depth: int 268 | 269 | pat_pitch_control: int 270 | pat_filter_control: int 271 | pat_amplitude_control: int 272 | pat_lfo_pmod_depth: int 273 | pat_lfo_fmod_depth: int 274 | 275 | ac_1_pitch_control: int 276 | ac_1_filter_control: int 277 | ac_1_amplitude_control: int 278 | ac_1_lfo_pmod_depth: int 279 | ac_1_lfo_fmod_depth: int 280 | 281 | ac_2_pitch_control: int 282 | ac_2_filter_control: int 283 | ac_2_amplitude_control: int 284 | ac_2_lfo_pmod_depth: int 285 | ac_2_lfo_fmod_depth: int 286 | 287 | portamento_switch: int 288 | portamento_time: int 289 | 290 | @classmethod 291 | def from_memory(cls, memory: list[int], part_number: int): 292 | entry_index = cls.PART_NUMBER_TO_ENTRY_INDEX_TABLE[part_number] 293 | entry_address = 0x008000 + (entry_index << 7) 294 | 295 | bank_select_msb = memory[entry_address + 0x01] 296 | bank_select_lsb = memory[entry_address + 0x02] 297 | program_number = memory[entry_address + 0x03] 298 | rcv_channel = memory[entry_address + 0x04] 299 | rcv_pitch_bend = memory[entry_address + 0x05] 300 | rcv_ch_after_touch = memory[entry_address + 0x06] 301 | rcv_program_change = memory[entry_address + 0x07] 302 | rcv_control_change = memory[entry_address + 0x08] 303 | rcv_poly_after_touch = memory[entry_address + 0x09] 304 | rcv_note_message = memory[entry_address + 0x0A] 305 | rcv_rpn = memory[entry_address + 0x0B] 306 | rcv_nrpn = memory[entry_address + 0x0C] 307 | rcv_modulation = memory[entry_address + 0x0D] 308 | rcv_volume = memory[entry_address + 0x0E] 309 | rcv_pan = memory[entry_address + 0x0F] 310 | rcv_expression = memory[entry_address + 0x10] 311 | rcv_hold_1 = memory[entry_address + 0x11] 312 | rcv_portamento = memory[entry_address + 0x12] 313 | rcv_sostenuto = memory[entry_address + 0x13] 314 | rcv_soft_pedal = memory[entry_address + 0x14] 315 | 316 | mono_poly_mode = memory[entry_address + 0x15] 317 | same_note_number_key_on_assign = memory[entry_address + 0x16] 318 | part_mode = memory[entry_address + 0x17] 319 | note_shift = memory[entry_address + 0x18] 320 | detune = ((memory[entry_address + 0x19] & 0x0F) << 4) | ( 321 | memory[entry_address + 0x1A] & 0x0F 322 | ) 323 | volume = memory[entry_address + 0x1B] 324 | velocity_sense_depth = memory[entry_address + 0x1C] 325 | velocity_sense_offset = memory[entry_address + 0x1D] 326 | pan = memory[entry_address + 0x1E] 327 | note_limit_low = memory[entry_address + 0x1F] 328 | note_limit_high = memory[entry_address + 0x20] 329 | ac_1_controller_number = memory[entry_address + 0x21] 330 | ac_2_controller_number = memory[entry_address + 0x22] 331 | dry_level = memory[entry_address + 0x23] 332 | chorus_send = memory[entry_address + 0x24] 333 | reverb_send = memory[entry_address + 0x25] 334 | variation_send = memory[entry_address + 0x26] 335 | 336 | vibrato_rate = memory[entry_address + 0x27] 337 | vibrato_depth = memory[entry_address + 0x28] 338 | filter_cutoff_frequency = memory[entry_address + 0x29] 339 | filter_resonance 
= memory[entry_address + 0x2A] 340 | eg_attack_time = memory[entry_address + 0x2B] 341 | eg_decay_time = memory[entry_address + 0x2C] 342 | eg_release_time = memory[entry_address + 0x2D] 343 | vibrato_delay = memory[entry_address + 0x2E] 344 | 345 | scale_tuning_c = memory[entry_address + 0x2F] 346 | scale_tuning_c_sharp = memory[entry_address + 0x30] 347 | scale_tuning_d = memory[entry_address + 0x31] 348 | scale_tuning_d_sharp = memory[entry_address + 0x32] 349 | scale_tuning_e = memory[entry_address + 0x33] 350 | scale_tuning_f = memory[entry_address + 0x34] 351 | scale_tuning_f_sharp = memory[entry_address + 0x35] 352 | scale_tuning_g = memory[entry_address + 0x36] 353 | scale_tuning_g_sharp = memory[entry_address + 0x37] 354 | scale_tuning_a = memory[entry_address + 0x38] 355 | scale_tuning_a_sharp = memory[entry_address + 0x39] 356 | scale_tuning_b = memory[entry_address + 0x3A] 357 | 358 | mw_pitch_control = memory[entry_address + 0x3B] 359 | mw_filter_control = memory[entry_address + 0x3C] 360 | mw_amplitude_control = memory[entry_address + 0x3D] 361 | mw_lfo_pmod_depth = memory[entry_address + 0x3E] 362 | mw_lfo_fmod_depth = memory[entry_address + 0x3F] 363 | 364 | bend_pitch_control = memory[entry_address + 0x41] 365 | bend_filter_control = memory[entry_address + 0x42] 366 | bend_amplitude_control = memory[entry_address + 0x43] 367 | bend_lfo_pmod_depth = memory[entry_address + 0x44] 368 | bend_lfo_fmod_depth = memory[entry_address + 0x45] 369 | 370 | cat_pitch_control = memory[entry_address + 0x47] 371 | cat_filter_control = memory[entry_address + 0x48] 372 | cat_amplitude_control = memory[entry_address + 0x49] 373 | cat_lfo_pmod_depth = memory[entry_address + 0x4A] 374 | cat_lfo_fmod_depth = memory[entry_address + 0x4B] 375 | 376 | pat_pitch_control = memory[entry_address + 0x4D] 377 | pat_filter_control = memory[entry_address + 0x4E] 378 | pat_amplitude_control = memory[entry_address + 0x4F] 379 | pat_lfo_pmod_depth = memory[entry_address + 0x50] 380 | pat_lfo_fmod_depth = memory[entry_address + 0x51] 381 | 382 | ac_1_pitch_control = memory[entry_address + 0x53] 383 | ac_1_filter_control = memory[entry_address + 0x54] 384 | ac_1_amplitude_control = memory[entry_address + 0x55] 385 | ac_1_lfo_pmod_depth = memory[entry_address + 0x56] 386 | ac_1_lfo_fmod_depth = memory[entry_address + 0x57] 387 | 388 | ac_2_pitch_control = memory[entry_address + 0x59] 389 | ac_2_filter_control = memory[entry_address + 0x5A] 390 | ac_2_amplitude_control = memory[entry_address + 0x5B] 391 | ac_2_lfo_pmod_depth = memory[entry_address + 0x5C] 392 | ac_2_lfo_fmod_depth = memory[entry_address + 0x5D] 393 | 394 | portamento_switch = memory[entry_address + 0x5F] 395 | portamento_time = memory[entry_address + 0x60] 396 | 397 | return cls( 398 | bank_select_msb, 399 | bank_select_lsb, 400 | program_number, 401 | rcv_channel, 402 | rcv_pitch_bend, 403 | rcv_ch_after_touch, 404 | rcv_program_change, 405 | rcv_control_change, 406 | rcv_poly_after_touch, 407 | rcv_note_message, 408 | rcv_rpn, 409 | rcv_nrpn, 410 | rcv_modulation, 411 | rcv_volume, 412 | rcv_pan, 413 | rcv_expression, 414 | rcv_hold_1, 415 | rcv_portamento, 416 | rcv_sostenuto, 417 | rcv_soft_pedal, 418 | mono_poly_mode, 419 | same_note_number_key_on_assign, 420 | part_mode, 421 | note_shift, 422 | detune, 423 | volume, 424 | velocity_sense_depth, 425 | velocity_sense_offset, 426 | pan, 427 | note_limit_low, 428 | note_limit_high, 429 | ac_1_controller_number, 430 | ac_2_controller_number, 431 | dry_level, 432 | chorus_send, 433 | reverb_send, 
434 | variation_send, 435 | vibrato_rate, 436 | vibrato_depth, 437 | filter_cutoff_frequency, 438 | filter_resonance, 439 | eg_attack_time, 440 | eg_decay_time, 441 | eg_release_time, 442 | vibrato_delay, 443 | scale_tuning_c, 444 | scale_tuning_c_sharp, 445 | scale_tuning_d, 446 | scale_tuning_d_sharp, 447 | scale_tuning_e, 448 | scale_tuning_f, 449 | scale_tuning_f_sharp, 450 | scale_tuning_g, 451 | scale_tuning_g_sharp, 452 | scale_tuning_a, 453 | scale_tuning_a_sharp, 454 | scale_tuning_b, 455 | mw_pitch_control, 456 | mw_filter_control, 457 | mw_amplitude_control, 458 | mw_lfo_pmod_depth, 459 | mw_lfo_fmod_depth, 460 | bend_pitch_control, 461 | bend_filter_control, 462 | bend_amplitude_control, 463 | bend_lfo_pmod_depth, 464 | bend_lfo_fmod_depth, 465 | cat_pitch_control, 466 | cat_filter_control, 467 | cat_amplitude_control, 468 | cat_lfo_pmod_depth, 469 | cat_lfo_fmod_depth, 470 | pat_pitch_control, 471 | pat_filter_control, 472 | pat_amplitude_control, 473 | pat_lfo_pmod_depth, 474 | pat_lfo_fmod_depth, 475 | ac_1_pitch_control, 476 | ac_1_filter_control, 477 | ac_1_amplitude_control, 478 | ac_1_lfo_pmod_depth, 479 | ac_1_lfo_fmod_depth, 480 | ac_2_pitch_control, 481 | ac_2_filter_control, 482 | ac_2_amplitude_control, 483 | ac_2_lfo_pmod_depth, 484 | ac_2_lfo_fmod_depth, 485 | portamento_switch, 486 | portamento_time, 487 | ) 488 | 489 | @staticmethod 490 | def to_mido_messages( 491 | partial: Union["MultiPartEntry", dict[str, int]], 492 | part_number: int, 493 | delta_time: int, 494 | ) -> list[mido.Message]: 495 | if isinstance(partial, MultiPartEntry): 496 | partial = asdict(partial) 497 | 498 | mido_messages: list[mido.Message] = [] 499 | for key, value in partial.items(): 500 | if key == "bank_select_msb": 501 | mido_messages.append( 502 | mido.Message( 503 | "control_change", 504 | channel=part_number, 505 | control=0x00, 506 | value=value, 507 | time=delta_time, 508 | ) 509 | ) 510 | elif key == "bank_select_lsb": 511 | mido_messages.append( 512 | mido.Message( 513 | "control_change", 514 | channel=part_number, 515 | control=0x20, 516 | value=value, 517 | time=delta_time, 518 | ) 519 | ) 520 | elif key == "program_number": 521 | mido_messages.append( 522 | mido.Message( 523 | "program_change", 524 | channel=part_number, 525 | program=value, 526 | time=delta_time, 527 | ) 528 | ) 529 | elif key == "volume": 530 | mido_messages.append( 531 | mido.Message( 532 | "control_change", 533 | channel=part_number, 534 | control=0x07, 535 | value=value, 536 | time=delta_time, 537 | ) 538 | ) 539 | elif key == "pan": 540 | mido_messages.append( 541 | mido.Message( 542 | "control_change", 543 | channel=part_number, 544 | control=0x0A, 545 | value=value, 546 | time=delta_time, 547 | ) 548 | ) 549 | elif key == "chorus_send": 550 | mido_messages.append( 551 | mido.Message( 552 | "control_change", 553 | channel=part_number, 554 | control=0x5D, 555 | value=value, 556 | time=delta_time, 557 | ) 558 | ) 559 | elif key == "reverb_send": 560 | mido_messages.append( 561 | mido.Message( 562 | "control_change", 563 | channel=part_number, 564 | control=0x5B, 565 | value=value, 566 | time=delta_time, 567 | ) 568 | ) 569 | elif key == "variation_send": 570 | mido_messages.append( 571 | mido.Message( 572 | "control_change", 573 | channel=part_number, 574 | control=0x5E, 575 | value=value, 576 | time=delta_time, 577 | ) 578 | ) 579 | elif key == "vibrato_rate": 580 | mido_messages.append( 581 | mido.Message( 582 | "control_change", 583 | channel=part_number, 584 | control=0x4C, 585 | value=value, 586 | 
time=delta_time, 587 | ) 588 | ) 589 | elif key == "vibrato_depth": 590 | mido_messages.append( 591 | mido.Message( 592 | "control_change", 593 | channel=part_number, 594 | control=0x4D, 595 | value=value, 596 | time=delta_time, 597 | ) 598 | ) 599 | elif key == "vibrato_delay": 600 | mido_messages.append( 601 | mido.Message( 602 | "control_change", 603 | channel=part_number, 604 | control=0x4E, 605 | value=value, 606 | time=delta_time, 607 | ) 608 | ) 609 | elif key == "bend_pitch_control": 610 | mido_messages += [ 611 | mido.Message( 612 | "control_change", 613 | channel=part_number, 614 | control=0x65, 615 | value=0x00, 616 | time=delta_time, 617 | ), 618 | mido.Message( 619 | "control_change", 620 | channel=part_number, 621 | control=0x64, 622 | value=0x00, 623 | ), 624 | mido.Message( 625 | "control_change", 626 | channel=part_number, 627 | control=0x06, 628 | value=value - 0x40, 629 | ), 630 | ] 631 | elif key == "sysex_portamento_switch": 632 | mido_messages.append( 633 | mido.Message( 634 | "control_change", 635 | channel=part_number, 636 | control=0x41, 637 | value=0x00 if value == 0x00 else 0x7F, 638 | time=delta_time, 639 | ) 640 | ) 641 | elif key == "sysex_portamento_time": 642 | mido_messages.append( 643 | mido.Message( 644 | "control_change", 645 | channel=part_number, 646 | control=0x05, 647 | value=value, 648 | time=delta_time, 649 | ) 650 | ) 651 | return mido_messages 652 | -------------------------------------------------------------------------------- /okd/mmt_tg/mmt_tg.py: -------------------------------------------------------------------------------- 1 | from logging import getLogger 2 | 3 | from midi.event import MidiEvent 4 | 5 | from .midi_parameter_change_table import System, MultiPartEntry 6 | 7 | 8 | class MmtTg: 9 | """YAMAHA MMT TG MIDI Device""" 10 | 11 | PARTS_PER_PORT = 16 12 | PORTS = 2 13 | PARTS = PARTS_PER_PORT * PORTS 14 | 15 | sound_module_mode: int 16 | native_parameter_memory: list[int] 17 | 18 | @staticmethod 19 | def __is_sysex_message(event: MidiEvent) -> bool: 20 | if len(event.data_bytes) < 2: 21 | return False 22 | if event.status_byte != 0xF0: 23 | return False 24 | end_mark = event.data_bytes[-1] 25 | if end_mark != 0xF7: 26 | return False 27 | return True 28 | 29 | @staticmethod 30 | def __is_universal_realtime_message(event: MidiEvent) -> bool: 31 | if not MmtTg.__is_sysex_message(event): 32 | return False 33 | if len(event.data_bytes) < 7: 34 | return False 35 | manufacture_id = event.data_bytes[0] 36 | if manufacture_id != 0x7F: 37 | return False 38 | return True 39 | 40 | @staticmethod 41 | def __is_universal_non_realtime_message(event: MidiEvent) -> bool: 42 | if not MmtTg.__is_sysex_message(event): 43 | return False 44 | if len(event.data_bytes) < 5: 45 | return False 46 | manufacture_id = event.data_bytes[0] 47 | if manufacture_id != 0x7E: 48 | return False 49 | return True 50 | 51 | @staticmethod 52 | def __is_native_parameter_change_message(event: MidiEvent) -> bool: 53 | if not MmtTg.__is_sysex_message(event): 54 | return False 55 | if len(event.data_bytes) < 9: 56 | return False 57 | manufacture_id = event.data_bytes[0] 58 | if manufacture_id != 0x43: 59 | return False 60 | return True 61 | 62 | @staticmethod 63 | def effecting_multi_part_number(event: MidiEvent): 64 | if not MmtTg.__is_native_parameter_change_message(event): 65 | return 66 | if event.data_bytes[3] != 0x02: 67 | return 68 | return MultiPartEntry.ENTRY_INDEX_TO_PART_NUMBER_TABLE[event.data_bytes[4]] 69 | 70 | def __init__(self) -> None: 71 | self.__logger = 
getLogger(__name__) 72 | 73 | self.initialize_state() 74 | 75 | def initialize_state(self) -> None: 76 | self.sound_module_mode = 0x00 77 | self.native_parameter_memory = [0x00] * 0x200000 78 | 79 | # Set default value 80 | for entry_index in range(0x20): 81 | entry_address = 0x008000 + (entry_index << 7) 82 | 83 | self.native_parameter_memory[entry_address + 0x01] = 0x00 84 | self.native_parameter_memory[entry_address + 0x02] = 0x00 85 | self.native_parameter_memory[entry_address + 0x03] = 0x00 86 | self.native_parameter_memory[entry_address + 0x04] = entry_index 87 | self.native_parameter_memory[entry_address + 0x05] = 0x01 88 | self.native_parameter_memory[entry_address + 0x06] = 0x01 89 | self.native_parameter_memory[entry_address + 0x07] = 0x01 90 | self.native_parameter_memory[entry_address + 0x08] = 0x01 91 | self.native_parameter_memory[entry_address + 0x09] = 0x01 92 | self.native_parameter_memory[entry_address + 0x0A] = 0x01 93 | self.native_parameter_memory[entry_address + 0x0B] = 0x01 94 | self.native_parameter_memory[entry_address + 0x0C] = 0x01 95 | self.native_parameter_memory[entry_address + 0x0D] = 0x01 96 | self.native_parameter_memory[entry_address + 0x0E] = 0x01 97 | self.native_parameter_memory[entry_address + 0x0F] = 0x01 98 | self.native_parameter_memory[entry_address + 0x10] = 0x01 99 | self.native_parameter_memory[entry_address + 0x11] = 0x01 100 | self.native_parameter_memory[entry_address + 0x12] = 0x01 101 | self.native_parameter_memory[entry_address + 0x13] = 0x01 102 | self.native_parameter_memory[entry_address + 0x14] = 0x01 103 | 104 | self.native_parameter_memory[entry_address + 0x15] = 0x01 105 | self.native_parameter_memory[entry_address + 0x16] = 0x01 106 | self.native_parameter_memory[entry_address + 0x17] = 0x01 107 | self.native_parameter_memory[entry_address + 0x18] = 0x01 108 | self.native_parameter_memory[entry_address + 0x19] = 0x08 109 | self.native_parameter_memory[entry_address + 0x1A] = 0x00 110 | self.native_parameter_memory[entry_address + 0x1B] = 0x64 111 | self.native_parameter_memory[entry_address + 0x1C] = 0x40 112 | self.native_parameter_memory[entry_address + 0x1D] = 0x40 113 | self.native_parameter_memory[entry_address + 0x1E] = 0x40 114 | self.native_parameter_memory[entry_address + 0x1F] = 0x00 115 | self.native_parameter_memory[entry_address + 0x20] = 0x7F 116 | self.native_parameter_memory[entry_address + 0x21] = 0x10 117 | self.native_parameter_memory[entry_address + 0x22] = 0x11 118 | self.native_parameter_memory[entry_address + 0x23] = 0x7F 119 | self.native_parameter_memory[entry_address + 0x24] = 0x00 120 | self.native_parameter_memory[entry_address + 0x25] = 0x40 121 | self.native_parameter_memory[entry_address + 0x26] = 0x00 122 | 123 | self.native_parameter_memory[entry_address + 0x27] = 0x40 124 | self.native_parameter_memory[entry_address + 0x28] = 0x40 125 | self.native_parameter_memory[entry_address + 0x29] = 0x40 126 | self.native_parameter_memory[entry_address + 0x2A] = 0x40 127 | self.native_parameter_memory[entry_address + 0x2B] = 0x40 128 | self.native_parameter_memory[entry_address + 0x2C] = 0x40 129 | self.native_parameter_memory[entry_address + 0x2D] = 0x40 130 | self.native_parameter_memory[entry_address + 0x2E] = 0x40 131 | 132 | self.native_parameter_memory[entry_address + 0x2F] = 0x40 133 | self.native_parameter_memory[entry_address + 0x30] = 0x40 134 | self.native_parameter_memory[entry_address + 0x31] = 0x40 135 | self.native_parameter_memory[entry_address + 0x32] = 0x40 136 | 
self.native_parameter_memory[entry_address + 0x33] = 0x40 137 | self.native_parameter_memory[entry_address + 0x34] = 0x40 138 | self.native_parameter_memory[entry_address + 0x35] = 0x40 139 | self.native_parameter_memory[entry_address + 0x36] = 0x40 140 | self.native_parameter_memory[entry_address + 0x37] = 0x40 141 | self.native_parameter_memory[entry_address + 0x38] = 0x40 142 | self.native_parameter_memory[entry_address + 0x39] = 0x40 143 | self.native_parameter_memory[entry_address + 0x3A] = 0x40 144 | 145 | self.native_parameter_memory[entry_address + 0x3B] = 0x40 146 | self.native_parameter_memory[entry_address + 0x3C] = 0x40 147 | self.native_parameter_memory[entry_address + 0x3D] = 0x40 148 | self.native_parameter_memory[entry_address + 0x3E] = 0x0A 149 | self.native_parameter_memory[entry_address + 0x3F] = 0x00 150 | 151 | self.native_parameter_memory[entry_address + 0x41] = 0x42 152 | self.native_parameter_memory[entry_address + 0x42] = 0x40 153 | self.native_parameter_memory[entry_address + 0x43] = 0x40 154 | self.native_parameter_memory[entry_address + 0x44] = 0x00 155 | self.native_parameter_memory[entry_address + 0x45] = 0x00 156 | 157 | self.native_parameter_memory[entry_address + 0x47] = 0x40 158 | self.native_parameter_memory[entry_address + 0x48] = 0x40 159 | self.native_parameter_memory[entry_address + 0x49] = 0x40 160 | self.native_parameter_memory[entry_address + 0x4A] = 0x00 161 | self.native_parameter_memory[entry_address + 0x4B] = 0x00 162 | 163 | self.native_parameter_memory[entry_address + 0x4D] = 0x40 164 | self.native_parameter_memory[entry_address + 0x4E] = 0x40 165 | self.native_parameter_memory[entry_address + 0x4F] = 0x40 166 | self.native_parameter_memory[entry_address + 0x50] = 0x00 167 | self.native_parameter_memory[entry_address + 0x51] = 0x00 168 | 169 | self.native_parameter_memory[entry_address + 0x53] = 0x40 170 | self.native_parameter_memory[entry_address + 0x54] = 0x40 171 | self.native_parameter_memory[entry_address + 0x55] = 0x40 172 | self.native_parameter_memory[entry_address + 0x56] = 0x00 173 | self.native_parameter_memory[entry_address + 0x57] = 0x00 174 | 175 | self.native_parameter_memory[entry_address + 0x59] = 0x40 176 | self.native_parameter_memory[entry_address + 0x5A] = 0x40 177 | self.native_parameter_memory[entry_address + 0x5B] = 0x40 178 | self.native_parameter_memory[entry_address + 0x5C] = 0x00 179 | self.native_parameter_memory[entry_address + 0x5D] = 0x00 180 | 181 | self.native_parameter_memory[entry_address + 0x5F] = 0x00 182 | self.native_parameter_memory[entry_address + 0x60] = 0x00 183 | 184 | def __receive_universal_realtime_message(self, event: MidiEvent) -> None: 185 | if event.status_byte != 0xF0: 186 | raise ValueError( 187 | f"Invalid status_byte. status_byte={hex(event.status_byte)}" 188 | ) 189 | manufacture_id = event.data_bytes[0] 190 | if manufacture_id != 0x7F: 191 | raise ValueError( 192 | f"Invalid manufacture_id. manufacture_id={hex(manufacture_id)}" 193 | ) 194 | target_device_id = event.data_bytes[1] 195 | sub_id_1 = event.data_bytes[2] 196 | if sub_id_1 != 0x04: 197 | self.__logger.warning( 198 | f"Unknown sub_id_1 detected. 
sub_id_1={hex(sub_id_1)}" 199 | ) 200 | 201 | sub_id_2 = event.data_bytes[3] 202 | if sub_id_2 == 0x01: 203 | # Master Volume 204 | volume_lsb = event.data_bytes[4] 205 | volume_msb = event.data_bytes[5] 206 | # MASTER VOLUME 207 | self.native_parameter_memory[0x000004] = volume_msb 208 | elif sub_id_2 == 0x02: 209 | # Master Balance 210 | balance_lsb = event.data_bytes[4] 211 | balance_msb = event.data_bytes[5] 212 | # MASTER PAN 213 | self.native_parameter_memory[0x000006] = balance_msb 214 | else: 215 | self.__logger.warning( 216 | f"Unknown sub_id_2 detected. sub_id_2={hex(sub_id_2)}" 217 | ) 218 | 219 | def __receive_universal_non_realtime_message(self, event: MidiEvent) -> None: 220 | if event.status_byte != 0xF0: 221 | raise ValueError( 222 | f"Invalid status_byte. status_byte={hex(event.status_byte)}" 223 | ) 224 | manufacture_id = event.data_bytes[0] 225 | if manufacture_id != 0x7E: 226 | raise ValueError( 227 | f"Invalid manufacture_id. manufacture_id={hex(manufacture_id)}" 228 | ) 229 | target_device_id = event.data_bytes[1] 230 | sub_id_1 = event.data_bytes[2] 231 | if sub_id_1 != 0x09: 232 | self.__logger.warning( 233 | f"Unknown sub_id_1 detected. sub_id_1={hex(sub_id_1)}" 234 | ) 235 | 236 | sub_id_2 = event.data_bytes[3] 237 | if sub_id_2 == 0x01: 238 | self.sound_module_mode = event.data_bytes[4] 239 | else: 240 | self.__logger.warning( 241 | f"Unknown sub_id_2 detected. sub_id_2={hex(sub_id_2)}" 242 | ) 243 | 244 | def __receive_native_parameter_change_message(self, event: MidiEvent) -> None: 245 | if event.status_byte != 0xF0: 246 | raise ValueError( 247 | f"Invalid status_byte. status_byte={hex(event.status_byte)}" 248 | ) 249 | manufacture_id = event.data_bytes[0] 250 | if manufacture_id != 0x43: 251 | raise ValueError( 252 | f"Invalid manufacture_id. manufacture_id={hex(manufacture_id)}" 253 | ) 254 | device_number_byte = event.data_bytes[1] 255 | if device_number_byte & 0xF0 != 0x10: 256 | raise ValueError( 257 | f"Invalid device_number_byte detected. device_number_byte={hex(device_number_byte)}" 258 | ) 259 | device_number = device_number_byte & 0x0F 260 | model_id = event.data_bytes[2] 261 | 262 | address = ( 263 | event.data_bytes[3] << 14 | event.data_bytes[4] << 7 | event.data_bytes[5] 264 | ) 265 | data_length = len(event.data_bytes) - 8 266 | data = event.data_bytes[6 : 6 + data_length] 267 | check_sum = event.data_bytes[-2] 268 | 269 | if address == 0x00007F: 270 | # All Parameters Reset 271 | self.initialize_state() 272 | return 273 | self.native_parameter_memory[address : address + data_length] = data 274 | 275 | def receive_sysex_message(self, event: MidiEvent) -> None: 276 | if len(event.data_bytes) < 1: 277 | raise ValueError("Invalid event.data legnth.") 278 | 279 | if event.status_byte != 0xF0: 280 | raise ValueError( 281 | f"Invalid status_byte. status_byte={hex(event.status_byte)}" 282 | ) 283 | end_mark = event.data_bytes[-1] 284 | if end_mark != 0xF7: 285 | raise ValueError(f"Invalid end_mark. end_mark={hex(end_mark)}") 286 | 287 | manufacture_id = event.data_bytes[0] 288 | if manufacture_id == 0x7F: 289 | self.__receive_universal_realtime_message(event) 290 | elif manufacture_id == 0x7E: 291 | self.__receive_universal_non_realtime_message(event) 292 | elif manufacture_id == 0x43: 293 | return self.__receive_native_parameter_change_message(event) 294 | else: 295 | self.__logger.warning( 296 | f"Unknown manufacture_id detected. 
manufacture_id={hex(manufacture_id)}" 297 | ) 298 | 299 | def system(self) -> System: 300 | return System.from_memory(self.native_parameter_memory) 301 | 302 | def multi_part_entry(self, part_number: int) -> MultiPartEntry: 303 | return MultiPartEntry.from_memory(self.native_parameter_memory, part_number) 304 | 305 | def multi_part_entries(self) -> list[MultiPartEntry]: 306 | return [ 307 | self.multi_part_entry(part_number) for part_number in range(MmtTg.PARTS) 308 | ] 309 | -------------------------------------------------------------------------------- /okd/oka_file.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BytesIO 3 | from typing import BinaryIO, Self 4 | 5 | from .okd_file_scramble import descramble 6 | 7 | 8 | @dataclass 9 | class OkaHeader: 10 | """OKA Header""" 11 | 12 | MAGIC_BYTES = b"YOKA" 13 | FIXED_PART_LENGTH = 40 14 | 15 | magic_bytes: bytes 16 | length: int 17 | version: str 18 | id_karaoke: int 19 | data_offset: int 20 | unknown_0: int 21 | crc: int 22 | 23 | @classmethod 24 | def read( 25 | cls, 26 | stream: BinaryIO, 27 | scramble_pattern_index: int | None = None, 28 | ) -> Self: 29 | """Read 30 | 31 | Args: 32 | stream (BinaryIO): Input stream 33 | scramble_pattern_index (int): Scramble pattern index 34 | 35 | Raises: 36 | ValueError: Invalid `magic_bytes` 37 | 38 | Returns: 39 | Self: Instance of this class 40 | """ 41 | if scramble_pattern_index is None: 42 | buffer = stream.read(OkaHeader.FIXED_PART_LENGTH) 43 | else: 44 | header_stream = BytesIO() 45 | scramble_pattern_index = descramble( 46 | stream, 47 | header_stream, 48 | scramble_pattern_index, 49 | OkaHeader.FIXED_PART_LENGTH, 50 | ) 51 | header_stream.seek(0) 52 | buffer = header_stream.read() 53 | if len(buffer) < OkaHeader.FIXED_PART_LENGTH: 54 | raise ValueError("Too less read bytes.") 55 | 56 | magic_bytes = buffer[0:4] 57 | if magic_bytes != OkaHeader.MAGIC_BYTES: 58 | raise ValueError("Invalid `magic_bytes`.") 59 | length = int.from_bytes(buffer[4:8], "big") 60 | version = buffer[8:24].decode("ascii") 61 | id_karaoke = int.from_bytes(buffer[24:28], "big") 62 | data_offset = int.from_bytes(buffer[28:32], "big") 63 | unknown_0 = int.from_bytes(buffer[32:36], "big") 64 | crc = int.from_bytes(buffer[36:40], "big") 65 | return cls( 66 | magic_bytes, length, version, id_karaoke, data_offset, unknown_0, crc 67 | ) 68 | 69 | def write(self, stream: BinaryIO) -> None: 70 | """Write 71 | 72 | Args: 73 | stream (BinaryIO): Output stream 74 | """ 75 | stream.write(OkaHeader.MAGIC_BYTES) 76 | stream.write(self.length.to_bytes(4, "big")) 77 | stream.write(self.version.encode("ascii").ljust(16, b"\x00")) 78 | stream.write(self.id_karaoke.to_bytes(4, "big")) 79 | stream.write(self.data_offset.to_bytes(4, "big")) 80 | stream.write(self.unknown_0.to_bytes(4, "big")) 81 | stream.write(self.crc.to_bytes(4, "big")) 82 | -------------------------------------------------------------------------------- /okd/okd_file.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from dataclasses import dataclass 3 | from io import BytesIO 4 | from logging import getLogger 5 | from typing import Self, Union, BinaryIO 6 | 7 | from sprc.header import SprcHeader 8 | from .chunks import OkdChunk, ChunkBase, read_chunk 9 | from .okd_file_scramble import ( 10 | choose_scramble_pattern_index, 11 | scramble, 12 | detect_scramble_pattern_index, 13 | descramble, 14 | ) 15 | 
16 | 17 | @dataclass 18 | class OkdHeaderBase(ABC): 19 | """OKD Header Base Class""" 20 | 21 | MAGIC_BYTES = b"YKS1" 22 | FIXED_PART_LENGTH = 40 23 | 24 | length: int 25 | version: str 26 | id_karaoke: int 27 | adpcm_offset: int 28 | encryption_mode: int 29 | 30 | @staticmethod 31 | def _read_common( 32 | stream: BinaryIO, 33 | scramble_pattern_index: int | None = None, 34 | ) -> tuple[int, str, int, int, int, bytes]: 35 | """Read Common Part 36 | 37 | Args: 38 | stream (BinaryIO): Input stream 39 | scramble_pattern_index (int): Scramble pattern index 40 | 41 | Raises: 42 | ValueError: Invalid `magic_bytes` 43 | 44 | Returns: 45 | tuple[int, str, int, int, int, bytes]: length, version, id_karaoke, adpcm_offset, encryption_mode and optional_data 46 | """ 47 | if scramble_pattern_index is None: 48 | fixed_part_buffer = stream.read(OkdHeaderBase.FIXED_PART_LENGTH) 49 | else: 50 | fixed_part_stream = BytesIO() 51 | scramble_pattern_index = descramble( 52 | stream, 53 | fixed_part_stream, 54 | scramble_pattern_index, 55 | OkdHeaderBase.FIXED_PART_LENGTH, 56 | ) 57 | fixed_part_stream.seek(0) 58 | fixed_part_buffer = fixed_part_stream.read() 59 | if len(fixed_part_buffer) < OkdHeaderBase.FIXED_PART_LENGTH: 60 | raise ValueError("Too less read bytes.") 61 | 62 | magic_bytes = fixed_part_buffer[0:4] 63 | if magic_bytes != OkdHeaderBase.MAGIC_BYTES: 64 | raise ValueError("Invalid `magic_bytes`.") 65 | length = int.from_bytes(fixed_part_buffer[4:8], "big") 66 | version = fixed_part_buffer[8:24].decode("ascii") 67 | id_karaoke = int.from_bytes(fixed_part_buffer[24:28], "big") 68 | adpcm_offset = int.from_bytes(fixed_part_buffer[28:32], "big") 69 | encryption_mode = int.from_bytes(fixed_part_buffer[32:36], "big") 70 | optional_data_length = int.from_bytes(fixed_part_buffer[36:40], "big") 71 | 72 | if scramble_pattern_index is None: 73 | variable_part_buffer = stream.read(optional_data_length) 74 | else: 75 | variable_part_stream = BytesIO() 76 | descramble( 77 | stream, 78 | variable_part_stream, 79 | scramble_pattern_index, 80 | optional_data_length, 81 | ) 82 | variable_part_stream.seek(0) 83 | variable_part_buffer = variable_part_stream.read() 84 | if len(variable_part_buffer) < optional_data_length: 85 | raise ValueError("Too less read bytes.") 86 | 87 | optional_data = variable_part_buffer 88 | 89 | return ( 90 | length, 91 | version, 92 | id_karaoke, 93 | adpcm_offset, 94 | encryption_mode, 95 | optional_data, 96 | ) 97 | 98 | @staticmethod 99 | @abstractmethod 100 | def optional_data_buffer_size() -> int: 101 | """Size of Optional Data Buffer 102 | 103 | Returns: 104 | int: Size of Optional Data Buffer 105 | """ 106 | pass 107 | 108 | @abstractmethod 109 | def _optional_data_buffer(self) -> bytes: 110 | """Optional Data Buffer 111 | 112 | Returns: 113 | bytes: Optional Data Buffer 114 | """ 115 | pass 116 | 117 | def write(self, stream: BinaryIO) -> None: 118 | """Write 119 | 120 | Args: 121 | stream (BinaryIO): Output stream 122 | """ 123 | stream.write(OkdHeaderBase.MAGIC_BYTES) 124 | stream.write(self.length.to_bytes(4, "big")) 125 | stream.write(self.version.encode("ascii").ljust(16, b"\x00")) 126 | stream.write(self.id_karaoke.to_bytes(4, "big")) 127 | stream.write(self.adpcm_offset.to_bytes(4, "big")) 128 | stream.write(self.encryption_mode.to_bytes(4, "big")) 129 | optional_data_buffer = self._optional_data_buffer() 130 | stream.write(len(optional_data_buffer).to_bytes(4, "big")) 131 | stream.write(optional_data_buffer) 132 | 133 | 134 | @dataclass 135 | class
OkdGenericHeader(OkdHeaderBase): 136 | """OKD Generic Header""" 137 | 138 | optional_data: bytes 139 | 140 | @classmethod 141 | def read( 142 | cls, 143 | stream: BinaryIO, 144 | scramble_pattern_index: int | None = None, 145 | ) -> Self: 146 | """Read 147 | 148 | Args: 149 | stream (BinaryIO): Input stream 150 | scramble_pattern_index (int): Scramble pattern index 151 | 152 | Returns: 153 | Self: Instance of this class 154 | """ 155 | length, version, id_karaoke, adpcm_offset, encryption_mode, optional_data = ( 156 | OkdHeaderBase._read_common(stream, scramble_pattern_index) 157 | ) 158 | return cls( 159 | length, 160 | version, 161 | id_karaoke, 162 | adpcm_offset, 163 | encryption_mode, 164 | optional_data, 165 | ) 166 | 167 | @staticmethod 168 | def optional_data_buffer_size() -> int: 169 | raise NotImplementedError() 170 | 171 | def _optional_data_buffer(self) -> bytes: 172 | return self.optional_data 173 | 174 | 175 | @dataclass 176 | class YksOkdHeader(OkdHeaderBase): 177 | """YKS OKD Header""" 178 | 179 | @classmethod 180 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 181 | """From Generic OKD Header 182 | 183 | Args: 184 | generic (OkdGenericHeader): Generic OKD Header 185 | 186 | Returns: 187 | Self: Instance of this class 188 | """ 189 | return cls( 190 | generic.length, 191 | generic.version, 192 | generic.id_karaoke, 193 | generic.adpcm_offset, 194 | generic.encryption_mode, 195 | ) 196 | 197 | @staticmethod 198 | def optional_data_buffer_size() -> int: 199 | return 0 200 | 201 | def _optional_data_buffer(self) -> bytes: 202 | return b"" 203 | 204 | 205 | @dataclass 206 | class MmtOkdHeader(OkdHeaderBase): 207 | """MMT OKD Header""" 208 | 209 | yks_chunks_length: int 210 | mmt_chunks_length: int 211 | yks_chunks_crc: int 212 | crc: int 213 | 214 | @classmethod 215 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 216 | """From Generic OKD Header 217 | 218 | Args: 219 | generic (OkdGenericHeader): Generic OKD Header 220 | 221 | Returns: 222 | Self: Instance of this class 223 | """ 224 | yks_chunks_length = int.from_bytes(generic.optional_data[0:4], "big") 225 | mmt_chunks_length = int.from_bytes(generic.optional_data[4:8], "big") 226 | yks_chunks_crc = int.from_bytes(generic.optional_data[8:10], "big") 227 | crc = int.from_bytes(generic.optional_data[10:12], "big") 228 | return cls( 229 | generic.length, 230 | generic.version, 231 | generic.id_karaoke, 232 | generic.adpcm_offset, 233 | generic.encryption_mode, 234 | yks_chunks_length, 235 | mmt_chunks_length, 236 | yks_chunks_crc, 237 | crc, 238 | ) 239 | 240 | @staticmethod 241 | def optional_data_buffer_size() -> int: 242 | return 12 243 | 244 | def _optional_data_buffer(self) -> bytes: 245 | buffer = self.yks_chunks_length.to_bytes(4, "big") 246 | buffer += self.mmt_chunks_length.to_bytes(4, "big") 247 | buffer += self.yks_chunks_crc.to_bytes(2, "big") 248 | buffer += self.crc.to_bytes(2, "big") 249 | return buffer 250 | 251 | 252 | @dataclass 253 | class MmkOkdHeader(OkdHeaderBase): 254 | """MMK OKD Header""" 255 | 256 | yks_chunks_length: int 257 | mmt_chunks_length: int 258 | mmk_chunks_length: int 259 | yks_chunks_crc: int 260 | yks_mmt_chunks_crc: int 261 | crc: int 262 | 263 | @classmethod 264 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 265 | """From Generic OKD Header 266 | 267 | Args: 268 | generic (OkdGenericHeader): Generic OKD Header 269 | 270 | Returns: 271 | Self: Instance of this class 272 | """ 273 | yks_chunks_length = int.from_bytes(generic.optional_data[0:4], "big") 
274 | mmt_chunks_length = int.from_bytes(generic.optional_data[4:8], "big") 275 | mmk_chunks_length = int.from_bytes(generic.optional_data[8:12], "big") 276 | yks_chunks_crc = int.from_bytes(generic.optional_data[12:14], "big") 277 | yks_mmt_chunks_crc = int.from_bytes(generic.optional_data[14:16], "big") 278 | crc = int.from_bytes(generic.optional_data[16:18], "big") 279 | return cls( 280 | generic.length, 281 | generic.version, 282 | generic.id_karaoke, 283 | generic.adpcm_offset, 284 | generic.encryption_mode, 285 | yks_chunks_length, 286 | mmt_chunks_length, 287 | mmk_chunks_length, 288 | yks_chunks_crc, 289 | yks_mmt_chunks_crc, 290 | crc, 291 | ) 292 | 293 | @staticmethod 294 | def optional_data_buffer_size() -> int: 295 | return 20 296 | 297 | def _optional_data_buffer(self) -> bytes: 298 | buffer = self.yks_chunks_length.to_bytes(4, "big") 299 | buffer += self.mmt_chunks_length.to_bytes(4, "big") 300 | buffer += self.mmk_chunks_length.to_bytes(4, "big") 301 | buffer += self.yks_chunks_crc.to_bytes(2, "big") 302 | buffer += self.yks_mmt_chunks_crc.to_bytes(2, "big") 303 | buffer += self.crc.to_bytes(2, "big") 304 | # Padding 305 | buffer += b"\x00" * 2 306 | return buffer 307 | 308 | 309 | @dataclass 310 | class SprOkdHeader(OkdHeaderBase): 311 | """SPR OKD Header""" 312 | 313 | yks_chunks_length: int 314 | mmt_chunks_length: int 315 | mmk_chunks_length: int 316 | spr_chunks_length: int 317 | yks_chunks_crc: int 318 | yks_mmt_chunks_crc: int 319 | yks_mmt_mmk_chunks_crc: int 320 | crc: int 321 | 322 | @classmethod 323 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 324 | """From Generic OKD Header 325 | 326 | Args: 327 | generic (OkdGenericHeader): Generic OKD Header 328 | 329 | Returns: 330 | Self: Instance of this class 331 | """ 332 | yks_chunks_length = int.from_bytes(generic.optional_data[0:4], "big") 333 | mmt_chunks_length = int.from_bytes(generic.optional_data[4:8], "big") 334 | mmk_chunks_length = int.from_bytes(generic.optional_data[8:12], "big") 335 | spr_chunks_length = int.from_bytes(generic.optional_data[12:16], "big") 336 | yks_chunks_crc = int.from_bytes(generic.optional_data[16:18], "big") 337 | yks_mmt_chunks_crc = int.from_bytes(generic.optional_data[18:20], "big") 338 | yks_mmt_mmk_chunks_crc = int.from_bytes(generic.optional_data[20:22], "big") 339 | crc = int.from_bytes(generic.optional_data[22:24], "big") 340 | 341 | return cls( 342 | generic.length, 343 | generic.version, 344 | generic.id_karaoke, 345 | generic.adpcm_offset, 346 | generic.encryption_mode, 347 | yks_chunks_length, 348 | mmt_chunks_length, 349 | mmk_chunks_length, 350 | spr_chunks_length, 351 | yks_chunks_crc, 352 | yks_mmt_chunks_crc, 353 | yks_mmt_mmk_chunks_crc, 354 | crc, 355 | ) 356 | 357 | @staticmethod 358 | def optional_data_buffer_size() -> int: 359 | return 24 360 | 361 | def _optional_data_buffer(self) -> bytes: 362 | buffer = self.yks_chunks_length.to_bytes(4, "big") 363 | buffer += self.mmt_chunks_length.to_bytes(4, "big") 364 | buffer += self.mmk_chunks_length.to_bytes(4, "big") 365 | buffer += self.spr_chunks_length.to_bytes(4, "big") 366 | buffer += self.yks_chunks_crc.to_bytes(2, "big") 367 | buffer += self.yks_mmt_chunks_crc.to_bytes(2, "big") 368 | buffer += self.yks_mmt_mmk_chunks_crc.to_bytes(2, "big") 369 | buffer += self.crc.to_bytes(2, "big") 370 | return buffer 371 | 372 | 373 | @dataclass 374 | class DioOkdHeader(OkdHeaderBase): 375 | """DIO OKD Header""" 376 | 377 | yks_chunks_length: int 378 | mmt_chunks_length: int 379 | mmk_chunks_length: int 380 | 
spr_chunks_length: int 381 | dio_chunks_length: int 382 | yks_chunks_crc: int 383 | yks_mmt_chunks_crc: int 384 | yks_mmt_mmk_chunks_crc: int 385 | yks_mmt_mmk_spr_chunks_crc: int 386 | crc: int 387 | 388 | @classmethod 389 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 390 | """From Generic OKD Header 391 | 392 | Args: 393 | generic (OkdGenericHeader): Generic OKD Header 394 | 395 | Returns: 396 | Self: Instance of this class 397 | """ 398 | yks_chunks_length = int.from_bytes(generic.optional_data[0:4], "big") 399 | mmt_chunks_length = int.from_bytes(generic.optional_data[4:8], "big") 400 | mmk_chunks_length = int.from_bytes(generic.optional_data[8:12], "big") 401 | spr_chunks_length = int.from_bytes(generic.optional_data[12:16], "big") 402 | dio_chunks_length = int.from_bytes(generic.optional_data[16:20], "big") 403 | yks_chunks_crc = int.from_bytes(generic.optional_data[20:22], "big") 404 | yks_mmt_chunks_crc = int.from_bytes(generic.optional_data[22:24], "big") 405 | yks_mmt_mmk_chunks_crc = int.from_bytes(generic.optional_data[24:26], "big") 406 | yks_mmt_mmk_spr_chunks_crc = int.from_bytes(generic.optional_data[26:28], "big") 407 | crc = int.from_bytes(generic.optional_data[28:30], "big") 408 | return cls( 409 | generic.length, 410 | generic.version, 411 | generic.id_karaoke, 412 | generic.adpcm_offset, 413 | generic.encryption_mode, 414 | yks_chunks_length, 415 | mmt_chunks_length, 416 | mmk_chunks_length, 417 | spr_chunks_length, 418 | dio_chunks_length, 419 | yks_chunks_crc, 420 | yks_mmt_chunks_crc, 421 | yks_mmt_mmk_chunks_crc, 422 | yks_mmt_mmk_spr_chunks_crc, 423 | crc, 424 | ) 425 | 426 | @staticmethod 427 | def optional_data_buffer_size() -> int: 428 | return 32 429 | 430 | def _optional_data_buffer(self) -> bytes: 431 | buffer = self.yks_chunks_length.to_bytes(4, "big") 432 | buffer += self.mmt_chunks_length.to_bytes(4, "big") 433 | buffer += self.mmk_chunks_length.to_bytes(4, "big") 434 | buffer += self.spr_chunks_length.to_bytes(4, "big") 435 | buffer += self.dio_chunks_length.to_bytes(4, "big") 436 | buffer += self.yks_chunks_crc.to_bytes(2, "big") 437 | buffer += self.yks_mmt_chunks_crc.to_bytes(2, "big") 438 | buffer += self.yks_mmt_mmk_chunks_crc.to_bytes(2, "big") 439 | buffer += self.yks_mmt_mmk_spr_chunks_crc.to_bytes(2, "big") 440 | buffer += self.crc.to_bytes(2, "big") 441 | # Padding 442 | buffer += b"\x00" * 2 443 | return buffer 444 | 445 | 446 | OkdHeader = Union[OkdGenericHeader, YksOkdHeader, MmtOkdHeader, MmkOkdHeader, SprOkdHeader, DioOkdHeader] 447 | 448 | 449 | def read_okd_header( 450 | stream: BinaryIO, scramble_pattern_index: int | None = None 451 | ) -> OkdHeader: 452 | """Read OKD Header 453 | 454 | Args: 455 | stream (BinaryIO): Input stream 456 | scramble_pattern_index (int | None, optional): Scramble pattern index. Defaults to None. 
457 | 458 | Returns: 459 | OkdHeader: OKD Header 460 | """ 461 | generic = OkdGenericHeader.read(stream, scramble_pattern_index) 462 | 463 | if len(generic.optional_data) == YksOkdHeader.optional_data_buffer_size(): 464 | return YksOkdHeader.from_generic(generic) 465 | elif len(generic.optional_data) == MmtOkdHeader.optional_data_buffer_size(): 466 | return MmtOkdHeader.from_generic(generic) 467 | elif len(generic.optional_data) == MmkOkdHeader.optional_data_buffer_size(): 468 | return MmkOkdHeader.from_generic(generic) 469 | elif len(generic.optional_data) == SprOkdHeader.optional_data_buffer_size(): 470 | return SprOkdHeader.from_generic(generic) 471 | elif len(generic.optional_data) == DioOkdHeader.optional_data_buffer_size(): 472 | return DioOkdHeader.from_generic(generic) 473 | 474 | return generic 475 | 476 | 477 | @dataclass 478 | class OkdFile: 479 | """OKD File""" 480 | 481 | __logger = getLogger(__name__) 482 | 483 | header: OkdHeader 484 | chunks: list[OkdChunk] 485 | 486 | @classmethod 487 | def read(cls, stream: BinaryIO) -> Self: 488 | """Read 489 | 490 | Args: 491 | stream (BinaryIO): Input stream 492 | 493 | Raises: 494 | ValueError: Invalid `magic_bytes` 495 | 496 | Returns: 497 | Self: Instance of this class 498 | """ 499 | if SprcHeader.has_sprc_header(stream): 500 | # Validate SPRC Header 501 | OkdFile.__logger.info("SPRC Header detected.") 502 | sprc_header = SprcHeader.read(stream) 503 | if not sprc_header.validate_crc(stream): 504 | raise ValueError("SPRC Header CRC validation failed.") 505 | OkdFile.__logger.info("SPRC Header CRC validation succeeded.") 506 | 507 | scramble_pattern_index = detect_scramble_pattern_index( 508 | stream, OkdHeaderBase.MAGIC_BYTES 509 | ) 510 | 511 | # Header 512 | header = read_okd_header(stream, scramble_pattern_index) 513 | if header.adpcm_offset == 0: 514 | scrambled_length = (header.length + 8) - ( 515 | OkdHeaderBase.FIXED_PART_LENGTH + header.optional_data_buffer_size() 516 | ) 517 | plaintext_length = 0 518 | else: 519 | scrambled_length = header.adpcm_offset - ( 520 | OkdHeaderBase.FIXED_PART_LENGTH + header.optional_data_buffer_size() 521 | ) 522 | plaintext_length = (header.length + 8) - header.adpcm_offset 523 | chunks_stream = BytesIO() 524 | if scramble_pattern_index is None: 525 | chunks_stream.write(stream.read()) 526 | else: 527 | descramble(stream, chunks_stream, scramble_pattern_index, scrambled_length) 528 | # Plaintext part 529 | chunks_stream.write(stream.read(plaintext_length)) 530 | 531 | chunks: list[OkdChunk] = [] 532 | chunks_stream.seek(0) 533 | while True: 534 | if ChunkBase.peek_header(chunks_stream) is None: 535 | # Reached to End of File 536 | break 537 | chunk = read_chunk(chunks_stream) 538 | chunks.append(chunk) 539 | 540 | return cls(header, chunks) 541 | 542 | def write(self, stream: BinaryIO, should_scramble: bool = False) -> None: 543 | """Write 544 | 545 | Args: 546 | stream (BinaryIO): Output stream 547 | scramble (bool, optional): Scramble. Defaults to False. 
548 | """ 549 | # Make chunks buffer 550 | chunks_stream = BytesIO() 551 | for chunk in self.chunks: 552 | chunk.write(chunks_stream) 553 | self.header.length = ( 554 | OkdHeaderBase.FIXED_PART_LENGTH 555 | + len(self.header._optional_data_buffer()) 556 | + chunks_stream.tell() 557 | - 8 558 | ) 559 | self.header.encryption_mode = 1 if should_scramble else 0 560 | chunks_stream.seek(0) 561 | 562 | # Make header buffer 563 | header_stream = BytesIO() 564 | self.header.write(header_stream) 565 | header_stream.seek(0) 566 | 567 | if should_scramble: 568 | scramble_pattern_index = choose_scramble_pattern_index() 569 | scramble(header_stream, stream, scramble_pattern_index) 570 | scramble(chunks_stream, stream, scramble_pattern_index) 571 | else: 572 | stream.write(header_stream.read()) 573 | stream.write(chunks_stream.read()) 574 | # End of file 575 | stream.write(b"\x00\x00\x00\x00") 576 | -------------------------------------------------------------------------------- /okd/okd_file_scramble.py: -------------------------------------------------------------------------------- 1 | from logging import getLogger 2 | from random import randint 3 | from typing import BinaryIO 4 | 5 | from .okd_scramble_pattern import OKD_SCRAMBLE_PATTERN 6 | 7 | __logger = getLogger(__name__) 8 | 9 | 10 | def choose_scramble_pattern_index(): 11 | return randint(0x00, 0xFF) 12 | 13 | 14 | def scramble( 15 | input_stream: BinaryIO, 16 | output_stream: BinaryIO, 17 | scramble_pattern_index: int, 18 | length: int | None = None, 19 | ): 20 | """Scramble 21 | 22 | Args: 23 | input_stream (BinaryIO): Input stream 24 | output_stream (BinaryIO): Output stream 25 | scramble_pattern_index (int): Scramble pattern index 26 | length (int | None, optional): Length. Defaults to None. 27 | 28 | Returns: 29 | int: Last scramble pattern index 30 | """ 31 | if length is not None and length % 2 != 0: 32 | raise ValueError("Argument `length` length must be multiple of 2.") 33 | 34 | start_position = input_stream.tell() 35 | while length is None or ( 36 | length is not None and (input_stream.tell() - start_position) < length 37 | ): 38 | plaintext_buffer = input_stream.read(2) 39 | if len(plaintext_buffer) == 0: 40 | if length is None: 41 | break 42 | else: 43 | raise RuntimeError("Reached to unexpected End of Stream.") 44 | if len(plaintext_buffer) % 2 != 0: 45 | raise ValueError("`plaintext_buffer` length must be 2.") 46 | plaintext = int.from_bytes(plaintext_buffer, "big") 47 | scramble_pattern = OKD_SCRAMBLE_PATTERN[scramble_pattern_index % 0x100] 48 | scrambled = plaintext ^ scramble_pattern 49 | scrambled_buffer = scrambled.to_bytes(2, "big") 50 | output_stream.write(scrambled_buffer) 51 | scramble_pattern_index += 1 52 | return scramble_pattern_index % 0x100 53 | 54 | 55 | def detect_scramble_pattern_index( 56 | stream: BinaryIO, 57 | expected_magic_bytes: bytes, 58 | ) -> int | None: 59 | """Detect scramble pattern index 60 | 61 | Args: 62 | stream (BinaryIO): Input stream 63 | expected_magic_bytes (bytes): Expected magic bytes (4 bytes) 64 | 65 | Raises: 66 | ValueError: Invalid argument `expected_magic_bytes` 67 | RuntimeError: Failed to detect OKD file `scramble_pattern_index` 68 | 69 | Returns: 70 | int | None: Scrambled pattern index if int, unscrambled if None 71 | """ 72 | if len(expected_magic_bytes) != 4: 73 | raise ValueError("Argument `expected_magic_bytes` length must be 4.") 74 | 75 | expected_magic_bytes_int = int.from_bytes(expected_magic_bytes, "big") 76 | 77 | position = stream.tell() 78 | magic_bytes_buffer = 
stream.read(4) 79 | stream.seek(position) 80 | if len(magic_bytes_buffer) != 4: 81 | raise RuntimeError("Invalid `magic_bytes_buffer` length.") 82 | magic_bytes_int = int.from_bytes(magic_bytes_buffer, "big") 83 | if magic_bytes_int == expected_magic_bytes_int: 84 | __logger.info("OKD file is not scrambled.") 85 | return 86 | 87 | __logger.info("OKD file is scrambled.") 88 | expected_pattern = magic_bytes_int ^ expected_magic_bytes_int 89 | for scramble_pattern_index in range(0x100): 90 | if scramble_pattern_index == 0xFF: 91 | candidated_pattern = OKD_SCRAMBLE_PATTERN[0] 92 | else: 93 | candidated_pattern = OKD_SCRAMBLE_PATTERN[scramble_pattern_index + 1] 94 | candidated_pattern |= OKD_SCRAMBLE_PATTERN[scramble_pattern_index] << 16 95 | if candidated_pattern == expected_pattern: 96 | __logger.info( 97 | f"OKD file `scramble_pattern_index` detected. scramble_pattern_index={scramble_pattern_index}" 98 | ) 99 | return scramble_pattern_index 100 | raise RuntimeError("Failed to detect OKD file `scramble_pattern_index`.") 101 | 102 | 103 | def descramble( 104 | input_stream: BinaryIO, 105 | output_stream: BinaryIO, 106 | scramble_pattern_index: int, 107 | length: int | None = None, 108 | ) -> int: 109 | """Descramble 110 | 111 | Args: 112 | input_stream (BinaryIO): Input stream 113 | output_stream (BinaryIO): Output stream 114 | scramble_pattern_index (int): Scramble pattern index 115 | length (int | None, optional): Length. Defaults to None. 116 | 117 | Returns: 118 | int: Last scramble pattern index 119 | """ 120 | if length is not None and length % 2 != 0: 121 | raise ValueError("Argument `length` length must be multiple of 2.") 122 | 123 | start_position = input_stream.tell() 124 | while length is None or ( 125 | length is not None and (input_stream.tell() - start_position) < length 126 | ): 127 | scrambled_buffer = input_stream.read(2) 128 | if len(scrambled_buffer) == 0: 129 | if length is None: 130 | break 131 | else: 132 | raise RuntimeError("Reached to unexpected End of Stream.") 133 | if len(scrambled_buffer) % 2 != 0: 134 | raise ValueError("`plaintext_buffer` length must be 2.") 135 | scrambled = int.from_bytes(scrambled_buffer, "big") 136 | scramble_pattern = OKD_SCRAMBLE_PATTERN[scramble_pattern_index % 0x100] 137 | plaintext = scrambled ^ scramble_pattern 138 | plaintext_buffer = plaintext.to_bytes(2, "big") 139 | output_stream.write(plaintext_buffer) 140 | scramble_pattern_index = scramble_pattern_index + 1 141 | return scramble_pattern_index % 0x100 142 | -------------------------------------------------------------------------------- /okd/okd_midi.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import BinaryIO 3 | 4 | 5 | def read_status_byte(stream: BinaryIO) -> int: 6 | """Read Status Byte 7 | 8 | Args: 9 | stream (BinaryIO): Input stream 10 | 11 | Raises: 12 | ValueError: Invalid Status Byte 13 | 14 | Returns: 15 | int: Status Byte 16 | """ 17 | byte = stream.read(1) 18 | if len(byte) < 1: 19 | raise ValueError("Too less read bytes.") 20 | byte = byte[0] 21 | if byte & 0x80 != 0x80: 22 | position = stream.tell() 23 | raise ValueError(f"Invalid status byte. 
byte={byte} position={position}") 24 | return byte 25 | 26 | 27 | def peek_status_byte(stream: BinaryIO) -> int: 28 | """Peek Status Byte 29 | 30 | Args: 31 | stream (BinaryIO): Input stream 32 | 33 | Raises: 34 | ValueError: Invalid Status Byte 35 | 36 | Returns: 37 | int: Status Byte 38 | """ 39 | byte = stream.read(1) 40 | if len(byte) < 1: 41 | raise ValueError("Too less read bytes.") 42 | stream.seek(-1, os.SEEK_CUR) 43 | byte = byte[0] 44 | if byte & 0x80 != 0x80: 45 | position = stream.tell() 46 | raise ValueError(f"Invalid Status Byte. byte={byte} position={position}") 47 | return byte 48 | 49 | 50 | def read_data_byte(stream: BinaryIO) -> int: 51 | """Read Data Byte 52 | 53 | Args: 54 | stream (BinaryIO): Input stream 55 | 56 | Raises: 57 | ValueError: Invalid Data Byte 58 | 59 | Returns: 60 | int: Data Byte 61 | """ 62 | byte = stream.read(1) 63 | if len(byte) < 1: 64 | raise ValueError("Too less read bytes.") 65 | byte = byte[0] 66 | if byte & 0x80 == 0x80: 67 | position = stream.tell() 68 | raise ValueError(f"Invalid Data Byte. byte={byte} position={position}") 69 | return byte 70 | 71 | 72 | def peek_data_byte(stream: BinaryIO) -> int: 73 | """Peek Data Byte 74 | 75 | Args: 76 | stream (BinaryIO): Input stream 77 | 78 | Raises: 79 | ValueError: Invalid data byte 80 | 81 | Returns: 82 | int: Data Byte 83 | """ 84 | byte = stream.read(1) 85 | if len(byte) < 1: 86 | raise ValueError("Too less read bytes.") 87 | stream.seek(-1, os.SEEK_CUR) 88 | byte = byte[0] 89 | if byte & 0x80 == 0x80: 90 | position = stream.tell() 91 | raise ValueError(f"Invalid data byte. byte={byte} position={position}") 92 | return byte 93 | 94 | 95 | def is_data_bytes(data: bytes) -> bool: 96 | """Is Data Bytes 97 | 98 | Args: 99 | data (bytes): Data 100 | 101 | Returns: 102 | bool: True if Data Bytes, else False 103 | """ 104 | for byte in data: 105 | if byte & 0x80 == 0x80: 106 | return False 107 | return True 108 | 109 | 110 | def read_variable_int(stream: BinaryIO) -> int: 111 | """Read Variable Int 112 | 113 | Args: 114 | stream (BinaryIO): Input stream 115 | 116 | Raises: 117 | ValueError: Invalid byte sequence 118 | 119 | Returns: 120 | int: Variable Int value 121 | """ 122 | value = 0 123 | for i in range(3): 124 | byte: int = read_data_byte(stream) 125 | value += byte << (i * 6) 126 | if byte & 0x40 != 0x40: 127 | return value 128 | 129 | position = stream.tell() 130 | raise ValueError(f"Invalid byte sequence. position={position}") 131 | 132 | 133 | def write_variable_int(stream: BinaryIO, value: int) -> None: 134 | """Write Variable Int 135 | 136 | Args: 137 | stream (BinaryIO): Output stream 138 | value (int): Value 139 | 140 | Raises: 141 | ValueError: Invalid argument `value` 142 | """ 143 | if 0x04103F < value: 144 | raise ValueError("Too big argument `value`. 
Use write_extended_variable_int.") 145 | 146 | for i in range(3): 147 | masked_value = value & (0x3F << (i * 6)) 148 | byte = masked_value >> (i * 6) 149 | next_value = value - masked_value 150 | if next_value != 0x000000: 151 | byte |= 0x40 152 | next_value -= 0x40 << (i * 6) 153 | value = next_value 154 | stream.write(byte.to_bytes()) 155 | 156 | if value == 0x000000: 157 | if byte & 0x40 == 0x40: 158 | stream.write(b"\x00") 159 | break 160 | 161 | 162 | def read_extended_variable_int(stream: BinaryIO) -> int: 163 | """Read Extended Variable Int 164 | 165 | Args: 166 | stream (BinaryIO): Input stream 167 | 168 | Returns: 169 | int: Extended Variable Int value 170 | """ 171 | value = 0 172 | while True: 173 | try: 174 | byte = peek_data_byte(stream) 175 | if byte == 0x00: 176 | # Maybe End of Track 177 | return value 178 | except ValueError: 179 | break 180 | value += read_variable_int(stream) 181 | return value 182 | 183 | 184 | def write_extended_variable_int(stream: BinaryIO, value: int) -> None: 185 | """Write Extended Variable Int 186 | 187 | Args: 188 | stream (BinaryIO): Output stream 189 | value (int): Value 190 | """ 191 | while 0x000000 < value: 192 | write_value = min(value, 0x04103F) 193 | write_variable_int(stream, write_value) 194 | value -= write_value 195 | -------------------------------------------------------------------------------- /okd/okd_scramble_pattern.py: -------------------------------------------------------------------------------- 1 | # OKD Scramble Pattern 2 | # uint16_t[256] 3 | OKD_SCRAMBLE_PATTERN = [] 4 | -------------------------------------------------------------------------------- /okd/p_track_conversion.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, asdict 2 | from logging import getLogger 3 | import math 4 | import mido 5 | 6 | from .chunks import ( 7 | MTrackInterpretation, 8 | PTrackInfoChunk, 9 | ExtendedPTrackInfoChunk, 10 | P3TrackInfoChunk, 11 | PTrackEvent, 12 | PTrackAbsoluteTimeEvent, 13 | PTrackChunk, 14 | ) 15 | from midi.event import MidiEvent 16 | from midi.time_converter import MidiTimeConverter 17 | from midi.utils import get_track_port 18 | from .mmt_tg import MultiPartEntry, MmtTg 19 | 20 | __logger = getLogger(__name__) 21 | 22 | 23 | @dataclass 24 | class PTrackAbsoluteTimeMetaEvent(MidiEvent): 25 | """P-Track Absolute Time Meta Event""" 26 | 27 | track: int 28 | time: int 29 | 30 | 31 | def __p_tracks_to_absolute_time_track( 32 | track_info: PTrackInfoChunk | ExtendedPTrackInfoChunk | P3TrackInfoChunk, 33 | tracks: list[PTrackChunk], 34 | ) -> list[PTrackAbsoluteTimeEvent]: 35 | absolute_time_track: list[PTrackAbsoluteTimeEvent] = [] 36 | for track in tracks: 37 | absolute_time_track += track.absolute_time_track(track_info) 38 | 39 | absolute_time_track.sort(key=lambda absolute_time_event: absolute_time_event.time) 40 | return absolute_time_track 41 | 42 | 43 | def p_track_to_midi( 44 | m_track_interpretation: MTrackInterpretation, 45 | track_info: PTrackInfoChunk | ExtendedPTrackInfoChunk | P3TrackInfoChunk, 46 | tracks: list[PTrackChunk], 47 | sysex_to_text: bool, 48 | ) -> mido.MidiFile: 49 | midi_time_converter = MidiTimeConverter() 50 | for time, tempo in m_track_interpretation.tempos: 51 | midi_time_converter.add_tempo_change(time, tempo) 52 | 53 | midi_device_1 = MmtTg() 54 | midi_device_2 = MmtTg() 55 | 56 | midi = mido.MidiFile() 57 | for port in range(PTrackChunk.PORTS): 58 | for channel in range(PTrackChunk.CHANNELS_PER_PORT): 59 | 
midi_track = mido.MidiTrack() 60 | 61 | # Set port 62 | midi_track.append( 63 | mido.MetaMessage( 64 | "midi_port", 65 | port=port, 66 | ) 67 | ) 68 | # Track setup messages 69 | midi_device = midi_device_1 if port < 2 else midi_device_2 70 | muti_part_entry_index = port // 2 * MmtTg.PARTS_PER_PORT + channel 71 | multi_part_entry = midi_device.multi_part_entry(muti_part_entry_index) 72 | part_number = port * MmtTg.PARTS_PER_PORT + channel 73 | track_setup_messages = MultiPartEntry.to_mido_messages( 74 | multi_part_entry, 75 | part_number % PTrackChunk.CHANNELS_PER_PORT, 76 | 0, 77 | ) 78 | midi_track += track_setup_messages 79 | 80 | midi.tracks.append(midi_track) 81 | 82 | absolute_time_track: list[PTrackAbsoluteTimeEvent | PTrackAbsoluteTimeMetaEvent] = ( 83 | [] 84 | ) 85 | absolute_time_track += __p_tracks_to_absolute_time_track(track_info, tracks) 86 | if len(absolute_time_track) < 1: 87 | raise ValueError("Track empty.") 88 | 89 | for time, tempo in m_track_interpretation.tempos: 90 | absolute_time_track.append( 91 | PTrackAbsoluteTimeMetaEvent( 92 | 0x51, b"\x03" + round(mido.bpm2tempo(tempo)).to_bytes(3, "big"), 0, time 93 | ) 94 | ) 95 | for time, numerator, denominator in m_track_interpretation.time_signatures: 96 | absolute_time_track.append( 97 | PTrackAbsoluteTimeMetaEvent( 98 | 0x58, 99 | bytes([0x04, numerator, int(math.log2(denominator)), 24, 8]), 100 | 0, 101 | time, 102 | ), 103 | ) 104 | absolute_time_track.sort(key=lambda absolute_time_event: absolute_time_event.time) 105 | 106 | track_times = [0] * PTrackChunk.TOTAL_CHANNELS 107 | for event in absolute_time_track: 108 | status_type = event.status_byte & 0xF0 109 | 110 | tick = midi_time_converter.ms_to_ticks(event.time) 111 | 112 | delta_time = tick - track_times[event.track] 113 | track_times[event.track] = tick 114 | 115 | if isinstance(event, PTrackAbsoluteTimeMetaEvent): 116 | meta_message = mido.MetaMessage.from_bytes( 117 | b"\xff" + event.status_byte.to_bytes() + event.data_bytes 118 | ) 119 | meta_message.time = delta_time 120 | midi.tracks[event.track].append(meta_message) 121 | continue 122 | 123 | if status_type == 0xF0: 124 | if event.status_byte != 0xF0: 125 | midi.tracks[event.track].append( 126 | mido.MetaMessage( 127 | type="text", 128 | text=event.to_bytes().hex(" ").upper(), 129 | time=delta_time, 130 | ) 131 | ) 132 | continue 133 | 134 | # Convert SysEx event to General MIDI message 135 | midi_device = midi_device_1 if event.port < 2 else midi_device_2 136 | part_number = MmtTg.effecting_multi_part_number(event) 137 | if part_number is not None: 138 | before_sysex = midi_device.multi_part_entry(part_number) 139 | midi_device.receive_sysex_message(event) 140 | after_sysex = midi_device.multi_part_entry(part_number) 141 | multi_part_diff = dict( 142 | asdict(after_sysex).items() - asdict(before_sysex).items() 143 | ) 144 | track_number = event.port * MmtTg.PARTS_PER_PORT + part_number 145 | midi.tracks[track_number] += MultiPartEntry.to_mido_messages( 146 | multi_part_diff, 147 | part_number % PTrackChunk.CHANNELS_PER_PORT, 148 | delta_time, 149 | ) 150 | 151 | if sysex_to_text: 152 | midi.tracks[event.track].append( 153 | mido.MetaMessage( 154 | type="text", 155 | text=event.to_bytes().hex(" ").upper(), 156 | time=delta_time, 157 | ) 158 | ) 159 | continue 160 | 161 | try: 162 | mido.messages.specs.SPEC_BY_STATUS[event.status_byte] 163 | except KeyError: 164 | __logger.warning( 165 | f"Unknown MIDI message detected. 
status_byte={hex(event.status_byte)}" 166 | ) 167 | pass 168 | 169 | midi_message: mido.Message 170 | try: 171 | midi_message = mido.Message.from_bytes(event.to_bytes(), delta_time) 172 | except ValueError: 173 | __logger.warning( 174 | f"Invalid MIDI event data. message=`{event.to_bytes().hex(" ").upper()}`" 175 | ) 176 | continue 177 | midi.tracks[event.track].append(midi_message) 178 | 179 | return midi 180 | 181 | 182 | def __midi_to_absolute_time_tracks( 183 | midi: mido.MidiFile, 184 | ) -> list[list[PTrackAbsoluteTimeEvent]]: 185 | midi_time_converter = MidiTimeConverter() 186 | midi_time_converter.load_from_midi(midi) 187 | 188 | absolute_time_tracks: list[list[PTrackAbsoluteTimeEvent]] = [[] for _ in range(PTrackChunk.PORTS)] 189 | for i, midi_track in enumerate(midi.tracks): 190 | port = get_track_port(midi_track) 191 | if port is None: 192 | __logger.warning(f"Port undefined. track={i}") 193 | continue 194 | 195 | track_time = 0 196 | for midi_message in midi_track: 197 | midi_message_data = bytes(midi_message.bin()) 198 | status_byte = midi_message_data[0] 199 | status_type = status_byte & 0xF0 200 | data_bytes = midi_message_data[1:] 201 | 202 | track_time += midi_message.time 203 | absolute_time = midi_time_converter.ticks_to_ms(track_time) 204 | 205 | if status_type == 0xF0: 206 | # System messages 207 | track = port * PTrackChunk.CHANNELS_PER_PORT 208 | absolute_time_tracks[port].append( 209 | PTrackAbsoluteTimeEvent( 210 | status_byte, 211 | data_bytes, 212 | port, 213 | track, 214 | absolute_time, 215 | ) 216 | ) 217 | else: 218 | # Channel voice messages 219 | channel = status_byte & 0x0F 220 | track = (port * PTrackChunk.CHANNELS_PER_PORT) + channel 221 | absolute_time_tracks[port].append( 222 | PTrackAbsoluteTimeEvent( 223 | status_byte, 224 | data_bytes, 225 | port, 226 | track, 227 | absolute_time, 228 | ) 229 | ) 230 | 231 | for absolute_time_track in absolute_time_tracks: 232 | absolute_time_track.sort( 233 | key=lambda absolute_time_event: absolute_time_event.time 234 | ) 235 | 236 | return absolute_time_tracks 237 | 238 | 239 | def __absolute_time_track_to_p_track( 240 | absolute_time_track: list[PTrackAbsoluteTimeEvent], 241 | ) -> list[PTrackEvent]: 242 | events: list[PTrackEvent] = [] 243 | current_time = 0 244 | for event_index, event in enumerate(absolute_time_track): 245 | status_type = event.status_byte & 0xF0 246 | delta_time = event.time - current_time 247 | 248 | if status_type == 0x80: 249 | # Do nothing 250 | continue 251 | elif status_type == 0x90: 252 | channel = event.status_byte & 0x0F 253 | note_number = event.data_bytes[0] 254 | note_off_time = event.time 255 | for i in range(event_index, len(absolute_time_track)): 256 | note_off_event = absolute_time_track[i] 257 | note_off_event_status_type = note_off_event.status_byte & 0xF0 258 | note_off_event_channel = note_off_event.status_byte & 0x0F 259 | if ( 260 | note_off_event_status_type == 0x80 261 | and note_off_event_channel == channel 262 | ): 263 | note_off_event_note_number = note_off_event.data_bytes[0] 264 | if note_off_event_note_number == note_number: 265 | note_off_time = note_off_event.time 266 | break 267 | duration = (note_off_time - event.time) >> 2 268 | events.append( 269 | PTrackEvent( 270 | event.status_byte, 271 | event.data_bytes, 272 | delta_time, 273 | duration, 274 | ) 275 | ) 276 | elif status_type == 0xA0 or status_type == 0xC0: 277 | data_bytes = event.status_byte.to_bytes() + event.data_bytes 278 | events.append( 279 | PTrackEvent( 280 | 0xFE, 281 | data_bytes, 282 | delta_time, 283 |
) 284 | ) 285 | elif status_type == 0xF0: 286 | if event.status_byte != 0xF0: 287 | continue 288 | 289 | events.append( 290 | PTrackEvent( 291 | 0xF0, 292 | event.data_bytes, 293 | delta_time, 294 | ) 295 | ) 296 | else: 297 | events.append( 298 | PTrackEvent( 299 | event.status_byte, 300 | event.data_bytes, 301 | delta_time, 302 | ) 303 | ) 304 | 305 | current_time = event.time 306 | 307 | # End of Track 308 | events.append(PTrackEvent(0x00, b"\x00\x00\x00", 0)) 309 | 310 | return events 311 | 312 | 313 | def midi_to_p_tracks(midi: mido.MidiFile) -> list[PTrackChunk]: 314 | absolute_time_tracks = __midi_to_absolute_time_tracks(midi) 315 | p_tracks: list[PTrackChunk] = [] 316 | track_count = 0 317 | for i in range(PTrackChunk.PORTS): 318 | if absolute_time_tracks[i] is None: 319 | continue 320 | 321 | track_number = track_count + 1 if track_count >= 2 else track_count 322 | p_tracks.append( 323 | PTrackChunk( 324 | b"\xffPR" + track_number.to_bytes(), 325 | __absolute_time_track_to_p_track(absolute_time_tracks[i]), 326 | ) 327 | ) 328 | track_count += 1 329 | return p_tracks 330 | 331 | 332 | def midi_to_p3_track(midi: mido.MidiFile) -> PTrackChunk: 333 | absolute_time_tracks = __midi_to_absolute_time_tracks(midi) 334 | absolute_time_track = absolute_time_tracks[2] 335 | if absolute_time_tracks is None: 336 | raise ValueError("P-Track 2 not found.") 337 | absolute_time_track = [ 338 | event 339 | for event in absolute_time_track 340 | # Note Off and Note On 341 | if event.status_byte_type() in [0x80, 0x90] 342 | ] 343 | return PTrackChunk( 344 | b"\xffPR\x02", 345 | __absolute_time_track_to_p_track(absolute_time_track), 346 | ) 347 | -------------------------------------------------------------------------------- /okd/utils.py: -------------------------------------------------------------------------------- 1 | from logging import getLogger 2 | 3 | import mido 4 | 5 | from midi.utils import get_meta_track, get_track_by_port_channel 6 | from okd.okd_file import OkdGenericHeader, OkdFile 7 | from okd.chunks import ( 8 | MTrackInterpretation, 9 | MTrackChunk, 10 | PTrackInfoChunk, 11 | ExtendedPTrackInfoChunk, 12 | P3TrackInfoChunk, 13 | PTrackChunk, 14 | p_track_info_chunk_by_p_track_chunks, 15 | p3_track_info_chunk_by_p_track_chunks, 16 | ) 17 | from okd.m_track_conversion import midi_to_m_track 18 | from okd.p_track_conversion import p_track_to_midi, midi_to_p_tracks, midi_to_p3_track 19 | 20 | __logger = getLogger(__name__) 21 | 22 | 23 | def okd_to_midi(okd: OkdFile, sysex_to_text: bool) -> mido.MidiFile: 24 | """Make MIDI file from OKD 25 | 26 | Args: 27 | okd (OkdFile): OKD file 28 | sysex_to_text (bool): Convert SysEx Messages to Text Meta Messages 29 | 30 | Raises: 31 | ValueError: Invalid input OKD. 32 | 33 | Returns: 34 | mido.MidiFile: MIDI file 35 | """ 36 | __logger.info(f"OKD loaded. header={okd.header}") 37 | 38 | p_track_info: ( 39 | PTrackInfoChunk | ExtendedPTrackInfoChunk | P3TrackInfoChunk | None 40 | ) = None 41 | p_tracks: list[PTrackChunk] = [] 42 | 43 | m_track_interpritation: MTrackInterpretation | None = None 44 | for chunk in okd.chunks: 45 | chunk_id_hex = chunk.id.hex().upper() 46 | __logger.info(f"{type(chunk).__name__} found. 
id={chunk.id} (0x{chunk_id_hex})") 47 | if isinstance(chunk, MTrackChunk): 48 | m_track_interpritation = MTrackInterpretation.from_track(chunk) 49 | elif isinstance(chunk, PTrackInfoChunk): 50 | p_track_info = chunk 51 | elif isinstance(chunk, ExtendedPTrackInfoChunk): 52 | p_track_info = chunk 53 | elif isinstance(chunk, P3TrackInfoChunk): 54 | p_track_info = chunk 55 | elif isinstance(chunk, PTrackChunk): 56 | p_tracks.append(chunk) 57 | 58 | if m_track_interpritation is None or p_track_info is None or len(p_tracks) == 0: 59 | raise ValueError( 60 | "Invalid input OKD. Needed M-Track, P-Track Info and P-Tracks." 61 | ) 62 | 63 | __logger.info("Make P-Track MIDI file.") 64 | return p_track_to_midi( 65 | m_track_interpritation, p_track_info, p_tracks, sysex_to_text 66 | ) 67 | 68 | 69 | def midi_to_okds( 70 | midi: mido.MidiFile, header: OkdGenericHeader 71 | ) -> tuple[OkdFile, OkdFile]: 72 | """MIDI to OKDs 73 | 74 | Args: 75 | midi (mido.MidiFile): MIDI file 76 | 77 | Raises: 78 | ValueError: Meta track not found. 79 | ValueError: P-Track not found. 80 | ValueError: P3-Track not found. 81 | 82 | Returns: 83 | tuple[OkdFile, OkdFile]: P-Track and P3-Track 84 | """ 85 | meta_track = get_meta_track(midi.tracks) 86 | if meta_track is None: 87 | raise ValueError("Meta track not found.") 88 | 89 | m_track_chunk = midi_to_m_track(midi) 90 | 91 | p_track = [ 92 | get_track_by_port_channel(midi.tracks, port, track) 93 | for port in range(2) 94 | for track in range(16) 95 | ] 96 | p_track = [track for track in p_track if track is not None] 97 | if len(p_track) < 1: 98 | raise ValueError("P-Track not found.") 99 | p_track_midi = mido.MidiFile() 100 | p_track_midi.tracks = [meta_track, *p_track] 101 | p_track_chunks = midi_to_p_tracks(p_track_midi) 102 | p_track_info_chunk = p_track_info_chunk_by_p_track_chunks(p_track_chunks) 103 | 104 | p3_track = get_track_by_port_channel(midi.tracks, 1, 8) 105 | if p3_track is None: 106 | raise ValueError("P3-Track not found.") 107 | for message in p3_track: 108 | if message.type == "midi_port": 109 | message.port = 2 110 | if hasattr(message, "channel"): 111 | message.channel = 14 112 | p3_track_midi = mido.MidiFile() 113 | p3_track_midi.tracks = [meta_track, p3_track] 114 | p3_track_chunk = midi_to_p3_track(p3_track_midi) 115 | p3_track_info_chunk = p3_track_info_chunk_by_p_track_chunks(p3_track_chunk) 116 | 117 | playing_okd = OkdFile(header, [m_track_chunk, p_track_info_chunk, *p_track_chunks]) 118 | p3_okd = OkdFile(header, [p3_track_info_chunk, p3_track_chunk]) 119 | return playing_okd, p3_okd 120 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "dam-song-tools" 3 | version = "0.1.0" 4 | description = "Tools for DAM Karaoke Song data" 5 | authors = ["KIRISHIKI Yudai "] 6 | license = "MIT" 7 | readme = "README.md" 8 | packages = [ 9 | {include = "dam_song_tools_cli"}, 10 | {include = "midi"}, 11 | {include = "mtf"}, 12 | {include = "okd"}, 13 | {include = "sprc_header"} 14 | ] 15 | 16 | [tool.poetry.scripts] 17 | dam-song-tools = "dam_song_tools_cli.cli:main" 18 | 19 | [tool.poetry.dependencies] 20 | python = "^3.13" 21 | fastcrc = "^0.3.2" 22 | fire = "^0.7.0" 23 | mido = "^1.3.3" 24 | numpy = "^2.2.1" 25 | simplejson = "^3.19.3" 26 | soundfile = "^0.12.1" 27 | 28 | [tool.poetry.group.dev.dependencies] 29 | black = "^24.10.0" 30 | 31 | [build-system] 32 | requires = ["poetry-core"] 33 | 
build-backend = "poetry.core.masonry.api" 34 | -------------------------------------------------------------------------------- /sprc/header.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Final, BinaryIO, Self 3 | 4 | from fastcrc import crc16 5 | 6 | # Magic bytes that identify SPRC files 7 | _MAGIC_BYTES: Final[bytes] = b"SPRC" 8 | 9 | # Header size in bytes 10 | _HEADER_SIZE: Final[int] = 16 11 | 12 | 13 | @dataclass 14 | class SprcHeader: 15 | """ 16 | SPRC Header class for handling SPRC file format headers. 17 | 18 | This class provides functionality to read, write, and validate SPRC headers, 19 | which include magic bytes, revision information, CRC checksums, and flags. 20 | 21 | Attributes: 22 | revision: Header revision number 23 | crc_value: CRC-16 checksum of the file content 24 | force_flag: Flag indicating if force processing is required 25 | unknown_0: Reserved bytes for future use 26 | """ 27 | 28 | revision: int = 0 29 | crc_value: int = 0 30 | force_flag: int = 0 31 | unknown_0: bytes = b"" 32 | 33 | @staticmethod 34 | def has_sprc_header(stream: BinaryIO) -> bool: 35 | """ 36 | Check if a stream contains a valid SPRC header. 37 | 38 | This method reads the first 16 bytes of the stream to check for the SPRC 39 | magic bytes signature, then restores the stream position. 40 | 41 | Args: 42 | stream: Input stream to check 43 | 44 | Returns: 45 | True if the stream has a valid SPRC header, False otherwise 46 | """ 47 | position = stream.tell() 48 | try: 49 | buffer = stream.read(_HEADER_SIZE) 50 | if len(buffer) < _HEADER_SIZE: 51 | return False 52 | 53 | magic_bytes = buffer[0:4] 54 | return magic_bytes == _MAGIC_BYTES 55 | finally: 56 | # Restore original stream position 57 | stream.seek(position) 58 | 59 | @classmethod 60 | def read(cls, stream: BinaryIO) -> Self: 61 | """ 62 | Read SPRC header from a stream. 63 | 64 | Args: 65 | stream: Input stream to read from 66 | 67 | Returns: 68 | New SprcHeader instance 69 | 70 | Raises: 71 | ValueError: If the stream doesn't contain a valid SPRC header 72 | """ 73 | buffer = stream.read(_HEADER_SIZE) 74 | if len(buffer) < _HEADER_SIZE: 75 | raise ValueError( 76 | f"Insufficient data: expected {_HEADER_SIZE} bytes, got {len(buffer)}" 77 | ) 78 | 79 | # Check magic bytes 80 | magic_bytes = buffer[0:4] 81 | if magic_bytes != _MAGIC_BYTES: 82 | raise ValueError( 83 | f"Invalid magic bytes: expected {_MAGIC_BYTES!r}, got {magic_bytes!r}" 84 | ) 85 | 86 | # Parse header fields 87 | revision = int.from_bytes(buffer[4:6], "big") 88 | crc_value = int.from_bytes(buffer[6:8], "big") 89 | force_flag = buffer[8] 90 | unknown_0 = buffer[9:16] 91 | 92 | return cls(revision, crc_value, force_flag, unknown_0) 93 | 94 | def validate_crc(self, data: bytes | BinaryIO) -> bool: 95 | """ 96 | Validate data with stored CRC value. 
97 | 98 | Args: 99 | data: Data bytes or stream to validate 100 | 101 | Returns: 102 | True if the calculated CRC matches the stored CRC, False otherwise 103 | """ 104 | if isinstance(data, BinaryIO): 105 | # Save current position 106 | position = data.tell() 107 | 108 | try: 109 | # Skip SPRC header 110 | data.seek(_HEADER_SIZE) 111 | buffer = data.read() 112 | data_bytes = buffer 113 | finally: 114 | # Restore original position 115 | data.seek(position) 116 | else: 117 | data_bytes = data 118 | 119 | # Calculate CRC-16 (Genibus) of the data 120 | calculated_crc = crc16.genibus(data_bytes) 121 | 122 | return calculated_crc == self.crc_value 123 | 124 | def write(self, stream: BinaryIO) -> None: 125 | """ 126 | Write SPRC header to a stream. 127 | 128 | Args: 129 | stream: Output stream to write to 130 | """ 131 | # Write magic bytes 132 | stream.write(_MAGIC_BYTES) 133 | 134 | # Write header fields 135 | stream.write(self.revision.to_bytes(2, "big")) 136 | stream.write(self.crc_value.to_bytes(2, "big")) 137 | stream.write(self.force_flag.to_bytes(1, "big")) 138 | stream.write(self.unknown_0) 139 | 140 | @classmethod 141 | def create(cls, data: bytes, revision: int = 1, force_flag: int = 0) -> Self: 142 | """ 143 | Create a new SPRC header for the given data. 144 | 145 | Args: 146 | data: Data bytes to calculate CRC for 147 | revision: Header revision number (default: 1) 148 | force_flag: Force processing flag (default: 0) 149 | 150 | Returns: 151 | New SprcHeader instance with calculated CRC 152 | """ 153 | # Calculate CRC-16 (Genibus) of the data 154 | crc_value = crc16.genibus(data) 155 | 156 | # Create unknown_0 bytes (all zeros) 157 | unknown_0 = bytes(7) 158 | 159 | return cls(revision, crc_value, force_flag, unknown_0) 160 | -------------------------------------------------------------------------------- /test/data/p_track.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DKKaraoke/dam-song-tools-oss/08318eb75f7514495cbb460e9b7eab2e0529757a/test/data/p_track.mid -------------------------------------------------------------------------------- /test/test_okd_midi.py: -------------------------------------------------------------------------------- 1 | from io import BytesIO 2 | import unittest 3 | 4 | from okd.okd_midi import ( 5 | read_variable_int, 6 | write_variable_int, 7 | read_extended_variable_int, 8 | write_extended_variable_int, 9 | ) 10 | 11 | 12 | class TestOkdMidi(unittest.TestCase): 13 | VALUES: list[tuple[int, bytes]] = [ 14 | (0x000000, b"\x00"), 15 | (0x00003F, b"\x3f"), 16 | (0x00103F, b"\x7f\x3f"), 17 | (0x04103F, b"\x7f\x7f\x3f"), 18 | ] 19 | EXTENDED_VALUES: list[tuple[int, bytes]] = [ 20 | (0x000000, b""), 21 | (0x00003F, b"\x3f"), 22 | (0x00103F, b"\x7f\x3f"), 23 | (0x04103F, b"\x7f\x7f\x3f"), 24 | (0x04107E, b"\x7f\x7f\x3f\x3f"), 25 | (0x04207E, b"\x7f\x7f\x3f\x7f\x3f"), 26 | (0x08207E, b"\x7f\x7f\x3f\x7f\x7f\x3f"), 27 | ] 28 | 29 | def test_read_varibale_int(self): 30 | for value, buffer in TestOkdMidi.VALUES: 31 | with self.subTest(value=value, buffer=buffer): 32 | stream = BytesIO(buffer) 33 | read_value = read_variable_int(stream) 34 | self.assertEqual(value, read_value) 35 | 36 | with self.assertRaises(ValueError): 37 | stream = BytesIO(b"\x7f\x7f\x7f") 38 | read_variable_int(stream) 39 | 40 | def test_write_variable_int( 41 | self, 42 | ): 43 | for value, buffer in TestOkdMidi.VALUES: 44 | with self.subTest(value=value, buffer=buffer): 45 | stream = BytesIO() 46 | 
write_variable_int(stream, value) 47 | stream.seek(0) 48 | self.assertEqual(buffer, stream.read()) 49 | 50 | with self.assertRaises(ValueError): 51 | stream = BytesIO() 52 | write_variable_int(stream, 0x04104F) 53 | 54 | def test_read_extended_variable_int(self): 55 | for value, buffer in TestOkdMidi.EXTENDED_VALUES: 56 | with self.subTest(value=value, buffer=buffer): 57 | stream = BytesIO(buffer + b"\x80") 58 | read_value = read_extended_variable_int(stream) 59 | self.assertEqual(value, read_value) 60 | 61 | def test_write_extended_variable_int(self): 62 | for value, buffer in TestOkdMidi.EXTENDED_VALUES: 63 | with self.subTest(value=value, buffer=buffer): 64 | stream = BytesIO() 65 | write_extended_variable_int(stream, value) 66 | stream.seek(0) 67 | self.assertEqual(buffer, stream.read()) 68 | 69 | 70 | if __name__ == "__main__": 71 | unittest.main() 72 | --------------------------------------------------------------------------------
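Usage sketch (not part of the repository dump above): a minimal round trip through the SprcHeader class defined in /sprc/header.py, showing create, write, read, and CRC-16/GENIBUS validation. The import path assumes the `sprc` package layout shown in the directory tree, and the payload bytes are an arbitrary stand-in for real SPRC-prefixed file content.

# sprc_roundtrip_sketch.py - illustrative only; assumes sprc/header.py is importable as sprc.header.
from io import BytesIO

from sprc.header import SprcHeader

# Arbitrary stand-in for the body that follows a 16-byte SPRC header.
payload = b"example payload bytes"

# Build a header whose crc_value is the CRC-16/GENIBUS of the payload.
header = SprcHeader.create(payload, revision=1)

# Serialize the header followed by the payload, header first as validate_crc expects.
stream = BytesIO()
header.write(stream)
stream.write(payload)
stream.seek(0)

# Detect the magic bytes, parse the header back, and check the payload against the stored CRC.
assert SprcHeader.has_sprc_header(stream)
parsed = SprcHeader.read(stream)
assert parsed.revision == 1
assert parsed.validate_crc(payload)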