├── mtf ├── __init__.py ├── note_events_conversion.py ├── mtf_audio.py ├── mtf_conversion.py └── saiten_ref_conversion.py ├── settings.ini ├── test ├── data │ └── p_track.mid └── test_okd_midi.py ├── .devcontainer ├── on_create.sh ├── Dockerfile └── devcontainer.json ├── okd ├── mmt_tg │ ├── __init__.py │ └── mmt_tg.py ├── __init__.py ├── chunks │ ├── generic_chunk.py │ ├── okd_chunk.py │ ├── __init__.py │ ├── adpcm_chunk.py │ ├── p3_track_info_chunk.py │ ├── chunk_base.py │ ├── utils.py │ ├── ykyi_chunk.py │ ├── p_track_info_chunk.py │ ├── extended_p_track_info_chunk.py │ ├── m_track_chunk.py │ └── p_track_chunk.py ├── dump_binary.py ├── oka_file.py ├── utils.py ├── adpcm.py ├── okd_midi.py ├── okd_file_scramble.py ├── m_track_conversion.py ├── p_track_conversion.py └── okd_file.py ├── Dockerfile ├── pyproject.toml ├── LICENSE ├── midi ├── event.py ├── utils.py └── time_converter.py ├── .gitignore ├── sprc └── header.py ├── README.md └── dam_song_tools_cli └── cli.py /mtf/__init__.py: -------------------------------------------------------------------------------- 1 | from .mtf_audio import MtfAudio -------------------------------------------------------------------------------- /settings.ini: -------------------------------------------------------------------------------- 1 | [MtfAudio] 2 | OpusVolumeIncrease = 20 3 | -------------------------------------------------------------------------------- /test/data/p_track.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DKKaraoke/dam-song-tools-oss/HEAD/test/data/p_track.mid -------------------------------------------------------------------------------- /.devcontainer/on_create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export POETRY_VIRTUALENVS_IN_PROJECT=1 4 | poetry install 5 | -------------------------------------------------------------------------------- 
/okd/mmt_tg/__init__.py: -------------------------------------------------------------------------------- 1 | from .midi_parameter_change_table import System, MultiEffect, MultiPartEntry 2 | from .mmt_tg import MmtTg 3 | -------------------------------------------------------------------------------- /okd/__init__.py: -------------------------------------------------------------------------------- 1 | from .okd_file import * 2 | from .oka_file import * 3 | from .chunks import * 4 | 5 | from .utils import okd_to_midi, midi_to_okds 6 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM python:3.13-bookworm 4 | 5 | ARG UID=1000 6 | ARG GID=1000 7 | 8 | RUN --mount=type=cache,target=/var/lib/apt/,sharing=locked \ 9 | --mount=type=cache,target=/var/cache/apt/,sharing=locked \ 10 | apt-get update && apt-get install -y --no-install-recommends \ 11 | # For development 12 | sudo 13 | 14 | RUN groupadd -g $GID python \ 15 | && useradd -m -s /bin/bash -u $UID -g $GID python \ 16 | && echo 'python ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 17 | 18 | USER python 19 | 20 | # Install Poetry 21 | RUN curl -sSL https://install.python-poetry.org | python3 - 22 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dam-song-tools", 3 | "build": { 4 | "dockerfile": "Dockerfile" 5 | }, 6 | "workspaceFolder": "/workspace/", 7 | "remoteUser": "python", 8 | "mounts": [ 9 | "source=${localWorkspaceFolder},target=/workspace/,type=bind,consistency=cached" 10 | ], 11 | "onCreateCommand": ".devcontainer/on_create.sh", 12 | "customizations": { 13 | "vscode": { 14 | "extensions": [ 15 | "ms-azuretools.vscode-docker", 16 | "esbenp.prettier-vscode", 17 | 
from dataclasses import dataclass
from typing import BinaryIO, Self

from .chunk_base import ChunkBase


@dataclass
class GenericChunk(ChunkBase):
    """Generic Chunk: an OKD chunk whose payload is kept as raw, unparsed bytes."""

    # Raw chunk payload, exactly as read from the stream.
    payload: bytes

    @classmethod
    def read(cls, stream: BinaryIO) -> Self:
        """Read a Generic Chunk from a stream.

        Args:
            stream (BinaryIO): Input stream

        Returns:
            Self: Generic Chunk
        """
        # Header parsing (chunk ID + payload) is shared by all chunk types.
        # Renamed local from `id` to avoid shadowing the builtin.
        chunk_id, payload = ChunkBase._read_common(stream)
        return cls(chunk_id, payload)

    def _payload_buffer(self) -> bytes:
        """Return the payload to serialize; a generic chunk stores it verbatim."""
        return self.payload
| 5 | ARG UID=1000 6 | ARG GID=1000 7 | 8 | RUN --mount=type=cache,target=/var/lib/apt/,sharing=locked \ 9 | --mount=type=cache,target=/var/cache/apt/,sharing=locked \ 10 | apt-get update && apt-get install -y --no-install-recommends \ 11 | curl ffmpeg 12 | 13 | RUN groupadd -g $GID python \ 14 | && useradd -m -s /bin/bash -u $UID -g $GID python 15 | 16 | USER python 17 | 18 | # Install Poetry 19 | RUN curl -sSL https://install.python-poetry.org | python - 20 | ENV PATH=/home/python/.local/bin:$PATH 21 | 22 | WORKDIR /app 23 | 24 | COPY pyproject.toml poetry.lock README.md settings.ini /app/ 25 | COPY midi/ midi/ 26 | COPY okd/ okd/ 27 | COPY mtf/ mtf/ 28 | COPY sprc/ sprc/ 29 | COPY dam_song_tools_cli/ dam_song_tools_cli/ 30 | 31 | RUN poetry install 32 | 33 | ENTRYPOINT [ "poetry", "run", "dam-song-tools" ] 34 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "dam-song-tools" 3 | version = "0.1.0" 4 | description = "Tools for DAM Karaoke Song data" 5 | authors = ["KIRISHIKI Yudai "] 6 | license = "MIT" 7 | readme = "README.md" 8 | packages = [ 9 | {include = "dam_song_tools_cli"}, 10 | {include = "midi"}, 11 | {include = "mtf"}, 12 | {include = "okd"}, 13 | {include = "sprc"} 14 | ] 15 | 16 | [tool.poetry.scripts] 17 | dam-song-tools = "dam_song_tools_cli.cli:main" 18 | 19 | [tool.poetry.dependencies] 20 | python = ">=3.10,<4.0" 21 | fastcrc = "^0.3.2" 22 | fire = "^0.7.0" 23 | mido = "^1.3.3" 24 | numpy = "^2.2.1" 25 | simplejson = "^3.19.3" 26 | soundfile = "^0.12.1" 27 | pydub = "^0.25.1" 28 | audioop-lts = { version = "^0.2.1", python = "^3.13" } 29 | 30 | [tool.poetry.group.dev.dependencies] 31 | black = "^24.10.0" 32 | 33 | [build-system] 34 | requires = ["poetry-core"] 35 | build-backend = "poetry.core.masonry.api" 36 | -------------------------------------------------------------------------------- 
/okd/chunks/__init__.py: -------------------------------------------------------------------------------- 1 | from .chunk_base import ChunkBase 2 | from .generic_chunk import GenericChunk 3 | from .ykyi_chunk import YkyiInfoEntry, YkyiChunk 4 | from .p_track_info_chunk import ( 5 | PTrackInfoChannelInfoEntry, 6 | PTrackInfoEntry, 7 | PTrackInfoChunk, 8 | ) 9 | from .p3_track_info_chunk import P3TrackInfoChannelInfoEntry, P3TrackInfoChunk 10 | from .extended_p_track_info_chunk import ( 11 | ExtendedPTrackInfoChannelInfoEntry, 12 | ExtendedPTrackInfoEntry, 13 | ExtendedPTrackInfoChunk, 14 | ) 15 | from .m_track_chunk import ( 16 | MTrackEvent, 17 | MTrackAbsoluteTimeEvent, 18 | MTrackInterpretation, 19 | MTrackChunk, 20 | ) 21 | from .p_track_chunk import PTrackEvent, PTrackAbsoluteTimeEvent, PTrackChunk 22 | from .adpcm_chunk import AdpcmChunk 23 | 24 | from .okd_chunk import OkdChunk 25 | 26 | from .utils import ( 27 | read_chunk, 28 | p_track_info_chunk_by_p_track_chunks, 29 | p3_track_info_chunk_by_p_track_chunks, 30 | ) 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024-2025 soltia48 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
from dataclasses import dataclass
from io import BufferedWriter


@dataclass
class MidiEvent:
    """MIDI Event"""

    status_byte: int
    data_bytes: bytes

    def status_byte_type(self) -> int:
        """Get Status Byte type (upper nibble, e.g. 0x90 for Note On).

        Returns:
            int: Status Byte type
        """
        return self.status_byte & 0xF0

    def channel(self) -> int:
        """Get channel (lower nibble of the Status Byte).

        Returns:
            int: Channel (0-15)
        """
        return self.status_byte & 0x0F

    def write(self, stream: BufferedWriter) -> None:
        """Write this event to a stream.

        Args:
            stream (BufferedWriter): Output stream
        """
        # Explicit length/byteorder: bare int.to_bytes() only gained default
        # arguments in Python 3.11, but pyproject.toml declares python >= 3.10.
        stream.write(self.status_byte.to_bytes(1, "big"))
        stream.write(self.data_bytes)

    def to_bytes(self) -> bytes:
        """Serialize this event.

        Returns:
            bytes: This instance as bytes
        """
        return self.status_byte.to_bytes(1, "big") + self.data_bytes


@dataclass
class MidiTrackEvent(MidiEvent):
    """MIDI Track Event: a MIDI Event preceded by a delta time."""

    delta_time: int
chunk_size: int): 7 | BYTES_PER_SEP = " " 8 | 9 | line = "0x" 10 | line += format(address, "08X") 11 | line += " " 12 | line += chunk.hex(BYTES_PER_SEP).upper() 13 | padding = chunk_size - len(chunk) 14 | line += " " * padding 15 | line += " " 16 | for byte in chunk: 17 | if byte < 0x20 or 0x7E < byte: 18 | # Control character 19 | line += "." 20 | continue 21 | line += chr(byte) 22 | line += "\n" 23 | return line 24 | 25 | 26 | def dump_binary(data: bytes, chunk_size=16) -> str: 27 | """Dump binary with HEX 28 | 29 | Args: 30 | data (bytes): Data 31 | chunk_size (int, optional): Chunk size. Defaults to 16. 32 | 33 | Returns: 34 | str: Binary dumped string 35 | """ 36 | output = "" 37 | 38 | chunk_count = math.floor(len(data) / chunk_size) 39 | fraction_length = len(data) % chunk_size 40 | 41 | for i in range(chunk_count): 42 | address = chunk_size * i 43 | chunk: bytes = data[address : address + chunk_size] 44 | output += __dump_binary_line(address, chunk, chunk_size) 45 | 46 | if fraction_length == 0: 47 | return output[:-1] 48 | 49 | address = chunk_size * chunk_count 50 | fraction: bytes = data[address : address + fraction_length] 51 | output += __dump_binary_line(address, fraction, chunk_size) 52 | 53 | return output[:-1] 54 | -------------------------------------------------------------------------------- /mtf/note_events_conversion.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from midi.time_converter import MidiTimeConverter 4 | 5 | import mido 6 | 7 | @dataclass 8 | class NoteEvent: 9 | start_clk: int 10 | end_clk: int 11 | note: int 12 | 13 | def to_dict(self): 14 | return { 15 | "EndClk": self.end_clk, 16 | "Note": self.note, 17 | "StartClk": self.start_clk 18 | } 19 | 20 | def note_event_to_midi(note_events: list[NoteEvent], port: int, channel: int, ticks_per_beat: int = 480) -> mido.MidiFile: 21 | """Converts NoteEvents to mido.MidiFile""" 22 | midi = 
from dataclasses import dataclass
from io import BytesIO
from typing import BinaryIO, Self

from ..adpcm import AdpcmDecoder

from .chunk_base import ChunkBase
from .generic_chunk import GenericChunk


@dataclass
class AdpcmChunkTrack:
    """ADPCM Chunk Track (a `YAWV` sub-chunk inside an ADPCM chunk)"""

    TRACK_ID = b"YAWV"

    data: bytes

    def decode(self) -> list[int]:
        """Decode the raw ADPCM data.

        Returns:
            list[int]: Decoded samples
        """
        stream = BytesIO(self.data)
        decoder = AdpcmDecoder()
        return decoder.decode(stream)

    def write(self, stream: BinaryIO) -> None:
        """Write this track with its sub-chunk framing (ID + 32-bit length + data).

        Args:
            stream (BinaryIO): Output stream
        """
        stream.write(AdpcmChunkTrack.TRACK_ID)
        stream.write(len(self.data).to_bytes(4, "big"))
        stream.write(self.data)


@dataclass
class AdpcmChunk(ChunkBase):
    """ADPCM Chunk"""

    tracks: list[AdpcmChunkTrack]

    @classmethod
    def from_generic(cls, generic: GenericChunk) -> Self:
        """From Generic Chunk

        Args:
            generic (GenericChunk): Generic Chunk

        Raises:
            ValueError: Truncated track data or unknown sub-chunk ID

        Returns:
            Self: Instance of this class
        """
        stream = BytesIO(generic.payload)
        tracks: list[AdpcmChunkTrack] = []
        while True:
            buffer = stream.read(8)
            if len(buffer) < 8:
                break

            chunk_id = buffer[0:4]
            if chunk_id != AdpcmChunkTrack.TRACK_ID:
                raise ValueError(f"Unknown Chunk ID detected. chunk_id=`{chunk_id}`")
            chunk_size = int.from_bytes(buffer[4:8], "big")
            chunk_data = stream.read(chunk_size)
            if len(chunk_data) < chunk_size:
                raise ValueError("Too less read bytes.")
            tracks.append(AdpcmChunkTrack(chunk_data))

        return cls(generic.id, tracks)

    def _payload_buffer(self) -> bytes:
        """Serialize all tracks back into a payload.

        Each track is written with its `YAWV` header and length so the result
        round-trips through `from_generic`. (The previous implementation
        concatenated bare track data, dropping the sub-chunk framing that
        `from_generic` requires when parsing.)

        Returns:
            bytes: Payload buffer
        """
        stream = BytesIO()
        for track in self.tracks:
            track.write(stream)
        return stream.getvalue()
self.subTest(value=value, buffer=buffer): 57 | stream = BytesIO(buffer + b"\x80") 58 | read_value = read_extended_variable_int(stream) 59 | self.assertEqual(value, read_value) 60 | 61 | def test_write_extended_variable_int(self): 62 | for value, buffer in TestOkdMidi.EXTENDED_VALUES: 63 | with self.subTest(value=value, buffer=buffer): 64 | stream = BytesIO() 65 | write_extended_variable_int(stream, value) 66 | stream.seek(0) 67 | self.assertEqual(buffer, stream.read()) 68 | 69 | 70 | if __name__ == "__main__": 71 | unittest.main() 72 | -------------------------------------------------------------------------------- /okd/oka_file.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BytesIO 3 | from typing import BinaryIO, Self 4 | 5 | from .okd_file_scramble import descramble 6 | 7 | 8 | @dataclass 9 | class OkaHeader: 10 | """OKA Header""" 11 | 12 | MAGIC_BYTES = b"YOKA" 13 | FIXED_PART_LENGTH = 40 14 | 15 | magic_bytes: bytes 16 | length: int 17 | version: str 18 | id_karaoke: int 19 | data_offset: int 20 | unknown_0: int 21 | crc: int 22 | 23 | @classmethod 24 | def read( 25 | cls, 26 | stream: BinaryIO, 27 | scramble_pattern_index: int | None = None, 28 | ) -> Self: 29 | """Read 30 | 31 | Args: 32 | stream (BinaryIO): Input stream 33 | scramble_pattern_index (int): Scramble pattern index 34 | 35 | Raises: 36 | ValueError: Invalid `magic_bytes` 37 | 38 | Returns: 39 | Self: Instance of this class 40 | """ 41 | if scramble_pattern_index is None: 42 | buffer = stream.read(OkaHeader.FIXED_PART_LENGTH) 43 | else: 44 | header_stream = BytesIO() 45 | scramble_pattern_index = descramble( 46 | stream, 47 | header_stream, 48 | scramble_pattern_index, 49 | OkaHeader.FIXED_PART_LENGTH, 50 | ) 51 | header_stream.seek(0) 52 | buffer = header_stream.read() 53 | if len(buffer) < OkaHeader.FIXED_PART_LENGTH: 54 | raise ValueError("Too less read bytes.") 55 | 56 | magic_bytes = buffer[0:4] 57 
| if magic_bytes != OkaHeader.MAGIC_BYTES: 58 | raise ValueError("Invalid `magic_bytes`.") 59 | length = int.from_bytes(buffer[4:8], "big") 60 | version = buffer[8:24].decode("ascii") 61 | id_karaoke = int.from_bytes(buffer[24:28], "big") 62 | data_offset = int.from_bytes(buffer[28:32], "big") 63 | unknown_0 = int.from_bytes(buffer[32:36], "big") 64 | crc = int.from_bytes(buffer[36:40], "big") 65 | return cls( 66 | magic_bytes, length, version, id_karaoke, data_offset, unknown_0, crc 67 | ) 68 | 69 | def write(self, stream: BinaryIO) -> None: 70 | """Write 71 | 72 | Args: 73 | stream (BinaryIO): Output stream 74 | """ 75 | stream.write(OkaHeader.MAGIC_BYTES) 76 | stream.write(self.length.to_bytes(4, "big")) 77 | stream.write(self.version.encode("ascii").ljust(16, b"\x00")) 78 | stream.write(self.id_karaoke.to_bytes(4, "big")) 79 | stream.write(self.data_offset.to_bytes(4, "big")) 80 | stream.write(self.unknown_0.to_bytes(4, "big")) 81 | stream.write(self.crc.to_bytes(4, "big")) 82 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
from dataclasses import dataclass
from io import BytesIO
from typing import Self

from .chunk_base import ChunkBase
from .generic_chunk import GenericChunk
from .p_track_info_chunk import PTrackInfoChannelInfoEntry


@dataclass
class P3TrackInfoChannelInfoEntry(PTrackInfoChannelInfoEntry):
    """P3-Track Information Channel Information Entry"""


@dataclass
class P3TrackInfoChunk(ChunkBase):
    """P3-Track Information Chunk"""

    track_number: int
    track_status: int
    use_channel_group_flag: int
    default_channel_groups: list[int]
    channel_groups: list[int]
    channel_info: list[PTrackInfoChannelInfoEntry]
    system_ex_ports: int

    @classmethod
    def from_generic(cls, generic: GenericChunk) -> Self:
        """From Generic Chunk

        Args:
            generic (GenericChunk): Generic Chunk

        Raises:
            ValueError: Truncated payload

        Returns:
            Self: Instance of this class
        """
        stream = BytesIO(generic.payload)

        buffer = stream.read(4)
        if len(buffer) < 4:
            raise ValueError("Too less read bytes.")

        track_number = buffer[0]
        track_status = buffer[1]
        use_channel_group_flag = int.from_bytes(buffer[2:4], "big")

        # One 16-bit default channel group per channel whose flag bit is set;
        # unset channels default to 0x0000.
        default_channel_groups: list[int] = []
        for channel in range(16):
            if (use_channel_group_flag >> channel) & 0x0001 == 0x0001:
                buffer = stream.read(2)
                if len(buffer) < 2:
                    raise ValueError("Too less read bytes.")

                default_channel_groups.append(int.from_bytes(buffer, "big"))
            else:
                default_channel_groups.append(0x0000)

        buffer = stream.read(32)
        if len(buffer) < 32:
            raise ValueError("Too less read bytes.")

        channel_groups: list[int] = []
        for channel in range(16):
            offset = 2 * channel
            channel_groups.append(int.from_bytes(buffer[offset : offset + 2], "big"))

        # NOTE(review): uses the base-class reader, not
        # P3TrackInfoChannelInfoEntry.read — the subclass adds no fields, so
        # behavior is identical; kept as-is to avoid changing entry types.
        channel_info: list[PTrackInfoChannelInfoEntry] = []
        for channel in range(16):
            channel_info.append(PTrackInfoChannelInfoEntry.read(stream))

        buffer = stream.read(2)
        if len(buffer) < 2:
            raise ValueError("Too less read bytes.")

        system_ex_ports = int.from_bytes(buffer[0:2], "big")

        return cls(
            generic.id,
            track_number,
            track_status,
            use_channel_group_flag,
            default_channel_groups,
            channel_groups,
            channel_info,
            system_ex_ports,
        )

    def is_lossless_track(self) -> bool:
        """Return True when bit 7 of `track_status` marks this as a lossless track."""
        return self.track_status & 0x80 == 0x80

    def _payload_buffer(self) -> bytes:
        """Serialize this chunk's payload.

        Returns:
            bytes: Payload buffer
        """
        stream = BytesIO()

        # Explicit length/byteorder: bare int.to_bytes() requires Python >= 3.11,
        # but pyproject.toml declares python >= 3.10.
        stream.write(self.track_number.to_bytes(1, "big"))
        stream.write(self.track_status.to_bytes(1, "big"))
        stream.write(self.use_channel_group_flag.to_bytes(2, "big"))
        for channel, default_channel_group in enumerate(self.default_channel_groups):
            if (self.use_channel_group_flag >> channel) & 0x0001 != 0x0001:
                continue
            stream.write(default_channel_group.to_bytes(2, "big"))
        for channel_group in self.channel_groups:
            stream.write(channel_group.to_bytes(2, "big"))
        for channel_info_entry in self.channel_info:
            channel_info_entry.write(stream)
        # Fix: was written little-endian, but `from_generic` reads this field
        # big-endian, so round-tripping corrupted `system_ex_ports`.
        stream.write(self.system_ex_ports.to_bytes(2, "big"))

        return stream.getvalue()
def get_track_by_port_channel(
    tracks: list[mido.MidiTrack], port: int, channel: int
) -> mido.MidiTrack | None:
    """Find the first MIDI track that matches the specified port and channel numbers.

    Args:
        tracks (list[mido.MidiTrack]): List of MIDI tracks to search through
        port (int): Target MIDI port number
        channel (int): Target MIDI channel number

    Returns:
        mido.MidiTrack | None: The first matching MIDI track, or None if no match is found
    """
    for candidate in tracks:
        # A track matches when it declares the requested port...
        declares_port = any(
            message.type == "midi_port" and message.port == port
            for message in candidate
        )
        if not declares_port:
            continue
        # ...and plays at least one note on the requested channel.
        plays_channel = any(
            message.type == "note_on" and message.channel == channel
            for message in candidate
        )
        if plays_channel:
            return candidate
    return None
Returns (None, None) if no notes are found 84 | """ 85 | first_note_time = 0xFFFFFFFF 86 | last_note_time = 0 87 | for track in tracks: 88 | absolute_time = 0 89 | for message in track: 90 | absolute_time += message.time 91 | 92 | if message.type == "note_on": 93 | if absolute_time < first_note_time: 94 | first_note_time = absolute_time 95 | if message.type == "note_off": 96 | if absolute_time > last_note_time: 97 | last_note_time = absolute_time 98 | 99 | return first_note_time, last_note_time 100 | 101 | 102 | def get_time_signatures(tracks: list[mido.MidiTrack]) -> list[tuple[int, int, int]]: 103 | """Get time signatures from MIDI tracks 104 | 105 | Args: 106 | tracks: MIDI tracks 107 | 108 | Returns: 109 | list[tuple[int, int, int]]: List of (tick, numerator, denominator) 110 | """ 111 | time_signatures: list[tuple[int, int, int]] = [] 112 | for track in tracks: 113 | absolute_tick = 0 114 | for message in track: 115 | absolute_tick += message.time 116 | if message.type == "time_signature": 117 | time_signatures.append( 118 | (absolute_tick, message.numerator, message.denominator) 119 | ) 120 | return sorted(time_signatures, key=lambda x: x[0]) 121 | -------------------------------------------------------------------------------- /okd/utils.py: -------------------------------------------------------------------------------- 1 | from logging import getLogger 2 | 3 | import mido 4 | 5 | from midi.utils import get_meta_track, get_track_by_port_channel 6 | from okd.okd_file import OkdGenericHeader, OkdFile 7 | from okd.chunks import ( 8 | MTrackInterpretation, 9 | MTrackChunk, 10 | PTrackInfoChunk, 11 | ExtendedPTrackInfoChunk, 12 | P3TrackInfoChunk, 13 | PTrackChunk, 14 | p_track_info_chunk_by_p_track_chunks, 15 | p3_track_info_chunk_by_p_track_chunks, 16 | ) 17 | from okd.m_track_conversion import midi_to_m_track 18 | from okd.p_track_conversion import p_track_to_midi, midi_to_p_tracks, midi_to_p3_track 19 | 20 | __logger = getLogger(__name__) 21 | 22 | 23 | def 
from logging import getLogger

import mido

from midi.utils import get_meta_track, get_track_by_port_channel
from okd.okd_file import OkdGenericHeader, OkdFile
from okd.chunks import (
    MTrackInterpretation,
    MTrackChunk,
    PTrackInfoChunk,
    ExtendedPTrackInfoChunk,
    P3TrackInfoChunk,
    PTrackChunk,
    p_track_info_chunk_by_p_track_chunks,
    p3_track_info_chunk_by_p_track_chunks,
)
from okd.m_track_conversion import midi_to_m_track
from okd.p_track_conversion import p_track_to_midi, midi_to_p_tracks, midi_to_p3_track

__logger = getLogger(__name__)


def okd_to_midi(okd: OkdFile, sysex_to_text: bool) -> mido.MidiFile:
    """Make a MIDI file from an OKD file.

    Args:
        okd (OkdFile): OKD file
        sysex_to_text (bool): Convert SysEx Messages to Text Meta Messages

    Raises:
        ValueError: Invalid input OKD.

    Returns:
        mido.MidiFile: MIDI file
    """
    __logger.info(f"OKD loaded. header={okd.header}")

    p_track_info: (
        PTrackInfoChunk | ExtendedPTrackInfoChunk | P3TrackInfoChunk | None
    ) = None
    p_tracks: list[PTrackChunk] = []
    m_track_interpretation: MTrackInterpretation | None = None

    for chunk in okd.chunks:
        chunk_id_hex = chunk.id.hex().upper()
        __logger.info(f"{type(chunk).__name__} found. id={chunk.id} (0x{chunk_id_hex})")
        if isinstance(chunk, MTrackChunk):
            m_track_interpretation = MTrackInterpretation.from_track(chunk)
        elif isinstance(
            chunk, (PTrackInfoChunk, ExtendedPTrackInfoChunk, P3TrackInfoChunk)
        ):
            # All three P-Track Info variants are accepted; the last one wins.
            p_track_info = chunk
        elif isinstance(chunk, PTrackChunk):
            p_tracks.append(chunk)

    if m_track_interpretation is None or p_track_info is None or len(p_tracks) == 0:
        raise ValueError(
            "Invalid input OKD. Needed M-Track, P-Track Info and P-Tracks."
        )

    __logger.info("Make P-Track MIDI file.")
    return p_track_to_midi(
        m_track_interpretation, p_track_info, p_tracks, sysex_to_text
    )


def midi_to_okds(
    midi: mido.MidiFile, header: OkdGenericHeader
) -> tuple[OkdFile, OkdFile]:
    """Convert a MIDI file to a playing OKD and a P3-Track OKD.

    Args:
        midi (mido.MidiFile): MIDI file
        header (OkdGenericHeader): Header used for both output OKD files

    Raises:
        ValueError: Meta track not found.
        ValueError: P-Track not found.
        ValueError: P3-Track not found.

    Returns:
        tuple[OkdFile, OkdFile]: P-Track (playing) OKD and P3-Track OKD
    """
    meta_track = get_meta_track(midi.tracks)
    if meta_track is None:
        raise ValueError("Meta track not found.")

    m_track_chunk = midi_to_m_track(midi)

    # Collect every (port, channel) track present on ports 0-1.
    candidates = (
        get_track_by_port_channel(midi.tracks, port, channel)
        for port in range(2)
        for channel in range(16)
    )
    p_tracks = [track for track in candidates if track is not None]
    if not p_tracks:
        raise ValueError("P-Track not found.")
    p_track_midi = mido.MidiFile()
    p_track_midi.tracks = [meta_track, *p_tracks]
    p_track_chunks = midi_to_p_tracks(p_track_midi)
    p_track_info_chunk = p_track_info_chunk_by_p_track_chunks(p_track_chunks)

    p3_source = get_track_by_port_channel(midi.tracks, 1, 8)
    if p3_source is None:
        raise ValueError("P3-Track not found.")
    # Remap the P3 track to port 2 / channel 14 on *copies* of the messages.
    # The previous implementation mutated the caller's messages in place,
    # corrupting the input MidiFile.
    p3_track = mido.MidiTrack()
    for message in p3_source:
        if message.type == "midi_port":
            message = message.copy(port=2)
        if hasattr(message, "channel"):
            message = message.copy(channel=14)
        p3_track.append(message)
    p3_track_midi = mido.MidiFile()
    p3_track_midi.tracks = [meta_track, p3_track]
    p3_track_chunk = midi_to_p3_track(p3_track_midi)
    p3_track_info_chunk = p3_track_info_chunk_by_p_track_chunks(p3_track_chunk)

    playing_okd = OkdFile(header, [m_track_chunk, p_track_info_chunk, *p_track_chunks])
    p3_okd = OkdFile(header, [p3_track_info_chunk, p3_track_chunk])
    return playing_okd, p3_okd
class MtfAudio:
    """MTF Audio Processor.

    Decodes a track's audio into a pydub ``AudioSegment`` (held in
    ``self.audio``) and applies MTF volume/pan events to it.
    """

    def decode_adpcm(self, input_file):
        """Decode an ADPCM file into ``self.audio``.

        Args:
            input_file: Path of the ADPCM file to decode.
        """
        with open(input_file, "rb") as f:
            data = f.read()

        stream = io.BytesIO(data)
        decoder = AdpcmDecoder()
        samples = np.array(decoder.decode(stream), dtype="int16")

        # NOTE(review): assumes 22.05 kHz / mono / 16-bit output -- confirm
        # against the MTF ADPCM format.
        self.audio = AudioSegment(
            samples.tobytes(), frame_rate=22050, sample_width=2, channels=1
        )

    def decode_opus(self, input_file):
        """Load an OPUS (OGG) file into ``self.audio`` and adjust its volume."""
        audio = AudioSegment.from_file(input_file)
        # Adjust volume (dB) from settings file
        self.audio = audio + self.load_opus_volume_increase()

    def load_opus_volume_increase(self):
        """Load the OPUS volume increase (dB) from the settings file.

        Returns:
            int: Gain in dB; 0 when the setting is missing.
        """
        config = configparser.ConfigParser()
        config.read(SETTING_FILE)
        return int(config.get("MtfAudio", "OpusVolumeIncrease", fallback=0))

    def decode_others(self, input_file):
        """Load any other audio file format into ``self.audio``."""
        self.audio = AudioSegment.from_file(input_file)

    def apply_vol_events(self, vol_events, start_time):
        """Apply every supported VolEvent type to ``self.audio``.

        Args:
            vol_events: List of event dicts with "clock", "type" and "value".
            start_time: Clock position where the audio starts.

        Returns:
            AudioSegment: Audio with all event types applied.
        """
        for evt_type in ["Velocity", "Pan", "AdpcmVol", "RelVol", "RecBoostVol", "AdpcmRev"]:
            self.audio = self.process_vol_events(vol_events, start_time, evt_type)
        return self.audio

    @staticmethod
    def _event_value(apply_type, event, current):
        """Normalize an event's raw value for the given event type.

        Returns ``current`` unchanged for unknown event types.
        """
        value = event["value"]
        if apply_type in ("Velocity", "AdpcmVol", "RelVol"):
            return value / 127.0  # MIDI-style 0-127 -> ratio
        if apply_type == "Pan":
            return value / 100.0 if -100 <= value <= 100 else 0
        if apply_type in ("RecBoostVol", "AdpcmRev"):
            return value
        return current

    def process_vol_events(self, vol_events, start_time, apply_type):
        """Process one VolEvent type over ``self.audio``.

        Events at or before ``start_time`` establish the initial state;
        later events re-apply their absolute value from their offset.

        Args:
            vol_events: List of event dicts with "clock", "type" and "value".
            start_time: Clock position where the audio starts.
            apply_type: Event type to process.

        Returns:
            AudioSegment: Processed audio.
        """
        # Initial values per event type
        value_map = {
            "Velocity": 1.0,
            "Pan": 0.0,
            "AdpcmVol": 1.0,
            "RelVol": 1.0,
            "RecBoostVol": 0.0,  # dB
            "AdpcmRev": 0,  # Not Implemented
        }
        apply_value = value_map.get(apply_type)

        events = sorted(vol_events, key=lambda e: e["clock"])  # sort once, not per pass

        # Events at or before start_time only set the initial value.
        for event in events:
            if event["clock"] > start_time:
                break
            if event["type"] == apply_type:
                apply_value = self._event_value(apply_type, event, apply_value)

        audio = self.apply_effect(self.audio, apply_type, apply_value)

        # Events strictly after start_time. (Previously `<`, which applied an
        # event at exactly start_time twice: once as the initial value and
        # once again as a segment starting at offset 0.)
        for event in events:
            if event["clock"] <= start_time or event["type"] != apply_type:
                continue

            rel_time = event["clock"] - start_time
            apply_value = self._event_value(apply_type, event, apply_value)

            # Values are absolute levels, so slice the *unprocessed* audio:
            # applying on top of the already-gained tail would compound gains.
            segment = self.apply_effect(self.audio[rel_time:], apply_type, apply_value)
            audio = audio[:rel_time] + segment

        return audio

    def apply_effect(self, segment, effect_type, value):
        """Apply a single effect to a segment.

        Args:
            segment: AudioSegment to process.
            effect_type: Event type.
            value: Normalized value from :meth:`_event_value`.

        Returns:
            AudioSegment: Processed segment (unchanged for unknown types).
        """
        if effect_type in ("Velocity", "AdpcmVol", "RelVol"):
            if value <= 0:
                return segment - 120  # effectively mute
            return segment + 20 * np.log10(value)  # ratio -> dB gain
        if effect_type == "Pan":
            return segment.pan(value)
        if effect_type == "RecBoostVol":
            return segment - value
        # AdpcmRev and unknown types: no-op
        return segment
BinaryIO 5 | 6 | 7 | @dataclass 8 | class ChunkBase(ABC): 9 | """Chunk Base Class""" 10 | 11 | END_OF_FILE_MARK = b"\x00\x00\x00\x00" 12 | 13 | id: bytes 14 | 15 | @staticmethod 16 | def __descramble_header(id: bytes, size: int) -> tuple[bytes, int]: 17 | """Descramble Chunk Header 18 | 19 | Args: 20 | id (bytes): ID 21 | size (int): Size 22 | 23 | Returns: 24 | tuple[int, bytes]: ID and Size 25 | """ 26 | # Scrambled YADD chunk header 27 | if id == b"\x4e\x96\x53\x93": 28 | id = b"YADD" 29 | size ^= 0x17D717D7 30 | return id, size 31 | 32 | @staticmethod 33 | def _read_common(stream: BinaryIO) -> tuple[bytes, bytes]: 34 | """Read Common Part 35 | 36 | Args: 37 | stream (BinaryIO): Input stream 38 | 39 | Returns: 40 | tuple[int, bytes]: Chunk ID and Payload 41 | """ 42 | buffer = stream.read(8) 43 | if len(buffer) == 0 or buffer == ChunkBase.END_OF_FILE_MARK: 44 | # End of File 45 | raise ValueError("Reached to End of File.") 46 | if len(buffer) != 8: 47 | stream.seek(-len(buffer), os.SEEK_CUR) 48 | raise ValueError("Reached to End of File.") 49 | id = buffer[0:4] 50 | size = int.from_bytes(buffer[4:8], "big") 51 | id, size = ChunkBase.__descramble_header(id, size) 52 | payload = stream.read(size) 53 | return id, payload 54 | 55 | @staticmethod 56 | def peek_header(stream: BinaryIO) -> tuple[bytes, int] | None: 57 | """Peek Header 58 | 59 | Args: 60 | stream (BinaryIO): Input stream 61 | 62 | Returns: 63 | bytes: ID and Size 64 | """ 65 | buffer = stream.read(8) 66 | stream.seek(-len(buffer), os.SEEK_CUR) 67 | if len(buffer) == 0 or buffer == ChunkBase.END_OF_FILE_MARK: 68 | # End of File 69 | return 70 | if len(buffer) != 8: 71 | # End of File 72 | return 73 | id = buffer[0:4] 74 | size = int.from_bytes(buffer[4:8], "big") 75 | return ChunkBase.__descramble_header(id, size) 76 | 77 | @staticmethod 78 | def __seek_header( 79 | stream: BinaryIO, id: bytes | None = None 80 | ) -> tuple[bytes, int] | None: 81 | """Seek header 82 | 83 | Args: 84 | stream (BinaryIO): 
Input stream 85 | id (bytes | None, optional): Target ID. Defaults to None. 86 | 87 | Returns: 88 | tuple[int, int] | None: If ID and size found, else not found 89 | """ 90 | while True: 91 | header = ChunkBase.peek_header(stream) 92 | if header is None: 93 | return 94 | current_id, current_size = header 95 | if id is None: 96 | return (current_id, current_size) 97 | else: 98 | if current_id == id: 99 | return (current_id, current_size) 100 | stream.seek(8 + current_size, os.SEEK_CUR) 101 | 102 | @staticmethod 103 | def index_chunk(stream: BinaryIO) -> list[tuple[int, int, bytes]]: 104 | """Index Chunk 105 | 106 | Args: 107 | stream (BinaryIO): Input stream 108 | 109 | Returns: 110 | list[tuple[int, int, bytes]]: List of offset, size and ID 111 | """ 112 | index: list[tuple[int, int, bytes]] = [] 113 | 114 | id = b"" 115 | last_position = -1 116 | while True: 117 | header = ChunkBase.__seek_header(stream) 118 | if header is None: 119 | break 120 | id, size = header 121 | position = stream.tell() 122 | if last_position != -1: 123 | index.append((last_position, position - last_position, id)) 124 | last_position = position 125 | stream.seek(8 + size, os.SEEK_CUR) 126 | 127 | if last_position != -1: 128 | position = stream.tell() 129 | index.append((last_position, position - last_position, id)) 130 | 131 | return index 132 | 133 | @abstractmethod 134 | def _payload_buffer(self) -> bytes: 135 | """Payload Buffer 136 | 137 | Returns: 138 | bytes: Payload Buffer 139 | """ 140 | pass 141 | 142 | def write(self, stream: BinaryIO) -> None: 143 | """Write 144 | 145 | Args: 146 | stream (BinaryIO): Output stream 147 | """ 148 | payload_buffer = self._payload_buffer() 149 | stream.write(self.id) 150 | if len(payload_buffer) % 2 != 0: 151 | payload_buffer += b"\x00" 152 | stream.write(len(payload_buffer).to_bytes(4, "big")) 153 | stream.write(payload_buffer) 154 | -------------------------------------------------------------------------------- /midi/time_converter.py: 
import mido


class MidiTimeConverter:
    """Converts between milliseconds and MIDI ticks using a tempo map."""

    def __init__(self):
        # Pulses (ticks) per quarter note; overwritten by load_from_midi.
        self.ppqn = 480
        # Tempo changes (position_ms, tempo_bpm)
        self.tempo_changes: list[tuple[int, float]] = []

    def add_tempo_change(self, position_ms: int, tempo_bpm: float):
        """Add a tempo change event at the specified position."""
        self.tempo_changes.append((position_ms, tempo_bpm))
        # Keep tempo changes sorted by position
        self.tempo_changes.sort(key=lambda x: x[0])

    def load_from_midi(self, midi: mido.MidiFile):
        """Load tempo changes from a MIDI file

        Parameters:
            midi (MidiFile): MIDI file
        """
        self.ppqn: int = midi.ticks_per_beat

        current_time_ticks = 0
        current_time_ms = 0.0
        current_tempo = 500000  # 120 BPM

        # Clear existing tempo changes; always seed a change at t=0 so the
        # whole timeline is covered by some tempo section.
        self.tempo_changes = [(0, mido.tempo2bpm(current_tempo))]

        # Find the first track with tempo changes
        tempo_track = None
        for track in midi.tracks:
            if any(message.type == "set_tempo" for message in track):
                tempo_track = track
                break

        if tempo_track:
            for message in tempo_track:
                current_time_ticks += message.time

                # Convert current position to milliseconds. The elapsed delta
                # is converted with the tempo in force *before* this message,
                # so current_tempo is updated only afterwards.
                if message.time > 0:
                    ms_per_tick = current_tempo / (self.ppqn * 1000)
                    current_time_ms += message.time * ms_per_tick

                if message.type == "set_tempo":
                    current_tempo = message.tempo
                    self.add_tempo_change(
                        round(current_time_ms), mido.tempo2bpm(current_tempo)
                    )

    def ms_to_ticks(self, time_ms: int) -> int:
        """Convert milliseconds to MIDI ticks

        Raises:
            ValueError: No tempo information available
        """
        if not self.tempo_changes:
            raise ValueError("No tempo information available")

        total_ticks = 0.0

        # Handle time before first tempo change (first change's tempo is
        # extrapolated backwards).
        if time_ms < self.tempo_changes[0][0]:
            return self._calculate_ticks_at_tempo(time_ms, self.tempo_changes[0][1])

        # Process each tempo section.
        # NOTE(review): time between 0 and tempo_changes[0][0] is never
        # counted when the first change is not at 0; load_from_midi always
        # seeds a change at 0, so this only matters for manual
        # add_tempo_change use -- confirm intended.
        for i in range(len(self.tempo_changes)):
            current_tempo = self.tempo_changes[i][1]

            # Calculate end of current tempo section
            section_end = (
                self.tempo_changes[i + 1][0]
                if i < len(self.tempo_changes) - 1
                else time_ms
            )
            section_end = min(section_end, time_ms)

            # Calculate ticks for this section
            section_duration = section_end - self.tempo_changes[i][0]
            if section_duration > 0:
                total_ticks += self._calculate_ticks_at_tempo(
                    section_duration, current_tempo
                )

            if section_end == time_ms:
                break

        return round(total_ticks)

    def _calculate_ticks_at_tempo(self, duration_ms, tempo_bpm) -> float:
        """Calculate ticks for a duration at a constant tempo."""
        microseconds_per_beat = 60_000_000 / tempo_bpm
        microseconds = duration_ms * 1000
        return (microseconds / microseconds_per_beat) * self.ppqn

    def ticks_to_ms(self, ticks: int):
        """Convert MIDI ticks to milliseconds

        Raises:
            ValueError: No tempo information available
        """
        if not self.tempo_changes:
            raise ValueError("No tempo information available")

        remaining_ticks = ticks
        current_time = 0

        for i in range(len(self.tempo_changes)):
            current_tempo = self.tempo_changes[i][1]

            # Calculate how many ticks until next tempo change
            if i < len(self.tempo_changes) - 1:
                section_duration = (
                    self.tempo_changes[i + 1][0] - self.tempo_changes[i][0]
                )
                section_ticks = self._calculate_ticks_at_tempo(
                    section_duration, current_tempo
                )
            else:
                # Last section extends indefinitely.
                section_ticks = remaining_ticks

            if remaining_ticks <= section_ticks:
                # Convert remaining ticks to ms at current tempo
                microseconds_per_beat = 60_000_000 / current_tempo
                ms = (remaining_ticks * microseconds_per_beat) / (self.ppqn * 1000)
                return round(current_time + ms)

            remaining_ticks -= section_ticks
            current_time = self.tempo_changes[i + 1][0]

        return round(current_time)


# ---- okd/adpcm.py ----
from dataclasses import dataclass
import os
from typing import BinaryIO, Self

FRAMES_PER_FRAME_GROUP = 18

SUB_FRAMES = 4
SUB_FRAME_NIBBLES = 28
SAMPLES_PER_FRAME = SUB_FRAME_NIBBLES * SUB_FRAMES

SHIFT_LIMIT = 12
INDEX_LIMIT = 3

# Two-tap predictor coefficients, indexed by the parameter's `index` field
# (see AdpcmDecoder.__decode_sample).
K0 = [0.0, 0.9375, 1.796875, 1.53125]
K1 = [0.0, 0.0, -0.8125, -0.859375]
# 4-bit two's-complement nibble -> signed int lookup.
SIGNED_NIBBLES = [0, 1, 2, 3, 4, 5, 6, 7, -8, -7, -6, -5, -4, -3, -2, -1]


@dataclass
class AdpcmFrame:
    """ADPCM Frame: 16 parameter bytes followed by 112 sample bytes."""

    parameters: bytes
    samples: bytes

    @classmethod
    def read(cls, stream: BinaryIO) -> Self:
        """Read one 128-byte frame.

        Args:
            stream (BinaryIO): Input stream

        Raises:
            ValueError: Too less read bytes.

        Returns:
            Self: Instance of this class
        """
        buffer = stream.read(128)
        if len(buffer) < 128:
            raise ValueError("Too less read bytes.")

        parameters = buffer[0:16]
        samples = buffer[16:128]

        return cls(parameters, samples)


class AdpcmDecoder:
    """ADPCM Decoder"""

    def __init__(self):
        """Constructor"""
        # Predictor history: previous two decoded samples.
        self.prev1 = 0
        self.prev2 = 0

    @staticmethod
    def __clamp16(value: float) -> int:
        """Clamp float to signed 16 bit int

        Args:
            value (float): float value

        Returns:
            int: int value
        """
        if value > 32767.0:
            return 32767
        elif value < -32768.0:
            return -32768
        else:
            return round(value)

    def __decode_sample(self, sp: int, su: int) -> int:
        """Decode Sample

        Args:
            sp (int): Parameter
            su (int): Sample

        Raises:
            ValueError: Parameter `shift` out of range.
            ValueError: Parameter `index` out of range.

        Returns:
            int: Decoded sample
        """
        # Low nibble of the parameter byte: shift amount; high nibble:
        # predictor coefficient index.
        shift = sp & 0x0F
        if SHIFT_LIMIT < shift:
            raise ValueError("Parameter `shift` out of range.")
        index = sp >> 4
        if INDEX_LIMIT < index:
            raise ValueError("Parameter `index` out of range.")

        # Expand the signed 4-bit nibble, then add the two-tap prediction
        # from the previous two output samples.
        sample = SIGNED_NIBBLES[su] << (12 - (shift & 0x1F))
        sample += K0[index] * self.prev1 + K1[index] * self.prev2
        sample = AdpcmDecoder.__clamp16(sample)

        # Shift the predictor history.
        self.prev2 = self.prev1
        self.prev1 = sample

        return sample

    def __decode_subframe(
        self, sp: int, samples: bytes, subframe_index: int, nibble: int
    ) -> list[int]:
        """Decode Subframe

        Args:
            sp (int): Parameter
            samples (bytes): Samples
            subframe_index (int): Subframe index
            nibble (int): Nibble (0: High, 1: Low)

        Returns:
            list[int]: Decoded subframe
        """
        decoded = [0] * SUB_FRAME_NIBBLES
        for i in range(SUB_FRAME_NIBBLES):
            # Bytes of the 4 sub-frames are interleaved.
            su_index = i * SUB_FRAMES + subframe_index
            su = samples[su_index]
            su = su >> 4 if nibble != 0 else su & 0x0F
            decoded[i] = self.__decode_sample(sp, su)
        return decoded

    def __decode_frame(self, frame: AdpcmFrame) -> list[int]:
        """Decode Frame

        Args:
            frame (AdpcmFrame): Frame

        Returns:
            list[int]: Decoded Frame
        """
        decoded: list[int] = []
        for i in range(SUB_FRAMES):
            for j in range(2):
                sp_index = j + i * 2
                # NOTE(review): parameter bytes 4..7 are skipped for the last
                # two sub-frames (offset +4) -- presumably reserved bytes in
                # the 16-byte parameter block; confirm against the format.
                if 2 <= i:
                    sp_index += 4
                sp = frame.parameters[sp_index]
                decoded += self.__decode_subframe(sp, frame.samples, i, j)
        return decoded

    def __decode_frame_group(self, stream: BinaryIO) -> list[int]:
        """Decode Frame Group (18 consecutive 128-byte frames).

        Args:
            stream (BinaryIO): Input stream

        Returns:
            list[int]: Decoded Frame Group
        """
        decoded: list[int] = []
        for _ in range(FRAMES_PER_FRAME_GROUP):
            frame = AdpcmFrame.read(stream)
            decoded += self.__decode_frame(frame)
        return decoded

    def decode(self, stream: BinaryIO) -> list[int]:
        """Decode

        Args:
            stream (BinaryIO): Input stream

        Returns:
            list[int]: Decoded samples
        """
        decoded: list[int] = []
        while True:
            try:
                decoded += self.__decode_frame_group(stream)
            except ValueError:
                # A short read inside the group marks End of Stream.
                break
            # Skip null bytes
            stream.seek(20, os.SEEK_CUR)
        return decoded


# ---- sprc/header.py ----
from dataclasses import dataclass
from typing import Final, BinaryIO, Self
import io
from fastcrc import crc16

# Magic bytes that identify SPRC files
_MAGIC_BYTES: Final[bytes] = b"SPRC"

# Header size in bytes
_HEADER_SIZE: Final[int] = 16


@dataclass
class SprcHeader:
    """
    SPRC Header class for handling SPRC file format headers.

    This class provides functionality to read, write, and validate SPRC headers,
    which include magic bytes, revision information, CRC checksums, and flags.

    Attributes:
        revision: Header revision number
        crc_value: CRC-16 checksum of the file content
        force_flag: Flag indicating if force processing is required
        unknown_0: Reserved bytes for future use
    """

    revision: int = 0
    crc_value: int = 0
    force_flag: int = 0
    unknown_0: bytes = b""

    @staticmethod
    def has_sprc_header(stream: BinaryIO) -> bool:
        """
        Check if a stream contains a valid SPRC header.

        This method reads the first 16 bytes of the stream to check for the SPRC
        magic bytes signature, then restores the stream position.
        Args:
            stream: Input stream to check

        Returns:
            True if the stream has a valid SPRC header, False otherwise
        """
        position = stream.tell()
        try:
            buffer = stream.read(_HEADER_SIZE)
            if len(buffer) < _HEADER_SIZE:
                return False

            magic_bytes = buffer[0:4]
            return magic_bytes == _MAGIC_BYTES
        finally:
            # Restore original stream position
            stream.seek(position)

    @classmethod
    def read(cls, stream: BinaryIO) -> Self:
        """
        Read SPRC header from a stream.

        Args:
            stream: Input stream to read from

        Returns:
            New SprcHeader instance

        Raises:
            ValueError: If the stream doesn't contain a valid SPRC header
        """
        buffer = stream.read(_HEADER_SIZE)
        if len(buffer) < _HEADER_SIZE:
            raise ValueError(
                f"Insufficient data: expected {_HEADER_SIZE} bytes, got {len(buffer)}"
            )

        # Check magic bytes
        magic_bytes = buffer[0:4]
        if magic_bytes != _MAGIC_BYTES:
            raise ValueError(
                f"Invalid magic bytes: expected {_MAGIC_BYTES!r}, got {magic_bytes!r}"
            )

        # Parse header fields (big-endian)
        revision = int.from_bytes(buffer[4:6], "big")
        crc_value = int.from_bytes(buffer[6:8], "big")
        force_flag = buffer[8]
        unknown_0 = buffer[9:16]

        return cls(revision, crc_value, force_flag, unknown_0)

    def validate_crc(self, data: bytes | BinaryIO) -> bool:
        """
        Validate data with stored CRC value.

        Args:
            data: Data bytes or stream to validate

        Returns:
            True if the calculated CRC matches the stored CRC, False otherwise
        """
        if isinstance(data, io.IOBase):
            # Save current position
            position = data.tell()

            try:
                # Skip SPRC header: the CRC covers only the payload after it.
                data.seek(_HEADER_SIZE)
                buffer = data.read()
                data_bytes = buffer
            finally:
                # Restore original position
                data.seek(position)
        else:
            data_bytes = data

        # Calculate CRC-16 (Genibus) of the data
        calculated_crc = crc16.genibus(data_bytes)

        return calculated_crc == self.crc_value

    def write(self, stream: BinaryIO) -> None:
        """
        Write SPRC header to a stream.

        The stored ``crc_value`` is written as-is; it is not recomputed
        here (see :meth:`create`).

        Args:
            stream: Output stream to write to
        """
        # Write magic bytes
        stream.write(_MAGIC_BYTES)

        # Write header fields
        stream.write(self.revision.to_bytes(2, "big"))
        stream.write(self.crc_value.to_bytes(2, "big"))
        stream.write(self.force_flag.to_bytes(1, "big"))
        stream.write(self.unknown_0)

    @classmethod
    def create(cls, data: bytes, revision: int = 1, force_flag: int = 0) -> Self:
        """
        Create a new SPRC header for the given data.

        Args:
            data: Data bytes to calculate CRC for
            revision: Header revision number (default: 1)
            force_flag: Force processing flag (default: 0)

        Returns:
            New SprcHeader instance with calculated CRC
        """
        # Calculate CRC-16 (Genibus) of the data
        crc_value = crc16.genibus(data)

        # Create unknown_0 bytes (all zeros)
        unknown_0 = bytes(7)

        return cls(revision, crc_value, force_flag, unknown_0)


# ---- okd/okd_midi.py ----
import os
from typing import BinaryIO


def read_status_byte(stream: BinaryIO) -> int:
    """Read Status Byte

    Args:
        stream (BinaryIO): Input stream

    Raises:
        ValueError: Invalid Status Byte

    Returns:
        int: Status Byte
    """
    byte = stream.read(1)
    if len(byte) < 1:
        raise ValueError("Too less read bytes.")
    byte = byte[0]
    # Status bytes have the MSB set.
    if byte & 0x80 != 0x80:
        position = stream.tell()
        raise ValueError(f"Invalid status byte. byte={byte} position={position}")
    return byte
def peek_status_byte(stream: BinaryIO) -> int:
    """Peek Status Byte without consuming it.

    Args:
        stream (BinaryIO): Input stream

    Raises:
        ValueError: Invalid Status Byte

    Returns:
        int: Status Byte
    """
    data = stream.read(1)
    if not data:
        raise ValueError("Too less read bytes.")
    stream.seek(-1, os.SEEK_CUR)
    byte = data[0]
    if byte & 0x80 != 0x80:
        position = stream.tell()
        raise ValueError(f"Invalid Status Byte. byte={byte} position={position}")
    return byte


def read_data_byte(stream: BinaryIO) -> int:
    """Read Data Byte (MSB clear).

    Args:
        stream (BinaryIO): Input stream

    Raises:
        ValueError: Invalid Data Byte

    Returns:
        int: Data Byte
    """
    data = stream.read(1)
    if not data:
        raise ValueError("Too less read bytes.")
    byte = data[0]
    if byte & 0x80 == 0x80:
        position = stream.tell()
        raise ValueError(f"Invalid Data Byte. byte={byte} position={position}")
    return byte


def peek_data_byte(stream: BinaryIO) -> int:
    """Peek Data Byte without consuming it.

    Args:
        stream (BinaryIO): Input stream

    Raises:
        ValueError: Invalid data byte

    Returns:
        int: Data Byte
    """
    data = stream.read(1)
    if not data:
        raise ValueError("Too less read bytes.")
    stream.seek(-1, os.SEEK_CUR)
    byte = data[0]
    if byte & 0x80 == 0x80:
        position = stream.tell()
        raise ValueError(f"Invalid data byte. byte={byte} position={position}")
    return byte


def is_data_bytes(data: bytes) -> bool:
    """Return True when every byte has the MSB clear (all data bytes).

    Args:
        data (bytes): Data

    Returns:
        bool: True if Data Bytes, else False
    """
    return all(byte & 0x80 != 0x80 for byte in data)


def read_variable_int(stream: BinaryIO) -> int:
    """Read an OKD variable-length int.

    Up to three 6-bit groups; bit 0x40 marks continuation, and its weight
    stays in the accumulated value (the writer compensates for it).

    Args:
        stream (BinaryIO): Input stream

    Raises:
        ValueError: Invalid byte sequence

    Returns:
        int: Variable Int value
    """
    value = 0
    for shift in (0, 6, 12):
        byte = read_data_byte(stream)
        value += byte << shift
        if byte & 0x40 != 0x40:
            return value

    position = stream.tell()
    raise ValueError(f"Invalid byte sequence. position={position}")


def write_variable_int(stream: BinaryIO, value: int) -> None:
    """Write an OKD variable-length int (max 0x04103F).

    Args:
        stream (BinaryIO): Output stream
        value (int): Value

    Raises:
        ValueError: Invalid argument `value`
    """
    if 0x04103F < value:
        raise ValueError("Too big argument `value`. Use write_extended_variable_int.")

    remaining = value
    for shift in (0, 6, 12):
        group = (remaining >> shift) & 0x3F
        rest = remaining - (group << shift)
        if rest != 0x000000:
            group |= 0x40
            # The continuation bit's weight is carried in the value on the
            # reader side, so subtract it here.
            rest -= 0x40 << shift
        remaining = rest
        stream.write(bytes([group]))

        if remaining == 0x000000:
            if group & 0x40 == 0x40:
                # Trailing continuation needs a terminating zero group.
                stream.write(b"\x00")
            break


def read_extended_variable_int(stream: BinaryIO) -> int:
    """Read an Extended Variable Int: the sum of consecutive variable ints,
    stopping at a 0x00 data byte or a non-data byte / End of Stream.

    Args:
        stream (BinaryIO): Input stream

    Returns:
        int: Extended Variable Int value
    """
    value = 0
    while True:
        try:
            if peek_data_byte(stream) == 0x00:
                # Maybe End of Track
                return value
        except ValueError:
            return value
        value += read_variable_int(stream)


def write_extended_variable_int(stream: BinaryIO, value: int) -> None:
    """Write an Extended Variable Int as a run of maximal variable ints.

    Args:
        stream (BinaryIO): Output stream
        value (int): Value
    """
    remaining = value
    while 0x000000 < remaining:
        chunk = min(remaining, 0x04103F)
        write_variable_int(stream, chunk)
        remaining -= chunk
_scramble_pattern = [0x0000] * 256 8 | 9 | __logger = getLogger(__name__) 10 | 11 | 12 | def set_scramble_pattern(pattern: list[int]) -> None: 13 | global _scramble_pattern 14 | _scramble_pattern = pattern.copy() 15 | 16 | 17 | def choose_scramble_pattern_index(): 18 | return randint(0x00, 0xFF) 19 | 20 | 21 | def scramble( 22 | input_stream: BinaryIO, 23 | output_stream: BinaryIO, 24 | scramble_pattern_index: int, 25 | length: int | None = None, 26 | ): 27 | """Scramble 28 | 29 | Args: 30 | input_stream (BinaryIO): Input stream 31 | output_stream (BinaryIO): Output stream 32 | scramble_pattern_index (int): Scramble pattern index 33 | length (int | None, optional): Length. Defaults to None. 34 | 35 | Returns: 36 | int: Last scramble pattern index 37 | """ 38 | if length is not None and length % 2 != 0: 39 | raise ValueError("Argument `length` length must be multiple of 2.") 40 | 41 | start_position = input_stream.tell() 42 | while length is None or ( 43 | length is not None and (input_stream.tell() - start_position) < length 44 | ): 45 | plaintext_buffer = input_stream.read(2) 46 | if len(plaintext_buffer) == 0: 47 | if length is None: 48 | break 49 | else: 50 | raise RuntimeError("Reached to unexpected End of Stream.") 51 | if len(plaintext_buffer) % 2 != 0: 52 | raise ValueError("`plaintext_buffer` length must be 2.") 53 | plaintext = int.from_bytes(plaintext_buffer, "big") 54 | scramble_pattern = _scramble_pattern[scramble_pattern_index % 0x100] 55 | scrambled = plaintext ^ scramble_pattern 56 | scrambled_buffer = scrambled.to_bytes(2, "big") 57 | output_stream.write(scrambled_buffer) 58 | scramble_pattern_index += 1 59 | return scramble_pattern_index % 0x100 60 | 61 | 62 | def detect_scramble_pattern_index( 63 | stream: BinaryIO, 64 | expected_magic_bytes: bytes, 65 | ) -> int | None: 66 | """Detect scramble pattern index 67 | 68 | Args: 69 | stream (BinaryIO): Input stream 70 | expected_magic_bytes (bytes): Expected magic bytes (4 bytes) 71 | 72 | Raises: 73 | 
ValueError: Invalid argument `expected_magic_bytes` 74 | RuntimeError: Failed to detect OKD file `scramble_pattern_index` 75 | 76 | Returns: 77 | int | None: Scrambled pattern index if int, unscrambled if None 78 | """ 79 | if len(expected_magic_bytes) != 4: 80 | raise ValueError("Argument `expected_magic_bytes` length must be 4.") 81 | 82 | expected_magic_bytes_int = int.from_bytes(expected_magic_bytes, "big") 83 | 84 | position = stream.tell() 85 | magic_bytes_buffer = stream.read(4) 86 | stream.seek(position) 87 | if len(magic_bytes_buffer) != 4: 88 | raise RuntimeError("Invalid `magic_bytes_buffer` length.") 89 | magic_bytes_int = int.from_bytes(magic_bytes_buffer, "big") 90 | if magic_bytes_int == expected_magic_bytes_int: 91 | __logger.info("OKD file is not scrambled.") 92 | return 93 | 94 | __logger.info("OKD file is scrambled.") 95 | expected_pattern = magic_bytes_int ^ expected_magic_bytes_int 96 | for scramble_pattern_index in range(0x100): 97 | if scramble_pattern_index == 0xFF: 98 | candidated_pattern = _scramble_pattern[0] 99 | else: 100 | candidated_pattern = _scramble_pattern[scramble_pattern_index + 1] 101 | candidated_pattern |= _scramble_pattern[scramble_pattern_index] << 16 102 | if candidated_pattern == expected_pattern: 103 | __logger.info( 104 | f"OKD file `scramble_pattern_index` detected. scramble_pattern_index={scramble_pattern_index}" 105 | ) 106 | return scramble_pattern_index 107 | raise RuntimeError("Failed to detect OKD file `scramble_pattern_index`.") 108 | 109 | 110 | def descramble( 111 | input_stream: BinaryIO, 112 | output_stream: BinaryIO, 113 | scramble_pattern_index: int, 114 | length: int | None = None, 115 | ) -> int: 116 | """Descramble 117 | 118 | Args: 119 | input_stream (BinaryIO): Input stream 120 | output_stream (BinaryIO): Output stream 121 | scramble_pattern_index (int): Scramble pattern index 122 | length (int | None, optional): Length. Defaults to None. 
123 | 124 | Returns: 125 | int: Last scramble pattern index 126 | """ 127 | if length is not None and length % 2 != 0: 128 | raise ValueError("Argument `length` length must be multiple of 2.") 129 | 130 | start_position = input_stream.tell() 131 | while length is None or ( 132 | length is not None and (input_stream.tell() - start_position) < length 133 | ): 134 | scrambled_buffer = input_stream.read(2) 135 | if len(scrambled_buffer) == 0: 136 | if length is None: 137 | break 138 | else: 139 | raise RuntimeError("Reached to unexpected End of Stream.") 140 | if len(scrambled_buffer) % 2 != 0: 141 | raise ValueError("`plaintext_buffer` length must be 2.") 142 | scrambled = int.from_bytes(scrambled_buffer, "big") 143 | scramble_pattern = _scramble_pattern[scramble_pattern_index % 0x100] 144 | plaintext = scrambled ^ scramble_pattern 145 | plaintext_buffer = plaintext.to_bytes(2, "big") 146 | output_stream.write(plaintext_buffer) 147 | scramble_pattern_index = scramble_pattern_index + 1 148 | return scramble_pattern_index % 0x100 149 | -------------------------------------------------------------------------------- /okd/chunks/utils.py: -------------------------------------------------------------------------------- 1 | from typing import BinaryIO 2 | 3 | from .generic_chunk import GenericChunk 4 | from .ykyi_chunk import YkyiChunk 5 | from .p_track_info_chunk import ( 6 | PTrackInfoChannelInfoEntry, 7 | PTrackInfoEntry, 8 | PTrackInfoChunk, 9 | ) 10 | from .p3_track_info_chunk import P3TrackInfoChunk 11 | from .extended_p_track_info_chunk import ( 12 | ExtendedPTrackInfoChannelInfoEntry, 13 | ExtendedPTrackInfoEntry, 14 | ExtendedPTrackInfoChunk, 15 | ) 16 | from .m_track_chunk import MTrackChunk 17 | from .p_track_chunk import PTrackChunk 18 | from .adpcm_chunk import AdpcmChunk 19 | from .okd_chunk import OkdChunk 20 | 21 | 22 | def read_chunk(stream: BinaryIO) -> OkdChunk: 23 | """Read Chunk 24 | 25 | Args: 26 | stream (BufferedReader): Input stream 27 | 28 | 
    Returns:
        OkdChunk: OKD Chunk
    """
    generic = GenericChunk.read(stream)

    # Dispatch on the chunk ID to a typed chunk; unknown IDs stay generic.
    if generic.id == b"YKYI":
        return YkyiChunk.from_generic(generic)
    elif generic.id == b"YPTI":
        return PTrackInfoChunk.from_generic(generic)
    elif generic.id == b"YP3I":
        return P3TrackInfoChunk.from_generic(generic)
    elif generic.id == b"YPXI":
        return ExtendedPTrackInfoChunk.from_generic(generic)
    elif generic.id[0:3] == b"\xffMR":
        # M(arking)-Track chunks: first 3 ID bytes are 0xFF "MR".
        return MTrackChunk.from_generic(generic)
    elif generic.id[0:3] == b"\xffPR":
        # P(laying)-Track chunks: first 3 ID bytes are 0xFF "PR".
        return PTrackChunk.from_generic(generic)
    elif generic.id == b"YADD":
        return AdpcmChunk.from_generic(generic)

    return generic


def p_track_info_chunk_by_p_track_chunks(
    p_track_chunks: list[PTrackChunk],
) -> PTrackInfoChunk | ExtendedPTrackInfoChunk:
    """Build a P-Track Information Chunk from P-Track chunks.

    Emits a plain YPTI chunk for up to 2 tracks and an extended YPXI chunk
    for more. The two branches build structurally parallel entries.

    Args:
        p_track_chunks (list[PTrackChunk]): P-Track chunks

    Returns:
        PTrackInfoChunk | ExtendedPTrackInfoChunk: P-Track Information Chunk
    """
    if len(p_track_chunks) <= 2:
        p_track_info_entries_1: list[PTrackInfoEntry] = []
        for p_track_chunk in p_track_chunks:
            ports = (
                0x0001
                << PTrackChunk.CHUNK_NUMBER_PORT_MAP[p_track_chunk.track_number()]
            )
            sysex_ports = 4 if p_track_chunk.track_number() >= 2 else 1

            track_info_channel_info_entries_1: list[PTrackInfoChannelInfoEntry] = []
            for channel in range(16):
                exists_message = p_track_chunk.exists_channel_message(channel)
                # Track 1 / channel 9 gets attribute 127; others get 255.
                channel_attribute = (
                    127 if p_track_chunk.track_number() == 1 and channel == 9 else 255
                )
                track_info_channel_info_entries_1.append(
                    PTrackInfoChannelInfoEntry(
                        channel_attribute if exists_message else 0,
                        ports,
                        0x00,
                        0x00,
                    )
                )

            p_track_info_entries_1.append(
                PTrackInfoEntry(
                    p_track_chunk.track_number(),
                    0x40,
                    0x0000,
                    [0] * 16,
                    [0] * 16,
                    track_info_channel_info_entries_1,
                    sysex_ports,
                )
            )

        return PTrackInfoChunk(b"YPTI", p_track_info_entries_1)

    else:
        p_track_info_entries_2: list[ExtendedPTrackInfoEntry] = []
        for p_track_chunk in p_track_chunks:
            ports = (
                0x0001
                << PTrackChunk.CHUNK_NUMBER_PORT_MAP[p_track_chunk.track_number()]
            )
            sysex_ports = 4 if p_track_chunk.track_number() >= 2 else 1

            track_info_channel_info_entries_2: list[
                ExtendedPTrackInfoChannelInfoEntry
            ] = []
            for channel in range(16):
                exists_message = p_track_chunk.exists_channel_message(channel)
                channel_attribute = (
                    127 if p_track_chunk.track_number() == 1 and channel == 9 else 255
                )
                track_info_channel_info_entries_2.append(
                    ExtendedPTrackInfoChannelInfoEntry(
                        channel_attribute if exists_message else 0,
                        ports,
                        0x00,
                        0x00,
                        0x00,
                    )
                )

            p_track_info_entries_2.append(
                ExtendedPTrackInfoEntry(
                    p_track_chunk.track_number(),
                    0x40,
                    0x00,
                    [0] * 16,
                    [0] * 16,
                    track_info_channel_info_entries_2,
                    sysex_ports,
                    0x00,
                )
            )

        return ExtendedPTrackInfoChunk(
            b"YPXI", b"\x00\x00\x00\x00\x00\x00\x00\x00", 0, p_track_info_entries_2
        )


def p3_track_info_chunk_by_p_track_chunks(
    p_track_chunk: PTrackChunk,
) -> P3TrackInfoChunk:
    """Build a P3-Track Information Chunk from a single P-Track chunk.

    Args:
        p_track_chunk (PTrackChunk): P-Track chunk

    Returns:
        P3TrackInfoChunk: P3-Track Information Chunk
    """
    track_info_channel_info_entries: list[PTrackInfoChannelInfoEntry] = []
    for channel in range(16):
        exists_message = p_track_chunk.exists_channel_message(channel)
        track_info_channel_info_entries.append(
            PTrackInfoChannelInfoEntry(
                255 if exists_message else 0,
                0x0004,
                0x00,
                0x00,
            )
        )

    return P3TrackInfoChunk(
        b"YP3I",
        0x02,
        0x40,
        0x0000,
        [0] * 16,
        [0] * 16,
        track_info_channel_info_entries,
        0x0004,
    )
--------------------------------------------------------------------------------
/okd/chunks/ykyi_chunk.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass
from typing import Self

from .chunk_base import ChunkBase
from .generic_chunk import GenericChunk


@dataclass
class YkyiInfoEntry:
    """YKYI Info Entry (key-value pair)"""

    key: str
    value: str


@dataclass
class YkyiChunk(ChunkBase):
    """YKYI Information Chunk

    This chunk contains metadata about the song in a text-based format.
    The structure consists of:
    - 32 bytes of header data
    - Text data with semicolon-separated key:value pairs

    Known fields (keys are in half-width katakana):
    - Artist name (romanized and katakana reading)
    - Song title (romanized and katakana reading)
    - Composer/Lyricist information
    - Release date
    - PSVV: Version
    - PSVS: Serial number
    """

    # Header fields (32 bytes total)
    unknown_00: bytes  # 0x00-0x0F: 16 bytes, usually zero
    flags_10: int  # 0x10-0x11: 2 bytes, e.g., 0x1818
    unknown_12: int  # 0x12-0x13: 2 bytes
    value_14: int  # 0x14-0x15: 2 bytes, e.g., 0x0278
    unknown_16: bytes  # 0x16-0x1F: 10 bytes, usually zero

    # Text data entries
    entries: list[YkyiInfoEntry]

    # Raw text data for preservation
    raw_text: bytes

    @classmethod
    def from_generic(cls, generic: GenericChunk) -> Self:
        """From Generic Chunk

        Args:
            generic (GenericChunk): Generic Chunk

        Returns:
            Self: Instance of this class
        """
        payload = generic.payload

        # Parse header (32 bytes)
        unknown_00 = payload[0x00:0x10]
        flags_10 = int.from_bytes(payload[0x10:0x12], "big")
        unknown_12 = int.from_bytes(payload[0x12:0x14], "big")
        value_14 = int.from_bytes(payload[0x14:0x16], "big")
        unknown_16 = payload[0x16:0x20]

        # Parse text data (after 32-byte header)
        raw_text = payload[0x20:]
        entries = cls._parse_text_data(raw_text)

        return cls(
            generic.id,
            unknown_00,
            flags_10,
            unknown_12,
            value_14,
            unknown_16,
            entries,
            raw_text,
        )

    @staticmethod
    def _parse_text_data(raw_text: bytes) -> list[YkyiInfoEntry]:
        """Parse text data into key-value entries

        Args:
            raw_text (bytes): Raw text data

        Returns:
            list[YkyiInfoEntry]: List of parsed entries
        """
        entries: list[YkyiInfoEntry] = []

        # Remove trailing null bytes
        text_stripped = raw_text.rstrip(b"\x00")

        # Split by semicolon
        # NOTE(review): assumes 0x3B (';') never occurs inside a multi-byte
        # EUC-JP sequence — true for standard EUC-JP, but confirm for the
        # half-width katakana fields.
        parts = text_stripped.split(b";")

        for part in parts:
            if not part:
                continue

            # Try to decode with EUC-JP (Japanese encoding used in YKYI chunks)
            try:
                decoded = part.decode("euc_jp")
            except UnicodeDecodeError:
                # Fallback to latin-1 for raw bytes
                # (latin-1 never fails, so every part yields an entry).
                decoded = part.decode("latin-1")

            # Split by colon to get key and value
            if ":" in decoded:
                # Find the first colon as separator
                colon_idx = decoded.index(":")
                key = decoded[:colon_idx]
                value = decoded[colon_idx + 1 :]
                entries.append(YkyiInfoEntry(key, value))
            else:
                # No colon, treat entire string as value with empty key
                entries.append(YkyiInfoEntry("", decoded))

        return entries

    def _payload_buffer(self) -> bytes:
        """Payload Buffer

        Returns:
            bytes: Payload Buffer
        """
        buffer = bytearray()

        # Header
        buffer.extend(self.unknown_00)
        buffer.extend(self.flags_10.to_bytes(2, "big"))
        buffer.extend(self.unknown_12.to_bytes(2, "big"))
        buffer.extend(self.value_14.to_bytes(2, "big"))
        buffer.extend(self.unknown_16)

        # Text data (use raw_text to preserve original encoding)
        buffer.extend(self.raw_text)

        return bytes(buffer)

    def get_entry(self, key: str) -> str | None:
        """Get entry value by key

        Args:
            key (str): Key to search for

        Returns:
            str | None: Value if found, None otherwise
        """
        # Linear scan; entry lists are small (a handful of metadata fields).
        for entry in self.entries:
            if entry.key == key:
                return entry.value
        return None

    def get_psvv(self) -> str | None:
        """Get PSV Version

        Returns:
            str | None: PSV Version if found
        """
        return self.get_entry("PSVV")

    def get_psvs(self) -> str | None:
        """Get PSV Serial

        Returns:
            str | None: PSV Serial if found
        """
        return self.get_entry("PSVS")

    def to_json_serializable(self) -> dict:
        """Convert to JSON serializable dict

        Returns:
            dict: JSON serializable representation
        """
        return {
            "id": self.id.hex(" ").upper(),
            "header": {
                "unknown_00": self.unknown_00.hex(" ").upper(),
                "flags_10": f"0x{self.flags_10:04X}",
                "unknown_12": f"0x{self.unknown_12:04X}",
                "value_14": f"0x{self.value_14:04X}",
                "unknown_16": self.unknown_16.hex(" ").upper(),
            },
            "entries": [{"key": e.key, "value": e.value} for e in self.entries],
        }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# dam-song-tools

Tools for DAM Karaoke Song data

## !! Important notes !!

This software is developed for technical research on DAM Karaoke machines.

The Karaoke song data normally recorded on DAM Karaoke machines is protected by copyright. You must handle it in accordance with your local laws and regulations.

## [Demonstration video](https://twitter.com/soltia48/status/1620095004374093824)

In this video, a song not normally included in DAM Karaoke machines, "This is an Attack," is played and scored on that machine.

## Summary

This software reads and writes DAM Karaoke machines compatible karaOKe Data (OKD) file.
18 | 19 | ## Usage 20 | 21 | ### dump-okd 22 | 23 | Dump chunks of a OKD 24 | 25 | ``` 26 | $ dam-song-tools dump-okd --help 27 | NAME 28 | dam-song-tools dump-okd - Dump chunks of a OKD 29 | 30 | SYNOPSIS 31 | dam-song-tools dump-okd OKD_PATH OUTPUT_DIR_PATH 32 | 33 | DESCRIPTION 34 | Dump OKD 35 | 36 | POSITIONAL ARGUMENTS 37 | OKD_PATH 38 | Input OKD path 39 | OUTPUT_DIR_PATH 40 | Output directory path 41 | 42 | NOTES 43 | You can also use flags syntax for POSITIONAL ARGUMENTS 44 | ``` 45 | 46 | ### pack-okd 47 | 48 | Pack a OKD by directly inputting a required data in each chunk 49 | 50 | ``` 51 | $ dam-song-tools pack-okd --help 52 | NAME 53 | dam-song-tools pack-okd - Pack a OKD by directly inputting a required data in each chunk 54 | 55 | SYNOPSIS 56 | dam-song-tools pack-okd OKD_PATH [CHUNK_PATHS]... 57 | 58 | DESCRIPTION 59 | Pack OKD 60 | 61 | POSITIONAL ARGUMENTS 62 | OKD_PATH 63 | Output OKD path 64 | CHUNK_PATHS 65 | Input chunk paths 66 | 67 | FLAGS 68 | -s, --scramble=SCRAMBLE 69 | Default: False 70 | Scramble. Defaults to False. 
71 | 72 | NOTES 73 | You can also use flags syntax for POSITIONAL ARGUMENTS 74 | ``` 75 | 76 | ### okd-to-midi 77 | 78 | Convert a OKD to a Standard MIDI File 79 | 80 | ``` 81 | $ dam-song-tools okd-to-midi --help 82 | NAME 83 | dam-song-tools okd-to-midi - Convert a OKD to a Standard MIDI File 84 | 85 | SYNOPSIS 86 | dam-song-tools okd-to-midi OKD_PATH MIDI_PATH 87 | 88 | DESCRIPTION 89 | Convert a OKD to a Standard MIDI File 90 | 91 | POSITIONAL ARGUMENTS 92 | OKD_PATH 93 | Input OKD path 94 | MIDI_PATH 95 | Output MIDI path 96 | 97 | FLAGS 98 | -s, --sysex_to_text=SYSEX_TO_TEXT 99 | Default: True 100 | Convert SysEx Messages to Text Meta Messages 101 | 102 | NOTES 103 | You can also use flags syntax for POSITIONAL ARGUMENTS 104 | ``` 105 | 106 | ### midi-to-okd 107 | 108 | Convert a Standard MIDI File to a OKD 109 | 110 | ``` 111 | $ dam-song-tools midi-to-okd --help 112 | NAME 113 | dam-song-tools midi-to-okd - Convert a Standard MIDI File to a OKD 114 | 115 | SYNOPSIS 116 | dam-song-tools midi-to-okd MIDI_PATH PLAYING_OKD_PATH P3_OKD_PATH 117 | 118 | DESCRIPTION 119 | Convert a Standard MIDI File to a OKD 120 | 121 | POSITIONAL ARGUMENTS 122 | MIDI_PATH 123 | Type: str 124 | Input MIDI file path 125 | PLAYING_OKD_PATH 126 | Type: str 127 | Output Playing OKD path 128 | P3_OKD_PATH 129 | Type: str 130 | Output P3 OKD path 131 | 132 | FLAGS 133 | -s, --scramble=SCRAMBLE 134 | Default: False 135 | Scramble. Defaults to False. 

NOTES
    You can also use flags syntax for POSITIONAL ARGUMENTS
```

### dump-mtf

Dump files contained in a MTF file

```
NAME
    dam-song-tools dump-mtf - Dump files contained in a MTF file

SYNOPSIS
    dam-song-tools dump-mtf MTF_PATH OUTPUT_PATH

DESCRIPTION
    Dump files contained in a MTF file

POSITIONAL ARGUMENTS
    MTF_PATH
        Type: str
        Path to the MTF file
    OUTPUT_PATH
        Type: str
        Path to extract the archive into

NOTES
    You can also use flags syntax for POSITIONAL ARGUMENTS
```

### mtf-to-audio

Mix MTF file into "output.wav", "output.mid" files in extracted mtf folder

```
NAME
    dam-song-tools mtf-to-audio - Mix MTF file into "output.wav", "output.mid" files in extracted mtf folder.

SYNOPSIS
    dam-song-tools mtf-to-audio MTF_PATH OUTPUT_PATH

DESCRIPTION
    Mix MTF file into "output.wav", "output.mid" files in extracted mtf folder.

POSITIONAL ARGUMENTS
    MTF_PATH
        Type: str
        Path to the MTF file
    OUTPUT_PATH
        Type: str
        Path to extract the archive into, output will be saved inside

FLAGS
    -e, --export_each_file=EXPORT_EACH_FILE
        Type: bool
        Default: False
        Whether to export each individual audio file (RawADPCM → .wav, OPUS → .ogg, etc...)

NOTES
    You can also use flags syntax for POSITIONAL ARGUMENTS
```

## How to create MIDI data for compose

### MIDI port and track map

- Port 0, Track 0-15: Instrument
- Port 1, Track 0-7,9-15: Instrument
- Port 1, Track 8: Guide melody
- Port 15, Track 0: M-Track

### P-Track

P(laying)-Track is performance data of a song.

### M-Track

M(arking)-Track includes list of hook section, two-chorus fadeout position and others.
The note map in MIDI for compose is as follows.

- Hook section: C3
- Two-chorus fadeout position: C5 (Note on alone is sufficient)

Please check [the test data](test/data/p_track.mid).

## List of verified DAM Karaoke machine

- DAM-XG5000[G,R] (LIVE DAM [(GOLD EDITION|RED TUNE)])
- DAM-XG7000[Ⅱ] (LIVE DAM STADIUM [STAGE])
- DAM-XG8000[R] (LIVE DAM Ai[R])
- DAM-XG9000 (LIVE DAM WAO!)

## Authors

- KIRISHIKI Yudai
- 東京スーパーチャンネル

## Thanks

- [Nurupo](https://github.com/gta191977649) - Author of the MIDI file ["This is an Attack"](https://github.com/gta191977649/midi_godekisenda) from which [the test data](test/data/p_track.mid) was derived

## License

[MIT](https://opensource.org/licenses/MIT)

Copyright (c) 2024-2025 KIRISHIKI Yudai
--------------------------------------------------------------------------------
/okd/chunks/p_track_info_chunk.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass
from io import BytesIO
from typing import BinaryIO, Self

from .chunk_base import ChunkBase
from .generic_chunk import GenericChunk


@dataclass
class PTrackInfoChannelInfoEntry:
    """P-Track Information Channel Information Entry

    Fixed 4-byte on-disk record: attribute, ports, and two control-change
    values.
    """

    attribute: int
    ports: int
    control_change_ax: int
    control_change_cx: int

    @classmethod
    def read(cls, stream: BinaryIO) -> Self:
        """Read

        Args:
            stream (BinaryIO): Input stream

        Returns:
            Self: Instance of this class
        """
        buffer = stream.read(4)
        if len(buffer) < 4:
            raise ValueError("Too less read bytes.")

        attribute = buffer[0]
        ports = buffer[1]
        control_change_ax = buffer[2]
        control_change_cx = buffer[3]
        return cls(attribute, ports, control_change_ax, control_change_cx)

    def is_chorus(self) -> bool:
        """Is Chorus

        Returns:
            bool: True if Chorus, else False
        """
        # True when attribute bit 0 is CLEAR.
        return self.attribute & 0x01 != 0x01

    def is_guide_melody(self) -> bool:
        """Is Guide Melody

        Returns:
            bool: True if Guide Melody, else False
        """

        # NOTE(review): returns True when bit 7 is CLEAR, whereas
        # ExtendedPTrackInfoChannelInfoEntry.is_guide_melody returns True when
        # its guide-melody bit is SET — confirm the intended polarity here.
        return self.attribute & 0x80 != 0x80

    def write(self, stream: BinaryIO) -> None:
        """Write

        Args:
            stream (BinaryIO): Output stream
        """
        stream.write(self.attribute.to_bytes())
        stream.write(self.ports.to_bytes())
        stream.write(self.control_change_ax.to_bytes())
        stream.write(self.control_change_cx.to_bytes())


@dataclass
class PTrackInfoEntry:
    """P-Track Information Entry"""

    track_number: int
    track_status: int
    use_channel_group_flag: int  # 16-bit mask; bit N set = channel N serialized
    default_channel_groups: list[int]
    channel_groups: list[int]
    channel_info: list[PTrackInfoChannelInfoEntry]
    system_ex_ports: int

    @classmethod
    def read(cls, stream: BinaryIO) -> Self:
        """Read

        Args:
            stream (BinaryIO): Input stream

        Returns:
            Self: Instance of this class
        """
        buffer = stream.read(4)
        if len(buffer) < 4:
            raise ValueError("Too less read bytes.")

        track_number = buffer[0]
        track_status = buffer[1]
        use_channel_group_flag = int.from_bytes(buffer[2:4], "big")

        # Default channel groups are present on disk only for channels whose
        # bit is set in `use_channel_group_flag`; others default to 0x0000.
        default_channel_groups: list[int] = []
        for channel in range(16):
            if (use_channel_group_flag >> channel) & 0x0001 == 0x0001:
                buffer = stream.read(2)
                if len(buffer) < 2:
                    raise ValueError("Too less read bytes.")

                default_channel_groups.append(int.from_bytes(buffer, "big"))
            else:
                default_channel_groups.append(0x0000)

        buffer = stream.read(32)
        if len(buffer) < 
32:
            raise ValueError("Too less read bytes.")

        # 16 channel groups, 2 bytes each, big-endian.
        channel_groups: list[int] = []
        for channel in range(16):
            offset = 2 * channel
            channel_groups.append(int.from_bytes(buffer[offset : offset + 2], "big"))

        channel_info: list[PTrackInfoChannelInfoEntry] = []
        for channel in range(16):
            channel_info.append(PTrackInfoChannelInfoEntry.read(stream))

        buffer = stream.read(2)
        if len(buffer) < 2:
            raise ValueError("Too less read bytes.")

        # NOTE: little-endian here, unlike the big-endian fields above;
        # `write` mirrors this, so round-trips are consistent.
        system_ex_ports = int.from_bytes(buffer[0:2], "little")

        return cls(
            track_number,
            track_status,
            use_channel_group_flag,
            default_channel_groups,
            channel_groups,
            channel_info,
            system_ex_ports,
        )

    def is_lossless_track(self) -> bool:
        # Bit 7 of the track status marks a lossless track.
        return self.track_status & 0x80 == 0x80

    def write(self, stream: BinaryIO) -> None:
        """Write

        Args:
            stream (BinaryIO): Output stream
        """
        stream.write(self.track_number.to_bytes())
        stream.write(self.track_status.to_bytes())
        stream.write(self.use_channel_group_flag.to_bytes(2, "big"))
        # Mirror `read`: only channels flagged in `use_channel_group_flag`
        # have their default channel group serialized.
        for channel, default_channel_group in enumerate(self.default_channel_groups):
            if (self.use_channel_group_flag >> channel) & 0x0001 != 0x0001:
                continue
            stream.write(default_channel_group.to_bytes(2, "big"))
        for channel_group in self.channel_groups:
            stream.write(channel_group.to_bytes(2, "big"))
        for channel_info_entry in self.channel_info:
            channel_info_entry.write(stream)
        stream.write(self.system_ex_ports.to_bytes(2, "little"))


@dataclass
class PTrackInfoChunk(ChunkBase):
    """P-Track Information Chunk"""

    data: list[PTrackInfoEntry]

    @classmethod
    def from_generic(cls, generic: GenericChunk) -> Self:
        """From Generic Chunk

        Args:
            generic (GenericChunk): Generic Chunk

        Returns:
            Self: Instance of this class
        """
        # Payload layout: uint16 entry count, then the packed entries.
        p_track_info: list[PTrackInfoEntry] = []
        entry_count = int.from_bytes(generic.payload[0:2], "big")
        stream = BytesIO(generic.payload[2:])
        for _ in range(entry_count):
            entry = PTrackInfoEntry.read(stream)
            p_track_info.append(entry)
        return cls(generic.id, p_track_info)

    def _payload_buffer(self) -> bytes:
        # Inverse of `from_generic`: entry count followed by packed entries.
        buffer = len(self.data).to_bytes(2, "big")

        stream = BytesIO()
        for entry in self.data:
            entry.write(stream)
        stream.seek(0)
        buffer += stream.read()

        return buffer
--------------------------------------------------------------------------------
/okd/chunks/extended_p_track_info_chunk.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass
from io import BytesIO
from typing import BinaryIO, Self

from .chunk_base import ChunkBase
from .generic_chunk import GenericChunk


@dataclass
class ExtendedPTrackInfoChannelInfoEntry:
    """Extended P-Track Information Channel Information Entry

    Fixed 8-byte on-disk record; unlike the non-extended entry, `attribute`
    is 16 bits (little-endian) and `ports` 16 bits (big-endian).
    """

    attribute: int
    ports: int
    unknown_0: int
    control_change_ax: int
    control_change_cx: int

    @classmethod
    def read(cls, stream: BinaryIO) -> Self:
        """Read

        Args:
            stream (BinaryIO): Input stream

        Returns:
            Self: Instance of this class
        """
        buffer = stream.read(8)
        if len(buffer) < 8:
            raise ValueError("Too less read bytes.")

        # NOTE: mixed endianness within one record; `write` mirrors this.
        attribute = int.from_bytes(buffer[0:2], "little")
        ports = int.from_bytes(buffer[2:4], "big")
        unknown_0 = int.from_bytes(buffer[4:6], "big")
        control_change_ax = buffer[6]
        control_change_cx = buffer[7]
        return cls(attribute, ports, unknown_0, control_change_ax, control_change_cx)

    def is_chorus(self) -> bool:
        """Is Chorus

        Returns:
            bool: True if Chorus, else False
        """
        # True when attribute bit 7 is CLEAR.
        return self.attribute & 0x0080 != 0x0080

    def is_guide_melody(self) -> bool:
        """Is Guide Melody

        Returns:
            bool: True if Guide Melody, else False
        """
        # True when attribute bit 8 is SET (opposite polarity to the
        # non-extended entry's check — see PTrackInfoChannelInfoEntry).
        return self.attribute & 0x0100 == 0x0100

    def write(self, stream: BinaryIO) -> None:
        """Write

        Args:
            stream (BinaryIO): Output stream
        """
        stream.write(self.attribute.to_bytes(2, "little"))
        stream.write(self.ports.to_bytes(2, "big"))
        stream.write(self.unknown_0.to_bytes(2, "big"))
        stream.write(self.control_change_ax.to_bytes())
        stream.write(self.control_change_cx.to_bytes())


@dataclass
class ExtendedPTrackInfoEntry:
    """Extended P-Track Information Entry"""

    track_number: int
    track_status: int
    unused_0: int
    default_channel_groups: list[int]
    channel_groups: list[int]
    channel_info: list[ExtendedPTrackInfoChannelInfoEntry]
    system_ex_ports: int
    unknown_0: int

    @classmethod
    def read(cls, stream: BinaryIO) -> Self:
        """Read

        Args:
            stream (BinaryIO): Input stream

        Returns:
            Self: Instance of this class
        """
        # Fixed 68-byte header: 4 bytes of scalars + 2x16 uint16 group tables.
        buffer = stream.read(68)
        if len(buffer) < 68:
            raise ValueError("Too less read bytes.")

        track_number = buffer[0]
        track_status = buffer[1]
        unused_0 = int.from_bytes(buffer[2:4], "big")

        # Unlike PTrackInfoEntry, all 16 default channel groups are always
        # present on disk.
        default_channel_groups: list[int] = []
        for channel in range(16):
            offset = 4 + 2 * channel
            default_channel_groups.append(
                int.from_bytes(buffer[offset : offset + 2], "big")
            )

        channel_groups: list[int] = []
        for channel in range(16):
            offset = 36 + 2 * channel
            channel_groups.append(int.from_bytes(buffer[offset : offset + 2], "big"))

        channel_info: list[ExtendedPTrackInfoChannelInfoEntry] = []
        for _ in range(16):
            channel_info.append(ExtendedPTrackInfoChannelInfoEntry.read(stream))

        buffer = 
stream.read(4) 117 | if len(buffer) < 4: 118 | raise ValueError("Too less read bytes.") 119 | 120 | system_ex_ports = int.from_bytes(buffer[0:2], "big") 121 | unknown_0 = int.from_bytes(buffer[2:4], "big") 122 | 123 | return cls( 124 | track_number, 125 | track_status, 126 | unused_0, 127 | default_channel_groups, 128 | channel_groups, 129 | channel_info, 130 | system_ex_ports, 131 | unknown_0, 132 | ) 133 | 134 | def is_lossless_track(self) -> bool: 135 | return self.track_status & 0x80 == 0x80 136 | 137 | def write(self, stream: BinaryIO) -> None: 138 | """Write 139 | 140 | Args: 141 | stream (BinaryIO): Output stream 142 | """ 143 | stream.write(self.track_number.to_bytes()) 144 | stream.write(self.track_status.to_bytes()) 145 | stream.write(self.unused_0.to_bytes(2, "big")) 146 | for default_channel_group in self.default_channel_groups: 147 | stream.write(default_channel_group.to_bytes(2, "big")) 148 | for channel_group in self.channel_groups: 149 | stream.write(channel_group.to_bytes(2, "big")) 150 | for channel_info_entry in self.channel_info: 151 | channel_info_entry.write(stream) 152 | stream.write(self.system_ex_ports.to_bytes(2, "big")) 153 | stream.write(self.unknown_0.to_bytes(2, "big")) 154 | 155 | 156 | @dataclass 157 | class ExtendedPTrackInfoChunk(ChunkBase): 158 | """Extended P-Track Information Chunk""" 159 | 160 | unknown_0: bytes 161 | tg_mode: int 162 | data: list[ExtendedPTrackInfoEntry] 163 | 164 | @classmethod 165 | def from_generic(cls, generic: GenericChunk) -> Self: 166 | """From Generic Chunk 167 | 168 | Args: 169 | generic (GenericChunk): Generic Chunk 170 | 171 | Returns: 172 | Self: ExtendedPTrackInfoChunk 173 | """ 174 | unknown_0 = generic.payload[0:8] 175 | tg_mode = int.from_bytes(generic.payload[8:10:3], "big") 176 | entry_count = int.from_bytes(generic.payload[10:12], "big") 177 | data: list[ExtendedPTrackInfoEntry] = [] 178 | stream = BytesIO(generic.payload[12:]) 179 | for _ in range(entry_count): 180 | entry = 
ExtendedPTrackInfoEntry.read(stream) 181 | data.append(entry) 182 | return cls(generic.id, unknown_0, tg_mode, data) 183 | 184 | def _payload_buffer(self) -> bytes: 185 | buffer = self.unknown_0 186 | buffer += self.tg_mode.to_bytes(2, "big") 187 | buffer += len(self.data).to_bytes(2, "big") 188 | 189 | stream = BytesIO() 190 | for entry in self.data: 191 | entry.write(stream) 192 | stream.seek(0) 193 | buffer += stream.read() 194 | 195 | return buffer 196 | -------------------------------------------------------------------------------- /okd/m_track_conversion.py: -------------------------------------------------------------------------------- 1 | import math 2 | import mido 3 | 4 | from .chunks import MTrackEvent, MTrackAbsoluteTimeEvent, MTrackChunk 5 | from midi.time_converter import MidiTimeConverter 6 | from midi.utils import ( 7 | get_track_by_port_channel, 8 | get_first_and_last_note_times, 9 | get_time_signatures, 10 | ) 11 | 12 | MIDI_M_TRACK_PORT = 16 13 | 14 | 15 | def __midi_to_absolute_time_track(midi: mido.MidiFile) -> list[MTrackAbsoluteTimeEvent]: 16 | midi_time_converter = MidiTimeConverter() 17 | midi_time_converter.load_from_midi(midi) 18 | 19 | melody_track = get_track_by_port_channel(midi.tracks, 1, 8) 20 | if melody_track is None: 21 | raise ValueError("Melody track not found.") 22 | 23 | melody_notes: list[tuple[int, int]] = [] 24 | current_melody_note_start = -1 25 | current_melody_node_number = -1 26 | track_time = 0 27 | for midi_message in melody_track: 28 | track_time += midi_message.time 29 | absolute_time = midi_time_converter.ticks_to_ms(track_time) 30 | 31 | if not isinstance(midi_message, mido.Message): 32 | continue 33 | 34 | if midi_message.type == "note_on": # type: ignore 35 | current_melody_note_start = absolute_time 36 | current_melody_node_number = midi_message.note # type: ignore 37 | elif ( 38 | midi_message.type == "note_off" # type: ignore 39 | and midi_message.note == current_melody_node_number # type: ignore 40 | ): 41 
def __midi_to_absolute_time_track(midi: mido.MidiFile) -> list[MTrackAbsoluteTimeEvent]:
    """Convert a SMF to a list of M-Track events carrying absolute times.

    Args:
        midi (mido.MidiFile): Input MIDI file

    Raises:
        ValueError: Melody track, melody note or time signature not found

    Returns:
        list[MTrackAbsoluteTimeEvent]: M-Track events sorted by time
    """
    midi_time_converter = MidiTimeConverter()
    midi_time_converter.load_from_midi(midi)

    # Guide melody is expected on port 1, channel 8
    melody_track = get_track_by_port_channel(midi.tracks, 1, 8)
    if melody_track is None:
        raise ValueError("Melody track not found.")

    # Collect (start, end) pairs of melody notes, in ms
    melody_notes: list[tuple[int, int]] = []
    current_melody_note_start = -1
    current_melody_node_number = -1
    track_time = 0
    for midi_message in melody_track:
        track_time += midi_message.time
        absolute_time = midi_time_converter.ticks_to_ms(track_time)

        if not isinstance(midi_message, mido.Message):
            continue

        if midi_message.type == "note_on":  # type: ignore
            current_melody_note_start = absolute_time
            current_melody_node_number = midi_message.note  # type: ignore
        elif (
            midi_message.type == "note_off"  # type: ignore
            and midi_message.note == current_melody_node_number  # type: ignore
        ):
            melody_notes.append((current_melody_note_start, absolute_time))

    if len(melody_notes) < 1:
        raise ValueError("Melody note not found.")

    m_track = get_track_by_port_channel(midi.tracks, MIDI_M_TRACK_PORT, 0)

    hooks: list[tuple[int, int]] = []

    two_chorus_fadeout_time = -1

    if m_track is not None:
        current_hook_start = -1
        track_time = 0
        # BUGFIX: iterate the M-Track that was just looked up by port/channel,
        # not midi.tracks[1] — the M-Track is not guaranteed to be the second
        # track of the file.
        for midi_message in m_track:
            track_time += midi_message.time
            absolute_time = midi_time_converter.ticks_to_ms(track_time)

            if not isinstance(midi_message, mido.Message):
                continue

            # Note 48 marks a hook (sabi) section, note 72 the 2-chorus fadeout
            if midi_message.type == "note_on":  # type: ignore
                if midi_message.note == 48:  # type: ignore
                    current_hook_start = absolute_time
                elif midi_message.note == 72:  # type: ignore
                    two_chorus_fadeout_time = absolute_time
            elif midi_message.type == "note_off":  # type: ignore
                if midi_message.note == 48:  # type: ignore
                    hooks.append((current_hook_start, absolute_time))

    first_note_on_tick, last_note_off_tick = get_first_and_last_note_times(midi.tracks)
    first_note_on_time = midi_time_converter.ticks_to_ms(first_note_on_tick)
    last_note_off_time = midi_time_converter.ticks_to_ms(last_note_off_tick)

    absolute_time_track: list[MTrackAbsoluteTimeEvent] = []

    time_signatures = get_time_signatures(midi.tracks)

    visible_guide_melody_delimiters: list[tuple[int, int]] = []
    # 0xFF: time signature event (numerator, log2(denominator), 0xFE terminator)
    for tick, numerator, denominator in time_signatures:
        absolute_time_track.append(
            MTrackAbsoluteTimeEvent(
                0xFF,
                bytes(bytearray([0x00, numerator, int(math.log2(denominator)), 0xFE])),
                midi_time_converter.ticks_to_ms(tick),
            )
        )

    # Split the guide melody into "pages" no longer than ~7000 ms; long gaps
    # end a page, otherwise a page-turn delimiter is placed at the next note.
    melody_notes_copy = melody_notes.copy()
    current_page_start = -1
    while True:
        melody_note: tuple[int, int]
        try:
            melody_note = melody_notes_copy.pop(0)
        except IndexError:
            break
        melody_note_start, melody_note_end = melody_note

        if current_page_start == -1:
            current_page_start = melody_note_start
            visible_guide_melody_delimiters.append((melody_note_start, 0))
            continue

        next_melody_note: tuple[int, int]
        try:
            next_melody_note = melody_notes_copy[0]
        except IndexError:
            visible_guide_melody_delimiters.append((melody_note_end + 1, 2))
            break
        next_melody_note_start, next_melody_note_end = next_melody_note

        page_length = melody_note_end - current_page_start
        if 7000 < page_length:
            void_length = next_melody_note_start - melody_note_end
            if 7000 < void_length:
                melody_notes_copy.pop(0)
                visible_guide_melody_delimiters.append((melody_note_end + 1, 1))
                current_page_start = -1
            else:
                visible_guide_melody_delimiters.append((next_melody_note_start, 3))
                current_page_start = next_melody_note_start

    # Emit one beat event per quarter note: 0xF1 = strong beat (bar start),
    # 0xF2 = weak beat, following the active time signature.
    if len(time_signatures) > 0:
        current_beat_time = 0
        current_beat_count = time_signatures[0][1]
        while current_beat_time < last_note_off_time + 1:
            time_signature_time = current_beat_time
            time_signature = next(
                (
                    time_signature
                    for time_signature in reversed(time_signatures)
                    if time_signature[0] <= time_signature_time
                ),
                None,
            )
            if time_signature is None:
                raise ValueError("Time signature not found.")

            if current_beat_count < time_signature[1]:
                absolute_time_track.append(
                    MTrackAbsoluteTimeEvent(
                        0xF2, b"", midi_time_converter.ticks_to_ms(current_beat_time)
                    )
                )
                current_beat_count += 1
            else:
                absolute_time_track.append(
                    MTrackAbsoluteTimeEvent(
                        0xF1, b"", midi_time_converter.ticks_to_ms(current_beat_time)
                    )
                )
                current_beat_count = 1

            current_beat_time += midi.ticks_per_beat

    # 0xF6: playing section (0x00 = start, 0x01 = end)
    # NOTE(review): first_note_on_time/last_note_off_time are already in ms
    # (converted above) yet are passed through ticks_to_ms() again here —
    # looks like a double conversion; confirm against MidiTimeConverter
    # semantics before changing. The same pattern exists for the hook,
    # delimiter and fadeout times below and in saiten_ref_conversion.py.
    absolute_time_track.append(
        MTrackAbsoluteTimeEvent(
            0xF6, b"\x00", midi_time_converter.ticks_to_ms(first_note_on_time)
        )
    )
    absolute_time_track.append(
        MTrackAbsoluteTimeEvent(
            0xF6, b"\x01", midi_time_converter.ticks_to_ms(last_note_off_time)
        )
    )

    # 0xF3: hook sections; the last hook uses mark types 0x02/0x03 (climax)
    for hook_start, hook_end in hooks[:-1]:
        absolute_time_track.append(
            MTrackAbsoluteTimeEvent(
                0xF3, b"\x00", midi_time_converter.ticks_to_ms(hook_start)
            )
        )
        absolute_time_track.append(
            MTrackAbsoluteTimeEvent(
                0xF3, b"\x01", midi_time_converter.ticks_to_ms(hook_end)
            )
        )

    if len(hooks) > 0:
        last_hook_start, last_hook_end = hooks[-1]
        absolute_time_track.append(
            MTrackAbsoluteTimeEvent(
                0xF3, b"\x02", midi_time_converter.ticks_to_ms(last_hook_start)
            )
        )
        absolute_time_track.append(
            MTrackAbsoluteTimeEvent(
                0xF3, b"\x03", midi_time_converter.ticks_to_ms(last_hook_end)
            )
        )

    # 0xF4: visible Guide Melody page delimiters
    for (
        visible_guide_melody_delimiter_time,
        visible_guide_melody_delimiter_type,
    ) in visible_guide_melody_delimiters:
        absolute_time_track.append(
            MTrackAbsoluteTimeEvent(
                0xF4,
                visible_guide_melody_delimiter_type.to_bytes(),
                midi_time_converter.ticks_to_ms(visible_guide_melody_delimiter_time),
            )
        )

    # 0xF5: two chorus fadeout position
    if two_chorus_fadeout_time != -1:
        absolute_time_track.append(
            MTrackAbsoluteTimeEvent(
                0xF5, b"", midi_time_converter.ticks_to_ms(two_chorus_fadeout_time)
            )
        )

    absolute_time_track.sort(key=lambda absolute_time_event: absolute_time_event.time)

    return absolute_time_track


def midi_to_m_track(
    midi: mido.MidiFile,
) -> MTrackChunk:
    """Convert a SMF to an M-Track Chunk.

    Args:
        midi (mido.MidiFile): Input MIDI file

    Returns:
        MTrackChunk: M-Track Chunk with delta-time events and End of Track
    """
    absolute_time_track = __midi_to_absolute_time_track(midi)
    events: list[MTrackEvent] = []
    current_time = 0
    # Convert absolute times back to delta times
    for event in absolute_time_track:
        delta_time = event.time - current_time
        events.append(MTrackEvent(event.status_byte, event.data_bytes, delta_time))
        current_time = event.time
    # End of Track
    events.append(MTrackEvent(0x00, b"\x00\x00\x00", 0))
    return MTrackChunk(b"\xffMR\x00", events)
def extract_mtf(mtf_path: str, output_path: str) -> str | None:
    """Extracts MTF file to output_path

    An MTF file is a gzip-compressed tar archive whose first 4 bytes have
    been clobbered; they are restored to a GZip magic before extraction.

    Args:
        mtf_path (str): Path to the MTF file
        output_path (str): Extraction destination directory

    Returns:
        str | None: Path to the extracted `mtf` root directory, or None when
            the archive root folder could not be detected
    """
    with open(mtf_path, "rb") as f:
        data = bytearray(f.read())
        data[0:4] = b"\x1f\x8b\x08\x00"  # GZip header

    # NOTE(security): extractall() without member sanitization is vulnerable
    # to path traversal for untrusted archives; consider filter="data"
    # (Python 3.12+) if MTF files can come from untrusted sources.
    with tarfile.open(fileobj=io.BytesIO(data), mode='r:gz') as tar:
        tar.extractall(path=output_path)
        members = tar.getmembers()
        root_folder_name = members[0].name.split('/')[0] if members else ''

    if not root_folder_name:
        __logger.error("Failed to detect root folder in archive.")
        return None

    mtf_root_path = os.path.join(output_path, root_folder_name, 'mtf')

    return mtf_root_path

def dump_playlist(mtf_root_path: str, export_each_file: bool = False) -> str:
    """Reads PlayList JSON files, parses them and converts to a WAV file

    Args:
        mtf_root_path (str): Path to the extracted `mtf` directory
        export_each_file (bool): Also export each decoded item next to its
            source file

    Returns:
        str: Path of the mixed WAV file
    """
    # Codec name -> (MtfAudio decode method, export suffix, copy source as-is
    # when exporting individual files instead of re-encoding)
    codec_handlers: dict[str, tuple[str, str, bool]] = {
        "RawADPCM": ("decode_adpcm", ".wav", False),
        "OPUS": ("decode_opus", ".ogg", True),
        "MP3": ("decode_others", ".mp3", True),
        "AAC": ("decode_others", ".aac", True),
        "FLAC": ("decode_others", ".flac", True),
    }

    mixed_audio = AudioSegment.silent(duration=0)
    for playlist_file in MtfFileContent.PLAYLIST_FILES:
        json_path = os.path.join(mtf_root_path, playlist_file)
        if not os.path.exists(json_path):
            __logger.info(f"{MtfFileContent.PLAYLIST_FILES[playlist_file]} track not found, skipping.")
            continue

        with open(json_path, "r", encoding="utf-8") as f:
            playlist = json.load(f)

        vol_events = playlist.get("VolEvent", [])

        for item in playlist.get("AudioPlayListItem", []):
            file_name = item["file"]
            start_time = item["start_clk"]  # ms
            input_path = os.path.join(mtf_root_path, file_name)

            handler = codec_handlers.get(item["codec"])
            if handler is None:
                __logger.warning(f"Unsupported codec {item['codec']}, skipping.")
                continue
            if not os.path.exists(input_path):
                __logger.warning(f"{input_path} not found, skipping.")
                continue
            decode_method_name, export_suffix, copy_as_is = handler

            mtf_audio_processor = MtfAudio()
            # Decode, then apply volume automation; apply_vol_events() returns
            # the audio segment actually used for mixing.
            getattr(mtf_audio_processor, decode_method_name)(input_path)
            audio = mtf_audio_processor.apply_vol_events(vol_events, start_time)

            if export_each_file:
                export_path = input_path + export_suffix
                if copy_as_is:
                    shutil.copy(input_path, export_path)
                else:
                    audio.export(export_path, format='wav')

            # MIX: grow the canvas if needed, then overlay at start_time
            if len(mixed_audio) < start_time + len(audio):
                mixed_audio = mixed_audio.append(
                    AudioSegment.silent(
                        duration=start_time + len(audio) - len(mixed_audio)
                    ),
                    crossfade=0,
                )
            mixed_audio = mixed_audio.overlay(audio, position=int(start_time))

    # Export to a WAV file
    final_output_path = os.path.join(mtf_root_path, "output.wav")
    mixed_audio.export(final_output_path, format="wav")
    __logger.info(f"Mixed WAV file saved at: {final_output_path}")

    return final_output_path

def dump_refs(mtf_root_path: str, export_each_file: bool = False) -> str:
    """Dump refs to a SMF file

    Args:
        mtf_root_path (str): Path to the extracted `mtf` directory
        export_each_file (bool): Also export each ref as its own SMF file

    Returns:
        str: Path of the combined SMF file
    """
    # Prepare MidiFile
    midi = mido.MidiFile(ticks_per_beat=480)

    # Dump SaitenRef
    saiten_ref_midi = dump_saiten_ref(mtf_root_path, export_each_file)
    for track in saiten_ref_midi.tracks:
        midi.tracks.append(track)

    # Dump Ref JSON files
    for ref_file in MtfFileContent.REF_FILES:
        ref_midi = dump_ref(mtf_root_path, ref_file, export_each_file)
        if ref_midi is None:
            continue
        for track in ref_midi.tracks:
            midi.tracks.append(track)

    # Export to a SMF file
    final_output_path = os.path.join(mtf_root_path, "output.mid")
    midi.save(final_output_path)
    __logger.info(f"Mixed SMF file saved at: {final_output_path}")

    return final_output_path

def dump_saiten_ref(mtf_root_path: str, export_each_file: bool = False) -> mido.MidiFile:
    """Dump SaitenRef

    Reads the SaitenRef events from SongProperty.json and converts them to a
    MIDI file.

    Args:
        mtf_root_path (str): Path to the extracted `mtf` directory
        export_each_file (bool): Also save the result next to the JSON source

    Returns:
        mido.MidiFile: Converted MIDI file
    """
    json_path = os.path.join(mtf_root_path, MtfFileContent.SONG_PROPERTY_FILE)

    with open(json_path, "r", encoding="utf-8") as f:
        song_property = json.load(f)

    # Annotation fixed: these are SaitenRefEvent instances, not NoteEvent.
    saiten_ref_events: list[SaitenRefEvent] = []

    for item in song_property.get("SaitenRef", []):
        saiten_ref_event = SaitenRefEvent(
            item["Clock"], SaitenRefEventType.value_of(item["msg"][0]), item["msg"][1], item["msg"][2]
        )
        saiten_ref_events.append(saiten_ref_event)

    saiten_ref_midi = saiten_ref_conversion.saiten_ref_to_midi(saiten_ref_events)
    if export_each_file:
        saiten_ref_midi_output_path = os.path.join(mtf_root_path, MtfFileContent.SONG_PROPERTY_FILE + ".mid")
        saiten_ref_midi.save(saiten_ref_midi_output_path)

    return saiten_ref_midi

def dump_ref(mtf_root_path: str, ref_file: str, export_each_file: bool = False) -> mido.MidiFile | None:
    """Dump Ref

    Args:
        mtf_root_path (str): Path to the extracted `mtf` directory
        ref_file (str): Ref JSON file name (key of MtfFileContent.REF_FILES)
        export_each_file (bool): Also save the result next to the JSON source

    Returns:
        mido.MidiFile | None: Converted MIDI file, or None when the ref file
            does not exist
    """
    ref_info = MtfFileContent.REF_FILES[ref_file]
    json_path = os.path.join(mtf_root_path, ref_file)
    if not os.path.exists(json_path):
        __logger.info(f"{ref_info['description']} ref not found, skipping.")
        return None

    with open(json_path, "r", encoding="utf-8") as f:
        ref = json.load(f)

    note_events: list[NoteEvent] = []

    for item in ref.get("Pitch", []):
        note_event = NoteEvent(item["StartClk"], item["EndClk"], item["Note"])
        note_events.append(note_event)

    ref_midi = note_events_conversion.note_event_to_midi(note_events, ref_info["port"], ref_info["channel"])
    if export_each_file:
        ref_midi_output_path = json_path + ".mid"
        ref_midi.save(ref_midi_output_path)
    return ref_midi
@dataclass
class MTrackEvent(MidiTrackEvent):
    """M-Track Event

    One event of an M-Track: an extended-variable-int delta time, a status
    byte (0xF1..0xF8 control marks or 0xFF SysEx) and its data bytes.
    """

    # A 4-byte zero run in the stream marks End of Track (see read()).
    __END_OF_TRACK_MARK = b"\x00\x00\x00\x00"

    @staticmethod
    def read_sysex_data_bytes(stream: BinaryIO) -> bytes:
        """Read Data Bytes of SysEx Message

        Reads until a byte with the high bit set; only 0xFE is accepted as
        the terminator. Note the terminator itself is kept in the returned
        data bytes — write() relies on this for byte-exact round-tripping.

        Args:
            stream (BinaryIO): Input stream

        Raises:
            ValueError: Unterminated SysEx message detected

        Returns:
            bytes: Data Bytes
        """
        data_bytes = b""
        while True:
            byte = stream.read(1)
            if len(byte) < 1:
                raise ValueError("Too less read bytes.")
            data_bytes += byte
            byte = byte[0]
            if byte & 0x80 == 0x80:
                # High bit set: must be the 0xFE terminator
                if byte != 0xFE:
                    raise ValueError(
                        f"Unterminated SysEx message detected. stop_byte={hex(byte)}"
                    )
                break
        return data_bytes

    @classmethod
    def read(cls, stream: BinaryIO) -> Self | None:
        """Read

        Args:
            stream (BinaryIO): Input stream

        Raises:
            ValueError: Unknown Status Byte detected

        Returns:
            Self | None: Instance of this class, or None at End of Track
        """
        delta_time = read_extended_variable_int(stream)

        # Peek 4 bytes for the End of Track mark; rewind if it is not there.
        end_of_track = stream.read(4)
        if end_of_track == MTrackEvent.__END_OF_TRACK_MARK:
            return None
        stream.seek(-4, os.SEEK_CUR)

        status_byte = read_status_byte(stream)

        # System messages
        if status_byte == 0xFF:
            # SysEx message
            data_bytes = MTrackEvent.read_sysex_data_bytes(stream)
            return cls(status_byte, data_bytes, delta_time)
        elif status_byte == 0xF1:
            # Strong beat
            data_bytes_length = 0
        elif status_byte == 0xF2:
            # Weak beat
            data_bytes_length = 0
        elif status_byte == 0xF3:
            # Hook section
            data_bytes_length = 1
        elif status_byte == 0xF4:
            # Visible Guide Melody page delimiter
            data_bytes_length = 1
        elif status_byte == 0xF5:
            # Two chorus fadeout position
            data_bytes_length = 0
        elif status_byte == 0xF6:
            # Playing section
            data_bytes_length = 1
        elif status_byte == 0xF8:
            # ADPCM playing section
            data_bytes_length = 1
        else:
            raise ValueError(
                f"Unknown Status Byte detected. status_byte={hex(status_byte)}"
            )

        data_bytes = stream.read(data_bytes_length)
        if not is_data_bytes(data_bytes):
            raise ValueError(
                f"Invalid Data Byte detected. data_bytes=`{data_bytes.hex(" ").upper()}`"
            )

        return cls(status_byte, data_bytes, delta_time)

    def write(self, stream: BinaryIO) -> None:
        """Write

        Args:
            stream (BinaryIO): Output stream
        """
        write_extended_variable_int(stream, self.delta_time)
        stream.write(self.status_byte.to_bytes())
        stream.write(self.data_bytes)


@dataclass
class MTrackAbsoluteTimeEvent(MidiEvent):
    """M-Track Absolute Time Event

    A MidiEvent annotated with its absolute time instead of a delta time.
    """

    # Absolute time of the event (same unit as produced by the converter
    # that builds the track; presumably ms — TODO confirm).
    time: int


@dataclass
class MTrackChunk(ChunkBase):
    """M-Track Chunk"""

    events: list[MTrackEvent]

    @classmethod
    def from_generic(cls, generic: GenericChunk) -> Self:
        """From Generic Chunk

        Args:
            generic (GenericChunk): Generic Chunk

        Returns:
            Self: Instance of this class
        """
        stream = BytesIO(generic.payload)
        events: list[MTrackEvent] = []
        while True:
            event = MTrackEvent.read(stream)
            if event is None:
                # End of Track
                break
            events.append(event)
        return cls(generic.id, events)

    def track_number(self) -> int:
        """Track Number

        Returns:
            int: Track Number (last byte of the chunk ID)
        """
        return self.id[3]

    def _payload_buffer(self) -> bytes:
        """Serialize all events back into a payload buffer."""
        stream = BytesIO()
        for event in self.events:
            event.write(stream)
        stream.seek(0)
        return stream.read()

    def to_json_serializable(self):
        """Return a JSON-serializable dict representation of the events."""
        json_events = []
        for event in self.events:
            json_events.append(
                {
                    "delta_time": event.delta_time,
                    "status_byte": format(event.status_byte, "02X"),
                    "data": event.data_bytes.hex(" ").upper(),
                }
            )
        return {"events": json_events}

    def absolute_time_track(
        self,
    ) -> list[MTrackAbsoluteTimeEvent]:
        """Convert delta-time events to absolute-time events (running sum)."""
        absolute_time_track: list[MTrackAbsoluteTimeEvent] = []
        absolute_time = 0
        for event in self.events:
            absolute_time += event.delta_time
            absolute_time_track.append(
                MTrackAbsoluteTimeEvent(
                    event.status_byte, event.data_bytes, absolute_time
                )
            )
        return absolute_time_track


@dataclass
class MTrackInterpretation:
    """High-level interpretation of an M-Track: tempo/beat structure and
    song-section marks extracted from the raw events."""

    # (time, bpm) pairs
    tempos: list[tuple[int, int]]
    # (time, numerator, denominator) triples
    time_signatures: list[tuple[int, int, int]]
    # (start, end) pairs
    hooks: list[tuple[int, int]]
    # (time, delimiter type) pairs
    visible_guide_melody_delimiters: list[tuple[int, int]]
    # -1 when absent
    two_chorus_fadeout_time: int
    # (start, end); (-1, -1) when absent
    song_section: tuple[int, int]
    # (start, end) pairs
    adpcm_sections: list[tuple[int, int]]

    @classmethod
    def from_track(cls, track: MTrackChunk):
        """Build an interpretation from an M-Track Chunk.

        Args:
            track (MTrackChunk): Source M-Track

        Returns:
            MTrackInterpretation: Interpreted track
        """
        tempos: list[tuple[int, int]] = []
        time_signatures: list[tuple[int, int, int]] = []
        hooks: list[tuple[int, int]] = []
        visible_guide_melody_delimiters: list[tuple[int, int]] = []
        two_chorus_fadeout_time = -1
        song_section: tuple[int, int] = (-1, -1)
        adpcm_sections: list[tuple[int, int]] = []

        absolute_time_track = track.absolute_time_track()

        # NOTE(review): `beats` is written (reset on 0xF1, incremented on
        # 0xF2) but never read — looks like leftover state.
        beats = 1
        # Start counting beats from the first beat event, if any
        current_beat_start = next(
            (
                event.time
                for event in absolute_time_track
                if event.status_byte == 0xF1 or event.status_byte == 0xF2
            ),
            -1,
        )
        current_bpm = 125
        current_hook_start_time = 0
        song_section_start = -1
        current_adpcm_section_start = -1

        for event in absolute_time_track:
            if event.status_byte == 0xF1:
                # Strong beat: derive bpm from the distance to the last beat
                if current_beat_start != -1:
                    beat_length = event.time - current_beat_start
                    if beat_length == 0:
                        continue
                    bpm = round(60000 / beat_length)
                    if bpm != current_bpm:
                        tempos.append(
                            (
                                current_beat_start,
                                bpm,
                            )
                        )
                        current_bpm = bpm
                beats = 1
                current_beat_start = event.time
            elif event.status_byte == 0xF2:
                # Weak beat: same bpm tracking as strong beat
                if current_beat_start != -1:
                    beat_length = event.time - current_beat_start
                    if beat_length == 0:
                        continue
                    bpm = round(60000 / beat_length)
                    if bpm != current_bpm:
                        tempos.append(
                            (
                                current_beat_start,
                                bpm,
                            )
                        )
                        current_bpm = bpm
                beats += 1
                current_beat_start = event.time
            elif event.status_byte == 0xF3:
                # Hook section: 0x00/0x02 start, 0x01/0x03 end
                mark_type = event.data_bytes[0]
                if mark_type == 0x00 or mark_type == 0x02:
                    current_hook_start_time = event.time
                elif mark_type == 0x01 or mark_type == 0x03:
                    hooks.append((current_hook_start_time, event.time))
            elif event.status_byte == 0xF4:
                # Visible Guide Melody page delimiter
                visible_guide_melody_delimiters.append(
                    (event.time, event.data_bytes[0])
                )
                pass
            elif event.status_byte == 0xF5:
                # Two chorus fadeout position
                two_chorus_fadeout_time = event.time
            elif event.status_byte == 0xF6:
                # Playing section: 0x00 start, 0x01 end
                mark_type = event.data_bytes[0]
                if mark_type == 0x00:
                    song_section_start = event.time
                elif mark_type == 0x01:
                    song_section = (song_section_start, event.time)
            elif event.status_byte == 0xF8:
                # ADPCM playing section: 0x00 start, 0x01 end
                mark_type = event.data_bytes[0]
                if mark_type == 0x00:
                    current_adpcm_section_start = event.time
                elif mark_type == 0x01:
                    adpcm_sections.append((current_adpcm_section_start, event.time))
            elif event.status_byte == 0xFF:
                # Time signature: data = [_, numerator, log2(denominator), ...]
                time_signatures.append(
                    (event.time, event.data_bytes[1], 2 ** event.data_bytes[2])
                )

        return cls(
            tempos,
            time_signatures,
            hooks,
            visible_guide_melody_delimiters,
            two_chorus_fadeout_time,
            song_section,
            adpcm_sections,
        )
class SaitenRefEventType(Enum):
    """SaitenRef (scoring reference) event status bytes."""

    OLD_HAMORUN_OFF_1 = 0x8B
    OLD_HAMORUN_ON_1 = 0x9B
    OLDHAMORUN_OFF_2 = 0x8C
    OLD_HAMORUN_ON_2 = 0x9C
    HAMORUN_OFF = 0x8D
    HAMORUN_ON = 0x9D
    NOTE_OFF = 0x8E
    NOTE_ON = 0x9E
    PLAY_MARK = 0xFF

    @classmethod
    def value_of(cls, value: int) -> "SaitenRefEventType | None":
        """Look up the member for a raw status byte.

        Args:
            value (int): Raw status byte

        Returns:
            SaitenRefEventType | None: Matching member, or None when unknown
        """
        # First parameter of a classmethod is the class, so name it `cls`
        # (was `self`). An unknown value still yields None, as before.
        for e in cls:
            if e.value == value:
                return e
        return None


class PlayMarkType(Enum):
    """Values carried by PLAY_MARK events."""

    START_OF_SONG = 0x00
    END_OF_SONG = 0x01
    BEATMARK_ACCENT = 0x02
    BEATMARK_NOT_ACCENT = 0x03
    START_OF_VOCAL = 0x04
    START_OF_BRIDGE = 0x05
    START_OF_ENDING = 0x06
    START_OF_LYRICS_PAGE = 0x07
    START_OF_SABI = 0x08
    END_OF_SABI = 0x09
    START_OF_CLIMAX = 0x0A
    END_OF_CLIMAX = 0x0B
    SECOND_CHORUS_FADEOUT = 0x0C
    NOT_PLAY_MARK = 0x7F


@dataclass
class SaitenRefEvent:
    """One SaitenRef event: time in ms plus a 3-byte message."""

    time: int
    event_type: SaitenRefEventType
    note_number: int
    value: int

    def to_dict(self):
        """Serialize to the SongProperty.json SaitenRef item shape."""
        return {
            "Clock": self.time,
            "msg": [self.event_type.value, self.note_number, self.value],
        }


MIDI_M_TRACK_PORT = 16


def midi_to_saiten_ref(midi: mido.MidiFile) -> list[SaitenRefEvent]:
    """Convert a SMF to a list of SaitenRef events.

    Args:
        midi (mido.MidiFile): Input MIDI file

    Raises:
        ValueError: Melody track, melody note or time signature not found

    Returns:
        list[SaitenRefEvent]: SaitenRef events sorted by time
    """
    absolute_time_track: list[SaitenRefEvent] = []

    midi_time_converter = MidiTimeConverter()
    midi_time_converter.load_from_midi(midi)

    # Guide melody is expected on port 1, channel 8
    melody_track = get_track_by_port_channel(midi.tracks, 1, 8)
    if melody_track is None:
        raise ValueError("Melody track not found.")

    melody_notes: list[tuple[int, int]] = []
    # BUGFIX: initialize the running note state before the loop — a note_off
    # arriving first previously raised NameError on
    # current_melody_node_number (m_track_conversion.py already initializes
    # both to -1).
    current_melody_note_start = -1
    current_melody_node_number = -1
    track_time = 0
    for midi_message in melody_track:
        track_time += midi_message.time
        absolute_time = midi_time_converter.ticks_to_ms(track_time)

        if not isinstance(midi_message, mido.Message):
            continue

        # Emit NOTE_ON/NOTE_OFF reference events with a fixed value of 100
        if midi_message.type == "note_on":  # type: ignore
            absolute_time_track.append(
                SaitenRefEvent(
                    absolute_time, SaitenRefEventType.NOTE_ON, midi_message.note, 100  # type: ignore
                )
            )
        elif midi_message.type == "note_off":  # type: ignore
            absolute_time_track.append(
                SaitenRefEvent(
                    absolute_time, SaitenRefEventType.NOTE_OFF, midi_message.note, 100  # type: ignore
                )
            )

        # Track (start, end) pairs of melody notes for validation below
        if midi_message.type == "note_on":  # type: ignore
            current_melody_note_start = absolute_time
            current_melody_node_number = midi_message.note  # type: ignore
        elif (
            midi_message.type == "note_off"  # type: ignore
            and midi_message.note == current_melody_node_number  # type: ignore
        ):
            melody_notes.append((current_melody_note_start, absolute_time))  # type: ignore

    if len(melody_notes) < 1:
        raise ValueError("Melody note not found.")

    m_track = get_track_by_port_channel(midi.tracks, MIDI_M_TRACK_PORT, 0)

    hooks: list[tuple[int, int]] = []

    two_chorus_fadeout_time = -1

    if m_track is not None:
        current_hook_start = -1
        track_time = 0
        # BUGFIX: iterate the M-Track that was just looked up by port/channel,
        # not midi.tracks[1] — the M-Track is not guaranteed to be the second
        # track of the file.
        for midi_message in m_track:
            track_time += midi_message.time
            absolute_time = midi_time_converter.ticks_to_ms(track_time)

            if not isinstance(midi_message, mido.Message):
                continue

            # Note 48 marks a hook section, note 72 the 2-chorus fadeout
            if midi_message.type == "note_on":  # type: ignore
                if midi_message.note == 48:  # type: ignore
                    current_hook_start = absolute_time
                elif midi_message.note == 72:  # type: ignore
                    two_chorus_fadeout_time = absolute_time
            elif midi_message.type == "note_off":  # type: ignore
                if midi_message.note == 48:  # type: ignore
                    hooks.append((current_hook_start, absolute_time))

    first_note_on_tick, last_note_off_tick = get_first_and_last_note_times(midi.tracks)
    first_note_on_time = midi_time_converter.ticks_to_ms(first_note_on_tick)
    last_note_off_time = midi_time_converter.ticks_to_ms(last_note_off_tick)

    time_signatures = get_time_signatures(midi.tracks)

    # Emit one beat mark per quarter note following the active time signature
    if len(time_signatures) > 0:
        current_beat_time = 0
        current_beat_count = time_signatures[0][1]
        while current_beat_time < last_note_off_time + 1:
            time_signature_time = current_beat_time
            time_signature = next(
                (
                    time_signature
                    for time_signature in reversed(time_signatures)
                    if time_signature[0] <= time_signature_time
                ),
                None,
            )
            if time_signature is None:
                raise ValueError("Time signature not found.")

            if current_beat_count < time_signature[1]:
                absolute_time_track.append(
                    SaitenRefEvent(
                        midi_time_converter.ticks_to_ms(current_beat_time),
                        SaitenRefEventType.PLAY_MARK,
                        0x30,
                        PlayMarkType.BEATMARK_NOT_ACCENT.value,
                    )
                )
                current_beat_count += 1
            else:
                absolute_time_track.append(
                    SaitenRefEvent(
                        midi_time_converter.ticks_to_ms(current_beat_time),
                        SaitenRefEventType.PLAY_MARK,
                        0x30,
                        PlayMarkType.BEATMARK_ACCENT.value,
                    )
                )
                current_beat_count = 1

            current_beat_time += midi.ticks_per_beat

    # NOTE(review): first_note_on_time and the hook/fadeout times below are
    # already in ms, yet they are passed through ticks_to_ms() again — looks
    # like a double conversion (same pattern as m_track_conversion.py);
    # confirm against MidiTimeConverter semantics before changing.
    absolute_time_track.append(
        SaitenRefEvent(
            midi_time_converter.ticks_to_ms(first_note_on_time),
            SaitenRefEventType.PLAY_MARK,
            0x30,
            PlayMarkType.START_OF_SONG.value,
        )
    )
    absolute_time_track.append(
        SaitenRefEvent(
            midi_time_converter.ticks_to_ms(last_note_off_time),
            SaitenRefEventType.PLAY_MARK,
            0x30,
            PlayMarkType.END_OF_SONG.value,
        )
    )

    # All hooks but the last are sabi sections; the last is the climax
    for hook_start, hook_end in hooks[:-1]:
        absolute_time_track.append(
            SaitenRefEvent(
                midi_time_converter.ticks_to_ms(hook_start),
                SaitenRefEventType.PLAY_MARK,
                0x30,
                PlayMarkType.START_OF_SABI.value,
            )
        )
        absolute_time_track.append(
            SaitenRefEvent(
                midi_time_converter.ticks_to_ms(hook_end),
                SaitenRefEventType.PLAY_MARK,
                0x30,
                PlayMarkType.END_OF_SABI.value,
            )
        )

    if len(hooks) > 0:
        last_hook_start, last_hook_end = hooks[-1]
        absolute_time_track.append(
            SaitenRefEvent(
                midi_time_converter.ticks_to_ms(last_hook_start),
                SaitenRefEventType.PLAY_MARK,
                0x30,
                PlayMarkType.START_OF_CLIMAX.value,
            )
        )
        absolute_time_track.append(
            SaitenRefEvent(
                midi_time_converter.ticks_to_ms(last_hook_end),
                SaitenRefEventType.PLAY_MARK,
                0x30,
                PlayMarkType.END_OF_CLIMAX.value,
            )
        )

    if two_chorus_fadeout_time != -1:
        absolute_time_track.append(
            SaitenRefEvent(
                midi_time_converter.ticks_to_ms(two_chorus_fadeout_time),
                SaitenRefEventType.PLAY_MARK,
                0x30,
                PlayMarkType.SECOND_CHORUS_FADEOUT.value,
            )
        )

    absolute_time_track.sort(key=lambda absolute_time_event: absolute_time_event.time)

    return absolute_time_track

def saiten_ref_to_midi(saiten_ref_events: list[SaitenRefEvent], ticks_per_beat: int = 480) -> mido.MidiFile:
    """Convert SaitenRef events to a two-track MIDI file.

    Track 0 (port 1, channel 8) carries the melody notes; track 1 (port 16,
    channel 0) carries hook/fadeout marks. A fixed 125 bpm tempo is assumed
    for the ms -> ticks conversion.

    Args:
        saiten_ref_events (list[SaitenRefEvent]): Input events
        ticks_per_beat (int): MIDI resolution

    Returns:
        mido.MidiFile: Converted MIDI file
    """
    midi = mido.MidiFile(ticks_per_beat=ticks_per_beat)
    melody_track = mido.MidiTrack()
    melody_track.append(mido.MetaMessage("midi_port", port=1))
    melody_track.append(mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(125.0)))
    m_track = mido.MidiTrack()
    m_track.append(mido.MetaMessage("midi_port", port=16))
    m_track.append(mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(125.0)))
    midi.tracks.append(melody_track)
    midi.tracks.append(m_track)

    midi_time_converter = MidiTimeConverter()
    midi_time_converter.ticks_per_beat = ticks_per_beat
    midi_time_converter.add_tempo_change(0, 125.0)

    melody_events = []
    m_events = []

    for event in saiten_ref_events:
        ticks = int(midi_time_converter.ms_to_ticks(event.time))

        if event.event_type == SaitenRefEventType.NOTE_ON:
            melody_events.append((ticks, mido.Message("note_on", note=event.note_number, velocity=event.value, time=0, channel=8)))
        elif event.event_type == SaitenRefEventType.NOTE_OFF:
            melody_events.append((ticks, mido.Message("note_off", note=event.note_number, velocity=event.value, time=0, channel=8)))
        elif event.event_type == SaitenRefEventType.PLAY_MARK:
            # Sabi sections map to note 48, fadeout to a short note 72
            note = None
            if event.value == PlayMarkType.START_OF_SABI.value:
                note = 48
                m_events.append((ticks, mido.Message("note_on", note=note, velocity=100, time=0, channel=0)))
            elif event.value == PlayMarkType.END_OF_SABI.value:
                note = 48
                m_events.append((ticks, mido.Message("note_off", note=note, velocity=100, time=0, channel=0)))
            elif event.value == PlayMarkType.SECOND_CHORUS_FADEOUT.value:
                note = 72
                m_events.append((ticks, mido.Message("note_on", note=note, velocity=100, time=0, channel=0)))
                m_events.append((ticks + 10, mido.Message("note_off", note=note, velocity=100, time=0, channel=0)))

    def insert_track_messages(track, sorted_events):
        """Sorts events and calculates delta"""
        sorted_events.sort(key=lambda x: x[0])
        last_tick = 0
        for tick, msg in sorted_events:
            msg.time = tick - last_tick
            track.append(msg)
            last_tick = tick

    insert_track_messages(melody_track, melody_events)
    insert_track_messages(m_track, m_events)

    return midi
| PTrackInfoChunk, 9 | ExtendedPTrackInfoChunk, 10 | P3TrackInfoChunk, 11 | PTrackEvent, 12 | PTrackAbsoluteTimeEvent, 13 | PTrackChunk, 14 | ) 15 | from midi.event import MidiEvent 16 | from midi.time_converter import MidiTimeConverter 17 | from midi.utils import get_track_port 18 | from .mmt_tg import MultiPartEntry, MmtTg 19 | 20 | __logger = getLogger(__name__) 21 | 22 | 23 | @dataclass 24 | class PTrackAbsoluteTimeMetaEvent(MidiEvent): 25 | """P-Track Absolute Time Meta Event""" 26 | 27 | track: int 28 | time: int 29 | 30 | 31 | def __p_tracks_to_absolute_time_track( 32 | track_info: PTrackInfoChunk | ExtendedPTrackInfoChunk | P3TrackInfoChunk, 33 | tracks: list[PTrackChunk], 34 | ) -> list[PTrackAbsoluteTimeEvent]: 35 | absolute_time_track: list[PTrackAbsoluteTimeEvent] = [] 36 | for track in tracks: 37 | absolute_time_track += track.absolute_time_track(track_info) 38 | 39 | absolute_time_track.sort(key=lambda absolute_time_event: absolute_time_event.time) 40 | return absolute_time_track 41 | 42 | 43 | def p_track_to_midi( 44 | m_track_interpretation: MTrackInterpretation, 45 | track_info: PTrackInfoChunk | ExtendedPTrackInfoChunk | P3TrackInfoChunk, 46 | tracks: list[PTrackChunk], 47 | sysex_to_text: bool, 48 | ) -> mido.MidiFile: 49 | midi_time_converter = MidiTimeConverter() 50 | for time, tempo in m_track_interpretation.tempos: 51 | midi_time_converter.add_tempo_change(time, tempo) 52 | 53 | midi_device_1 = MmtTg() 54 | midi_device_2 = MmtTg() 55 | 56 | midi = mido.MidiFile() 57 | for port in range(PTrackChunk.PORTS): 58 | for channel in range(PTrackChunk.CHANNELS_PER_PORT): 59 | midi_track = mido.MidiTrack() 60 | 61 | # Set port 62 | midi_track.append( 63 | mido.MetaMessage( 64 | "midi_port", 65 | port=port, 66 | ) 67 | ) 68 | # Track setup messages 69 | midi_device = midi_device_1 if port < 2 else midi_device_2 70 | muti_part_entry_index = port // 2 * MmtTg.PARTS_PER_PORT + channel 71 | multi_part_entry = 
midi_device.multi_part_entry(muti_part_entry_index) 72 | part_number = port * MmtTg.PARTS_PER_PORT + channel 73 | track_setup_messages = MultiPartEntry.to_mido_messages( 74 | multi_part_entry, 75 | part_number % PTrackChunk.CHANNELS_PER_PORT, 76 | 0, 77 | ) 78 | midi_track += track_setup_messages 79 | 80 | midi.tracks.append(midi_track) 81 | 82 | absolute_time_track: list[PTrackAbsoluteTimeEvent | PTrackAbsoluteTimeMetaEvent] = ( 83 | [] 84 | ) 85 | absolute_time_track += __p_tracks_to_absolute_time_track(track_info, tracks) 86 | if len(absolute_time_track) < 1: 87 | raise ValueError("Track empty.") 88 | 89 | for time, tempo in m_track_interpretation.tempos: 90 | absolute_time_track.append( 91 | PTrackAbsoluteTimeMetaEvent( 92 | 0x51, b"\x03" + round(mido.bpm2tempo(tempo)).to_bytes(3, "big"), 0, time 93 | ) 94 | ) 95 | for time, numerator, denominator in m_track_interpretation.time_signatures: 96 | absolute_time_track.append( 97 | PTrackAbsoluteTimeMetaEvent( 98 | 0x58, 99 | bytes([0x04, numerator, int(math.log2(denominator)), 24, 8]), 100 | 0, 101 | time, 102 | ), 103 | ) 104 | absolute_time_track.sort(key=lambda absolute_time_event: absolute_time_event.time) 105 | 106 | track_times = [0] * PTrackChunk.TOTAL_CHANNELS 107 | for event in absolute_time_track: 108 | status_type = event.status_byte & 0xF0 109 | 110 | tick = midi_time_converter.ms_to_ticks(event.time) 111 | 112 | delta_time = tick - track_times[event.track] 113 | track_times[event.track] = tick 114 | 115 | if isinstance(event, PTrackAbsoluteTimeMetaEvent): 116 | meta_message = mido.MetaMessage.from_bytes( 117 | b"\xff" + event.status_byte.to_bytes() + event.data_bytes 118 | ) 119 | meta_message.time = delta_time 120 | midi.tracks[event.track].append(meta_message) 121 | continue 122 | 123 | if status_type == 0xF0: 124 | if event.status_byte != 0xF0: 125 | midi.tracks[event.track].append( 126 | mido.MetaMessage( 127 | type="text", 128 | text=event.to_bytes().hex(" ").upper(), 129 | time=delta_time, 130 | 
) 131 | ) 132 | continue 133 | 134 | # Convert SysEx event to General MIDI message 135 | midi_device = midi_device_1 if event.port < 2 else midi_device_2 136 | part_number = MmtTg.effecting_multi_part_number(event) 137 | if part_number is not None: 138 | before_sysex = midi_device.multi_part_entry(part_number) 139 | midi_device.receive_sysex_message(event) 140 | after_sysex = midi_device.multi_part_entry(part_number) 141 | multi_part_diff = dict( 142 | asdict(after_sysex).items() - asdict(before_sysex).items() 143 | ) 144 | track_number = event.port * MmtTg.PARTS_PER_PORT + part_number 145 | midi.tracks[track_number] += MultiPartEntry.to_mido_messages( 146 | multi_part_diff, 147 | part_number % PTrackChunk.CHANNELS_PER_PORT, 148 | delta_time, 149 | ) 150 | 151 | if sysex_to_text: 152 | midi.tracks[event.track].append( 153 | mido.MetaMessage( 154 | type="text", 155 | text=event.to_bytes().hex(" ").upper(), 156 | time=delta_time, 157 | ) 158 | ) 159 | continue 160 | 161 | try: 162 | mido.messages.specs.SPEC_BY_STATUS[event.status_byte] 163 | except KeyError: 164 | __logger.warning( 165 | f"Unknown MIDI message detected. status_byte={hex(event.status_byte)}" 166 | ) 167 | pass 168 | 169 | midi_message: mido.Message 170 | try: 171 | midi_message = mido.Message.from_bytes(event.to_bytes(), delta_time) 172 | except ValueError: 173 | __logger.warning( 174 | f"Invalid MIDI event data. 
message=`{event.to_bytes().hex(" ").upper()}`" 175 | ) 176 | continue 177 | midi.tracks[event.track].append(midi_message) 178 | 179 | return midi 180 | 181 | 182 | def __midi_to_absolute_time_tracks( 183 | midi: mido.MidiFile, 184 | ) -> list[list[PTrackAbsoluteTimeEvent]]: 185 | midi_time_converter = MidiTimeConverter() 186 | midi_time_converter.load_from_midi(midi) 187 | 188 | absolute_time_tracks: list[list[PTrackAbsoluteTimeEvent]] = [[]] * PTrackChunk.PORTS 189 | for i, midi_track in enumerate(midi.tracks): 190 | port = get_track_port(midi_track) 191 | if port is None: 192 | __logger.warning(f"Port undefined. track={i}") 193 | continue 194 | 195 | track_time = 0 196 | for midi_message in midi_track: 197 | midi_message_data = bytes(midi_message.bin()) 198 | status_byte = midi_message_data[0] 199 | status_type = status_byte & 0xF0 200 | data_bytes = midi_message_data[1:] 201 | 202 | track_time += midi_message.time 203 | absolute_time = midi_time_converter.ticks_to_ms(track_time) 204 | 205 | if status_type == 0xF0: 206 | # System messages 207 | track = port * PTrackChunk.CHANNELS_PER_PORT 208 | absolute_time_tracks[port].append( 209 | PTrackAbsoluteTimeEvent( 210 | status_byte, 211 | data_bytes, 212 | port, 213 | track, 214 | absolute_time, 215 | ) 216 | ) 217 | else: 218 | # Channel voice messages 219 | channel = status_byte & 0x0F 220 | track = (port * PTrackChunk.CHANNELS_PER_PORT) + channel 221 | absolute_time_tracks[port].append( 222 | PTrackAbsoluteTimeEvent( 223 | status_byte, 224 | data_bytes, 225 | port, 226 | track, 227 | absolute_time, 228 | ) 229 | ) 230 | 231 | for absolute_time_track in absolute_time_tracks: 232 | absolute_time_track.sort( 233 | key=lambda absolute_time_event: absolute_time_event.time 234 | ) 235 | 236 | return absolute_time_tracks 237 | 238 | 239 | def __absolute_time_track_to_p_track( 240 | absolute_time_track: list[PTrackAbsoluteTimeEvent], 241 | ) -> list[PTrackEvent]: 242 | events: list[PTrackEvent] = [] 243 | current_time = 0 
def __absolute_time_track_to_p_track(
    absolute_time_track: list[PTrackAbsoluteTimeEvent],
) -> list[PTrackEvent]:
    """Convert a time-sorted absolute-time event list to delta-time P-Track events.

    Note On events absorb their matching Note Off (the duration is stored
    on the Note On, right-shifted by 2 — presumably the on-disk time
    resolution; TODO confirm against the P-Track writer).  0xA0/0xC0
    messages are wrapped in an 0xFE escape event.
    """
    events: list[PTrackEvent] = []
    current_time = 0
    for event_index, event in enumerate(absolute_time_track):
        status_type = event.status_byte & 0xF0
        delta_time = event.time - current_time

        if status_type == 0x80:
            # Note Off is folded into the matching Note On's duration below.
            continue
        elif status_type == 0x90:
            channel = event.status_byte & 0x0F
            note_number = event.data_bytes[0]
            # Scan forward for the matching Note Off (same channel & note).
            # If none is found, the duration stays 0.
            # NOTE(review): velocity-0 Note On is not treated as Note Off
            # here — confirm the source MIDI never uses that convention.
            note_off_time = event.time
            for i in range(event_index, len(absolute_time_track)):
                note_off_event = absolute_time_track[i]
                note_off_event_status_type = note_off_event.status_byte & 0xF0
                note_off_event_channel = note_off_event.status_byte & 0x0F
                if (
                    note_off_event_status_type == 0x80
                    and note_off_event_channel == channel
                ):
                    note_off_event_note_number = note_off_event.data_bytes[0]
                    if note_off_event_note_number == note_number:
                        note_off_time = note_off_event.time
                        break
            duration = (note_off_time - event.time) >> 2
            events.append(
                PTrackEvent(
                    event.status_byte,
                    event.data_bytes,
                    delta_time,
                    duration,
                )
            )
        elif status_type == 0xA0 or status_type == 0xC0:
            # Polyphonic aftertouch / program change: escape as 0xFE event.
            data_bytes = event.status_byte.to_bytes() + event.data_bytes
            events.append(
                PTrackEvent(
                    0xFE,
                    data_bytes,
                    delta_time,
                )
            )
        elif status_type == 0xF0:
            if event.status_byte != 0xF0:
                # Non-SysEx system messages are dropped.
                continue

            events.append(
                PTrackEvent(
                    0xF0,
                    event.data_bytes,
                    delta_time,
                )
            )
        else:
            events.append(
                PTrackEvent(
                    event.status_byte,
                    event.data_bytes,
                    delta_time,
                )
            )

        current_time = event.time

    # End of Track
    events.append(PTrackEvent(0x00, b"\x00\x00\x00", 0))

    return events


def midi_to_p_tracks(midi: mido.MidiFile) -> list[PTrackChunk]:
    """Convert a MIDI file to P-Track chunks, one per non-empty port.

    Args:
        midi (mido.MidiFile): Input MIDI file

    Returns:
        list[PTrackChunk]: Converted P-Track chunks
    """
    absolute_time_tracks = __midi_to_absolute_time_tracks(midi)
    p_tracks: list[PTrackChunk] = []
    track_count = 0
    for i in range(PTrackChunk.PORTS):
        # BUG FIX: entries are lists (possibly empty), never None, so the
        # old `is None` test could never skip a port.  Skip empty ports.
        if not absolute_time_tracks[i]:
            continue

        # Track number 2 is reserved (P3 track): 0, 1, 3, 4, ...
        track_number = track_count + 1 if track_count >= 2 else track_count
        p_tracks.append(
            PTrackChunk(
                b"\xffPR" + track_number.to_bytes(),
                __absolute_time_track_to_p_track(absolute_time_tracks[i]),
            )
        )
        track_count += 1
    return p_tracks


def midi_to_p3_track(midi: mido.MidiFile) -> PTrackChunk:
    """Convert a MIDI file's port 2 to the P3 (scoring) track chunk.

    Args:
        midi (mido.MidiFile): Input MIDI file

    Raises:
        ValueError: P-Track 2 not found.

    Returns:
        PTrackChunk: Converted P3 track chunk (Note On/Off events only)
    """
    absolute_time_tracks = __midi_to_absolute_time_tracks(midi)
    absolute_time_track = absolute_time_tracks[2]
    # BUG FIX: previously tested `absolute_time_tracks is None` — the
    # wrong variable, and never true.  Check the selected port's events.
    if not absolute_time_track:
        raise ValueError("P-Track 2 not found.")
    absolute_time_track = [
        event
        for event in absolute_time_track
        # Note Off and Note On
        if event.status_byte_type() in [0x80, 0x90]
    ]
    return PTrackChunk(
        b"\xffPR\x02",
        __absolute_time_track_to_p_track(absolute_time_track),
    )
manufacture_id = event.data_bytes[0] 36 | if manufacture_id != 0x7F: 37 | return False 38 | return True 39 | 40 | @staticmethod 41 | def __is_universal_non_realtime_message(event: MidiEvent) -> bool: 42 | if not MmtTg.__is_sysex_message(event): 43 | return False 44 | if len(event.data_bytes) < 5: 45 | return False 46 | manufacture_id = event.data_bytes[0] 47 | if manufacture_id != 0x7E: 48 | return False 49 | return True 50 | 51 | @staticmethod 52 | def __is_native_parameter_change_message(event: MidiEvent) -> bool: 53 | if not MmtTg.__is_sysex_message(event): 54 | return False 55 | if len(event.data_bytes) < 9: 56 | return False 57 | manufacture_id = event.data_bytes[0] 58 | if manufacture_id != 0x43: 59 | return False 60 | return True 61 | 62 | @staticmethod 63 | def effecting_multi_part_number(event: MidiEvent): 64 | if not MmtTg.__is_native_parameter_change_message(event): 65 | return 66 | if event.data_bytes[3] != 0x02: 67 | return 68 | return MultiPartEntry.ENTRY_INDEX_TO_PART_NUMBER_TABLE[event.data_bytes[4]] 69 | 70 | def __init__(self) -> None: 71 | self.__logger = getLogger(__name__) 72 | 73 | self.initialize_state() 74 | 75 | def initialize_state(self) -> None: 76 | self.sound_module_mode = 0x00 77 | self.native_parameter_memory = [0x00] * 0x200000 78 | 79 | # Set default value 80 | for entry_index in range(0x20): 81 | entry_address = 0x008000 + (entry_index << 7) 82 | 83 | self.native_parameter_memory[entry_address + 0x01] = 0x00 84 | self.native_parameter_memory[entry_address + 0x02] = 0x00 85 | self.native_parameter_memory[entry_address + 0x03] = 0x00 86 | self.native_parameter_memory[entry_address + 0x04] = entry_index 87 | self.native_parameter_memory[entry_address + 0x05] = 0x01 88 | self.native_parameter_memory[entry_address + 0x06] = 0x01 89 | self.native_parameter_memory[entry_address + 0x07] = 0x01 90 | self.native_parameter_memory[entry_address + 0x08] = 0x01 91 | self.native_parameter_memory[entry_address + 0x09] = 0x01 92 | 
self.native_parameter_memory[entry_address + 0x0A] = 0x01 93 | self.native_parameter_memory[entry_address + 0x0B] = 0x01 94 | self.native_parameter_memory[entry_address + 0x0C] = 0x01 95 | self.native_parameter_memory[entry_address + 0x0D] = 0x01 96 | self.native_parameter_memory[entry_address + 0x0E] = 0x01 97 | self.native_parameter_memory[entry_address + 0x0F] = 0x01 98 | self.native_parameter_memory[entry_address + 0x10] = 0x01 99 | self.native_parameter_memory[entry_address + 0x11] = 0x01 100 | self.native_parameter_memory[entry_address + 0x12] = 0x01 101 | self.native_parameter_memory[entry_address + 0x13] = 0x01 102 | self.native_parameter_memory[entry_address + 0x14] = 0x01 103 | 104 | self.native_parameter_memory[entry_address + 0x15] = 0x01 105 | self.native_parameter_memory[entry_address + 0x16] = 0x01 106 | self.native_parameter_memory[entry_address + 0x17] = 0x01 107 | self.native_parameter_memory[entry_address + 0x18] = 0x01 108 | self.native_parameter_memory[entry_address + 0x19] = 0x08 109 | self.native_parameter_memory[entry_address + 0x1A] = 0x00 110 | self.native_parameter_memory[entry_address + 0x1B] = 0x64 111 | self.native_parameter_memory[entry_address + 0x1C] = 0x40 112 | self.native_parameter_memory[entry_address + 0x1D] = 0x40 113 | self.native_parameter_memory[entry_address + 0x1E] = 0x40 114 | self.native_parameter_memory[entry_address + 0x1F] = 0x00 115 | self.native_parameter_memory[entry_address + 0x20] = 0x7F 116 | self.native_parameter_memory[entry_address + 0x21] = 0x10 117 | self.native_parameter_memory[entry_address + 0x22] = 0x11 118 | self.native_parameter_memory[entry_address + 0x23] = 0x7F 119 | self.native_parameter_memory[entry_address + 0x24] = 0x00 120 | self.native_parameter_memory[entry_address + 0x25] = 0x40 121 | self.native_parameter_memory[entry_address + 0x26] = 0x00 122 | 123 | self.native_parameter_memory[entry_address + 0x27] = 0x40 124 | self.native_parameter_memory[entry_address + 0x28] = 0x40 125 | 
self.native_parameter_memory[entry_address + 0x29] = 0x40 126 | self.native_parameter_memory[entry_address + 0x2A] = 0x40 127 | self.native_parameter_memory[entry_address + 0x2B] = 0x40 128 | self.native_parameter_memory[entry_address + 0x2C] = 0x40 129 | self.native_parameter_memory[entry_address + 0x2D] = 0x40 130 | self.native_parameter_memory[entry_address + 0x2E] = 0x40 131 | 132 | self.native_parameter_memory[entry_address + 0x2F] = 0x40 133 | self.native_parameter_memory[entry_address + 0x30] = 0x40 134 | self.native_parameter_memory[entry_address + 0x31] = 0x40 135 | self.native_parameter_memory[entry_address + 0x32] = 0x40 136 | self.native_parameter_memory[entry_address + 0x33] = 0x40 137 | self.native_parameter_memory[entry_address + 0x34] = 0x40 138 | self.native_parameter_memory[entry_address + 0x35] = 0x40 139 | self.native_parameter_memory[entry_address + 0x36] = 0x40 140 | self.native_parameter_memory[entry_address + 0x37] = 0x40 141 | self.native_parameter_memory[entry_address + 0x38] = 0x40 142 | self.native_parameter_memory[entry_address + 0x39] = 0x40 143 | self.native_parameter_memory[entry_address + 0x3A] = 0x40 144 | 145 | self.native_parameter_memory[entry_address + 0x3B] = 0x40 146 | self.native_parameter_memory[entry_address + 0x3C] = 0x40 147 | self.native_parameter_memory[entry_address + 0x3D] = 0x40 148 | self.native_parameter_memory[entry_address + 0x3E] = 0x0A 149 | self.native_parameter_memory[entry_address + 0x3F] = 0x00 150 | 151 | self.native_parameter_memory[entry_address + 0x41] = 0x42 152 | self.native_parameter_memory[entry_address + 0x42] = 0x40 153 | self.native_parameter_memory[entry_address + 0x43] = 0x40 154 | self.native_parameter_memory[entry_address + 0x44] = 0x00 155 | self.native_parameter_memory[entry_address + 0x45] = 0x00 156 | 157 | self.native_parameter_memory[entry_address + 0x47] = 0x40 158 | self.native_parameter_memory[entry_address + 0x48] = 0x40 159 | self.native_parameter_memory[entry_address + 0x49] = 
0x40 160 | self.native_parameter_memory[entry_address + 0x4A] = 0x00 161 | self.native_parameter_memory[entry_address + 0x4B] = 0x00 162 | 163 | self.native_parameter_memory[entry_address + 0x4D] = 0x40 164 | self.native_parameter_memory[entry_address + 0x4E] = 0x40 165 | self.native_parameter_memory[entry_address + 0x4F] = 0x40 166 | self.native_parameter_memory[entry_address + 0x50] = 0x00 167 | self.native_parameter_memory[entry_address + 0x51] = 0x00 168 | 169 | self.native_parameter_memory[entry_address + 0x53] = 0x40 170 | self.native_parameter_memory[entry_address + 0x54] = 0x40 171 | self.native_parameter_memory[entry_address + 0x55] = 0x40 172 | self.native_parameter_memory[entry_address + 0x56] = 0x00 173 | self.native_parameter_memory[entry_address + 0x57] = 0x00 174 | 175 | self.native_parameter_memory[entry_address + 0x59] = 0x40 176 | self.native_parameter_memory[entry_address + 0x5A] = 0x40 177 | self.native_parameter_memory[entry_address + 0x5B] = 0x40 178 | self.native_parameter_memory[entry_address + 0x5C] = 0x00 179 | self.native_parameter_memory[entry_address + 0x5D] = 0x00 180 | 181 | self.native_parameter_memory[entry_address + 0x5F] = 0x00 182 | self.native_parameter_memory[entry_address + 0x60] = 0x00 183 | 184 | def __receive_universal_realtime_message(self, event: MidiEvent) -> None: 185 | if event.status_byte != 0xF0: 186 | raise ValueError( 187 | f"Invalid status_byte. status_byte={hex(event.status_byte)}" 188 | ) 189 | manufacture_id = event.data_bytes[0] 190 | if manufacture_id != 0x7F: 191 | raise ValueError( 192 | f"Invalid manufacture_id. manufacture_id={hex(manufacture_id)}" 193 | ) 194 | target_device_id = event.data_bytes[1] 195 | sub_id_1 = event.data_bytes[2] 196 | if sub_id_1 != 0x04: 197 | self.__logger.warning( 198 | f"Unknown sub_id_1 detected. 
sub_id_1={hex(sub_id_1)}" 199 | ) 200 | 201 | sub_id_2 = event.data_bytes[3] 202 | if sub_id_2 == 0x01: 203 | # Master Volume 204 | volume_lsb = event.data_bytes[4] 205 | volume_msb = event.data_bytes[5] 206 | # MASTER VOLUME 207 | self.native_parameter_memory[0x000004] = volume_msb 208 | elif sub_id_2 == 0x02: 209 | # Master Balance 210 | balance_lsb = event.data_bytes[4] 211 | balance_msb = event.data_bytes[5] 212 | # MASTER PAN 213 | self.native_parameter_memory[0x000006] = balance_msb 214 | else: 215 | self.__logger.warning( 216 | f"Unknown sub_id_2 detected. sub_id_2={hex(sub_id_2)}" 217 | ) 218 | 219 | def __receive_universal_non_realtime_message(self, event: MidiEvent) -> None: 220 | if event.status_byte != 0xF0: 221 | raise ValueError( 222 | f"Invalid status_byte. status_byte={hex(event.status_byte)}" 223 | ) 224 | manufacture_id = event.data_bytes[0] 225 | if manufacture_id != 0x7E: 226 | raise ValueError( 227 | f"Invalid manufacture_id. manufacture_id={hex(manufacture_id)}" 228 | ) 229 | target_device_id = event.data_bytes[1] 230 | sub_id_1 = event.data_bytes[2] 231 | if sub_id_1 != 0x09: 232 | self.__logger.warning( 233 | f"Unknown sub_id_1 detected. sub_id_1={hex(sub_id_1)}" 234 | ) 235 | 236 | sub_id_2 = event.data_bytes[3] 237 | if sub_id_2 == 0x01: 238 | self.sound_module_mode = event.data_bytes[4] 239 | else: 240 | self.__logger.warning( 241 | f"Unknown sub_id_2 detected. sub_id_2={hex(sub_id_2)}" 242 | ) 243 | 244 | def __receive_native_parameter_change_message(self, event: MidiEvent) -> None: 245 | if event.status_byte != 0xF0: 246 | raise ValueError( 247 | f"Invalid status_byte. status_byte={hex(event.status_byte)}" 248 | ) 249 | manufacture_id = event.data_bytes[0] 250 | if manufacture_id != 0x43: 251 | raise ValueError( 252 | f"Invalid manufacture_id. 
manufacture_id={hex(manufacture_id)}" 253 | ) 254 | device_number_byte = event.data_bytes[1] 255 | if device_number_byte & 0xF0 != 0x10: 256 | raise ValueError( 257 | f"Invalid device_number_byte detected. device_number_byte={hex(device_number_byte)}" 258 | ) 259 | device_number = device_number_byte & 0x0F 260 | model_id = event.data_bytes[2] 261 | 262 | address = ( 263 | event.data_bytes[3] << 14 | event.data_bytes[4] << 7 | event.data_bytes[5] 264 | ) 265 | data_length = len(event.data_bytes) - 8 266 | data = event.data_bytes[6 : 6 + data_length] 267 | check_sum = event.data_bytes[-2] 268 | 269 | if address == 0x00007F: 270 | # All Parameters Reset 271 | self.initialize_state() 272 | return 273 | self.native_parameter_memory[address : address + data_length] = data 274 | 275 | def receive_sysex_message(self, event: MidiEvent) -> None: 276 | if len(event.data_bytes) < 1: 277 | raise ValueError("Invalid event.data legnth.") 278 | 279 | if event.status_byte != 0xF0: 280 | raise ValueError( 281 | f"Invalid status_byte. status_byte={hex(event.status_byte)}" 282 | ) 283 | end_mark = event.data_bytes[-1] 284 | if end_mark != 0xF7: 285 | raise ValueError(f"Invalid end_mark. end_mark={hex(end_mark)}") 286 | 287 | manufacture_id = event.data_bytes[0] 288 | if manufacture_id == 0x7F: 289 | self.__receive_universal_realtime_message(event) 290 | elif manufacture_id == 0x7E: 291 | self.__receive_universal_non_realtime_message(event) 292 | elif manufacture_id == 0x43: 293 | return self.__receive_native_parameter_change_message(event) 294 | else: 295 | self.__logger.warning( 296 | f"Unknown manufacture_id detected. 
manufacture_id={hex(manufacture_id)}" 297 | ) 298 | 299 | def system(self) -> System: 300 | return System.from_memory(self.native_parameter_memory) 301 | 302 | def multi_part_entry(self, part_number: int) -> MultiPartEntry: 303 | return MultiPartEntry.from_memory(self.native_parameter_memory, part_number) 304 | 305 | def multi_part_entries(self) -> list[MultiPartEntry]: 306 | return [ 307 | self.multi_part_entry(part_number) for part_number in range(MmtTg.PARTS) 308 | ] 309 | -------------------------------------------------------------------------------- /dam_song_tools_cli/cli.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import fire 3 | import logging 4 | import mido 5 | import numpy as np 6 | import os 7 | import json 8 | import soundfile as sf 9 | from typing import Any 10 | 11 | from okd import ( 12 | YksOkdHeader, 13 | OkdFile, 14 | GenericChunk, 15 | YkyiChunk, 16 | MTrackInterpretation, 17 | MTrackChunk, 18 | PTrackInfoChunk, 19 | ExtendedPTrackInfoChunk, 20 | P3TrackInfoChunk, 21 | PTrackChunk, 22 | AdpcmChunk, 23 | okd_to_midi, 24 | midi_to_okds, 25 | ) 26 | 27 | from mtf import mtf_conversion 28 | 29 | def default(item: Any): 30 | match item: 31 | case bytes(): 32 | return item.hex(" ").upper() 33 | case _ if dataclasses.is_dataclass(item): 34 | return dataclasses.asdict(item) 35 | case _: 36 | raise TypeError(type(item)) 37 | 38 | 39 | class Cli: 40 | """DAM OKD Tools CLI 41 | 42 | Args: 43 | log_level (str, optional): Log level. Defaults to "INFO". 
{CRITICAL|FATAL|ERROR|WARN|WARNING|INFO|DEBUG|NOTSET} 44 | """ 45 | 46 | @staticmethod 47 | def __config_logger(level: str) -> None: 48 | """Config logger 49 | 50 | Args: 51 | level (str): Log level 52 | """ 53 | 54 | logging.basicConfig( 55 | level=level, 56 | format="[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s", 57 | ) 58 | 59 | def __init__(self, log_level="INFO"): 60 | """DAM OKD Tools CLI 61 | 62 | Args: 63 | log_level (str, optional): Log level. Defaults to "INFO". {CRITICAL|FATAL|ERROR|WARN|WARNING|INFO|DEBUG|NOTSET} 64 | """ 65 | 66 | Cli.__config_logger(log_level) 67 | self.__logger = logging.getLogger(__name__) 68 | 69 | def song_info(self, okd_path, output_json: bool = False) -> None: 70 | """Show song information (YKYI chunk) from a OKD file 71 | 72 | Args: 73 | okd_path (str): Input OKD path 74 | output_json (bool): Output as JSON format. Defaults to False. 75 | 76 | Raises: 77 | ValueError: Argument `okd_path` must be str. 78 | """ 79 | 80 | if not isinstance(okd_path, str): 81 | raise ValueError("Argument `okd_path` must be str.") 82 | 83 | with open(okd_path, "rb") as okd_file: 84 | okd_file = OkdFile.read(okd_file) 85 | 86 | ykyi_chunk = None 87 | for chunk in okd_file.chunks: 88 | if isinstance(chunk, YkyiChunk): 89 | ykyi_chunk = chunk 90 | break 91 | 92 | if ykyi_chunk is None: 93 | print("YKYI chunk not found in the OKD file.") 94 | return 95 | 96 | if output_json: 97 | output = json.dumps( 98 | ykyi_chunk.to_json_serializable(), 99 | indent=2, 100 | ensure_ascii=False, 101 | ) 102 | print(output) 103 | else: 104 | print("=" * 50) 105 | print("楽曲情報 (Song Information)") 106 | print("=" * 50) 107 | for entry in ykyi_chunk.entries: 108 | if entry.key: 109 | print(f"{entry.key}: {entry.value}") 110 | else: 111 | print(f" {entry.value}") 112 | print("=" * 50) 113 | 114 | def dump_okd(self, okd_path, output_dir_path) -> None: 115 | """Dump chunks of a OKD 116 | 117 | Args: 118 | okd_path (str): Input OKD path 119 | 
output_dir_path (str): Output directory path 120 | 121 | Raises: 122 | ValueError: Argument `okd_path` must be str. 123 | ValueError: Argument `output_directory_path` must be str. 124 | """ 125 | 126 | if not isinstance(okd_path, str): 127 | raise ValueError("Argument `okd_path` must be str.") 128 | if not isinstance(output_dir_path, str): 129 | raise ValueError("Argument `output_directory_path` must be str.") 130 | 131 | os.makedirs(output_dir_path, exist_ok=True) 132 | 133 | with open(okd_path, "rb") as okd_file: 134 | okd_file = OkdFile.read(okd_file) 135 | self.__logger.info(f"OKD loaded. header={okd_file.header}") 136 | 137 | for chunk in okd_file.chunks: 138 | chunk_id_hex = chunk.id.hex().upper() 139 | self.__logger.info( 140 | f"{type(chunk).__name__} found. id={chunk.id} (0x{chunk_id_hex})" 141 | ) 142 | if isinstance(chunk, GenericChunk): 143 | output_path = os.path.join( 144 | output_dir_path, 145 | "chunk_0x" + chunk.id.hex().upper() + ".bin", 146 | ) 147 | with open(output_path, "wb") as output_file: 148 | output_file.write(chunk.id) 149 | output_file.write(chunk.payload) 150 | elif isinstance(chunk, YkyiChunk): 151 | output_path = os.path.join(output_dir_path, "ykyi_info.json") 152 | output_json = json.dumps( 153 | chunk.to_json_serializable(), 154 | indent=2, 155 | ensure_ascii=False, 156 | ) 157 | with open(output_path, "w", encoding="utf-8") as output_file: 158 | output_file.write(output_json) 159 | elif isinstance(chunk, MTrackChunk): 160 | m_track_interpritation = MTrackInterpretation.from_track(chunk) 161 | 162 | track_number = chunk.id[3] 163 | output_path = os.path.join( 164 | output_dir_path, "m_track_" + str(track_number) + ".json" 165 | ) 166 | output_json = json.dumps( 167 | chunk.to_json_serializable(), 168 | indent=2, 169 | ) 170 | with open(output_path, "w") as output_file: 171 | output_file.write(output_json) 172 | 173 | output_path = os.path.join( 174 | output_dir_path, 175 | "m_track_interpretation_" + str(track_number) + ".json", 
176 | ) 177 | output_json = json.dumps( 178 | m_track_interpritation, 179 | indent=2, 180 | default=default, 181 | ) 182 | with open(output_path, "w") as output_file: 183 | output_file.write(output_json) 184 | elif isinstance(chunk, PTrackInfoChunk): 185 | output_path = os.path.join(output_dir_path, "p_track_info.json") 186 | output_json = json.dumps( 187 | chunk, 188 | indent=2, 189 | default=default, 190 | ) 191 | with open(output_path, "w") as output_file: 192 | output_file.write(output_json) 193 | elif isinstance(chunk, ExtendedPTrackInfoChunk): 194 | output_path = os.path.join( 195 | output_dir_path, "extended_p_track_info.json" 196 | ) 197 | output_json = json.dumps( 198 | chunk, 199 | indent=2, 200 | default=default, 201 | ) 202 | with open(output_path, "w") as output_file: 203 | output_file.write(output_json) 204 | elif isinstance(chunk, P3TrackInfoChunk): 205 | output_path = os.path.join(output_dir_path, "p3_track_info.json") 206 | output_json = json.dumps( 207 | chunk, 208 | indent=2, 209 | default=default, 210 | ) 211 | with open(output_path, "w") as output_file: 212 | output_file.write(output_json) 213 | elif isinstance(chunk, PTrackChunk): 214 | track_number = chunk.id[3] 215 | output_path = os.path.join( 216 | output_dir_path, "p_track_" + str(track_number) + ".json" 217 | ) 218 | output_json = json.dumps( 219 | chunk.to_json_serializable(), 220 | indent=2, 221 | ) 222 | with open(output_path, "w") as output_file: 223 | output_file.write(output_json) 224 | elif isinstance(chunk, AdpcmChunk): 225 | for i, track in enumerate(chunk.tracks): 226 | output_path = os.path.join(output_dir_path, f"adpcm_{i}.wav") 227 | samples = track.decode() 228 | samples = np.array(samples, "int16") 229 | sf.write(output_path, samples, 22050) 230 | else: 231 | self.__logger.error("Unknown chunk type detected.") 232 | 233 | def pack_okd(self, okd_path, *chunk_paths, scramble=False): 234 | """Pack a OKD by directly inputting the required data in chunks 235 | 236 | Args: 237 | 
okd_path (str): Output OKD path 238 | chunk_paths (*str): Input chunk paths 239 | scramble (bool, optional): Scramble. Defaults to False. 240 | 241 | Raises: 242 | ValueError: Argument `okd_path` must be str. 243 | ValueError: Argument `chunk_paths` must be *str. 244 | ValueError: Argument `scramble` must be bool. 245 | """ 246 | 247 | if not isinstance(okd_path, str): 248 | raise ValueError("Argument `output` must be str.") 249 | if not isinstance(scramble, bool): 250 | raise ValueError("Argument `scramble` must be bool.") 251 | 252 | for chunk_path in chunk_paths: 253 | if not isinstance(chunk_path, str): 254 | raise ValueError("Argument `input` must be *str.") 255 | 256 | chunks: list[GenericChunk] = [] 257 | with open(chunk_path, "rb") as input_file: 258 | chunk = GenericChunk.read(input_file) 259 | chunk_id_hex = chunk.id.hex().upper() 260 | self.__logger.info(f"Add chunk {id} (0x{chunk_id_hex}).") 261 | chunks.append(chunk) 262 | 263 | header = YksOkdHeader(0, "YKS-1 v6.0v110", 0, 0, 0) 264 | self.__logger.info(f"Set header. header={header}") 265 | okd = OkdFile(header, chunks) 266 | with open(okd_path, "wb") as output_file: 267 | okd.write(output_file, scramble) 268 | 269 | def okd_to_midi(self, okd_path, midi_path, sysex_to_text=True) -> None: 270 | """Convert a OKD to a Standard MIDI File 271 | 272 | Args: 273 | okd_path (str): Input OKD path 274 | midi_path (str): Output MIDI path 275 | sysex_to_text (bool): Convert SysEx Messages to Text Meta Messages 276 | 277 | Raises: 278 | ValueError: Argument `okd_path` must be str. 279 | ValueError: Argument `midi_path` must be str. 
280 | """ 281 | 282 | if not isinstance(okd_path, str): 283 | raise ValueError("Argument `okd_path` must be str.") 284 | if not isinstance(midi_path, str): 285 | raise ValueError("Argument `midi_path` must be str.") 286 | if not isinstance(sysex_to_text, bool): 287 | raise ValueError("Argument `sysex_to_text` must be bool.") 288 | 289 | with open(okd_path, "rb") as okd_file: 290 | okd = OkdFile.read(okd_file) 291 | midi = okd_to_midi(okd, sysex_to_text) 292 | midi.save(midi_path) 293 | 294 | def midi_to_okd( 295 | self, midi_path: str, playing_okd_path: str, p3_okd_path: str, scramble=False 296 | ) -> None: 297 | """Convert a Standard MIDI File to a OKD 298 | 299 | Args: 300 | midi_path (str): Input MIDI file path 301 | playing_okd_path (str): Output Playing OKD path 302 | p3_okd_path (str): Output P3 OKD path 303 | scramble (bool, optional): Scramble. Defaults to False. 304 | 305 | Raises: 306 | ValueError: Argument `midi_path` must be str. 307 | ValueError: Argument `playing_okd_path` must be str. 308 | ValueError: Argument `p3_okd_path` must be str. 309 | ValueError: Argument `scramble` must be bool. 310 | """ 311 | 312 | if not isinstance(midi_path, str): 313 | raise ValueError("Argument `midi_path` must be str.") 314 | if not isinstance(playing_okd_path, str): 315 | raise ValueError("Argument `playing_okd_path` must be str.") 316 | if not isinstance(p3_okd_path, str): 317 | raise ValueError("Argument `p3_okd_path` must be str.") 318 | if not isinstance(scramble, bool): 319 | raise ValueError("Argument `scramble` must be bool.") 320 | 321 | midi = mido.MidiFile(midi_path) 322 | header = YksOkdHeader(0, "YKS-1 v6.0v110", 0, 0, 0) 323 | self.__logger.info(f"Set header. 
header={header}") 324 | playing_okd, p3_okd = midi_to_okds(midi, header) 325 | with open(playing_okd_path, "wb") as playing_okd_file: 326 | self.__logger.info("Write Playing OKD.") 327 | playing_okd.write(playing_okd_file, scramble) 328 | with open(p3_okd_path, "wb") as p3_okd_file: 329 | self.__logger.info("Write P3 OKD.") 330 | p3_okd.write(p3_okd_file, scramble) 331 | 332 | def dump_mtf(self, mtf_path: str, output_path: str): 333 | """Dump files contained in a MTF file 334 | 335 | Args: 336 | mtf_path (str): Path to the MTF file 337 | output_path (str): Path to extract the archive into 338 | """ 339 | mtf_conversion.extract_mtf(mtf_path, output_path) 340 | 341 | def mtf_to_audio(self, mtf_path: str, output_path: str, export_each_file: bool = False): 342 | """Mix MTF file into "output.wav", "output.mid" files in extracted mtf folder. 343 | 344 | Args: 345 | mtf_path (str): Path to the MTF file 346 | output_path (str): Path to extract the archive into, output will be saved inside 347 | export_each_file (bool): Whether to export each individual audio file (RawADPCM → .wav, OPUS → .ogg, etc...) 
348 | """ 349 | mtf_root_path = mtf_conversion.extract_mtf(mtf_path, output_path) 350 | mtf_conversion.dump_playlist(mtf_root_path, export_each_file) 351 | mtf_conversion.dump_refs(mtf_root_path, export_each_file) 352 | 353 | 354 | def main() -> None: 355 | fire.Fire(Cli) 356 | 357 | 358 | if __name__ == "__main__": 359 | main() 360 | -------------------------------------------------------------------------------- /okd/chunks/p_track_chunk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from io import BytesIO 3 | import os 4 | from typing import BinaryIO, Self 5 | 6 | from midi.event import MidiEvent, MidiTrackEvent 7 | from ..okd_midi import ( 8 | read_status_byte, 9 | is_data_bytes, 10 | read_variable_int, 11 | read_extended_variable_int, 12 | write_variable_int, 13 | write_extended_variable_int, 14 | ) 15 | 16 | from .chunk_base import ChunkBase 17 | from .generic_chunk import GenericChunk 18 | from .p_track_info_chunk import PTrackInfoEntry, PTrackInfoChunk 19 | from .extended_p_track_info_chunk import ( 20 | ExtendedPTrackInfoEntry, 21 | ExtendedPTrackInfoChunk, 22 | ) 23 | from .p3_track_info_chunk import P3TrackInfoChunk 24 | 25 | 26 | @dataclass 27 | class PTrackEvent(MidiTrackEvent): 28 | """P-Track Event""" 29 | 30 | __END_OF_TRACK_MARK = b"\x00\x00\x00\x00" 31 | 32 | duration: int | None = None 33 | 34 | @staticmethod 35 | def read_sysex_data_bytes(stream: BinaryIO) -> bytes: 36 | """Read Data Bytes of SysEx Message 37 | 38 | Args: 39 | stream (BinaryIO): Input stream 40 | 41 | Raises: 42 | ValueError: Unterminated SysEx message detected 43 | 44 | Returns: 45 | bytes: Data Bytes 46 | """ 47 | data_bytes = b"" 48 | while True: 49 | byte = stream.read(1) 50 | if len(byte) < 1: 51 | raise ValueError("Too less read bytes.") 52 | data_bytes += byte 53 | byte = byte[0] 54 | if byte & 0x80 == 0x80: 55 | if byte != 0xF7: 56 | raise ValueError( 57 | f"Unterminated SysEx message 
detected. stop_byte={hex(byte)}" 58 | ) 59 | break 60 | return data_bytes 61 | 62 | @classmethod 63 | def read(cls, stream: BinaryIO) -> Self | None: 64 | delta_time = read_extended_variable_int(stream) 65 | 66 | end_of_track = stream.read(4) 67 | if end_of_track == PTrackEvent.__END_OF_TRACK_MARK: 68 | return None 69 | stream.seek(-4, os.SEEK_CUR) 70 | 71 | status_byte = read_status_byte(stream) 72 | status_type = status_byte & 0xF0 73 | 74 | # Channel voice messages 75 | if status_type == 0x80: 76 | # Note off 77 | data_bytes_length = 3 78 | elif status_type == 0x90: 79 | # Note on 80 | data_bytes_length = 2 81 | elif status_type == 0xA0: 82 | # Alternative CC AX 83 | data_bytes_length = 1 84 | elif status_type == 0xB0: 85 | # Control change 86 | data_bytes_length = 2 87 | elif status_type == 0xC0: 88 | # Alternative CC CX 89 | data_bytes_length = 1 90 | elif status_type == 0xD0: 91 | # Channel pressure 92 | data_bytes_length = 1 93 | elif status_type == 0xE0: 94 | # Pitch bend 95 | data_bytes_length = 2 96 | # System messages 97 | elif status_byte == 0xF0: 98 | # SysEx message 99 | data_bytes = PTrackEvent.read_sysex_data_bytes(stream) 100 | return cls(status_byte, data_bytes, delta_time) 101 | elif status_byte == 0xF8: 102 | # ADPCM note on 103 | data_bytes_length = 3 104 | elif status_byte == 0xF9: 105 | # Unknown 106 | data_bytes_length = 1 107 | elif status_byte == 0xFA: 108 | # ADPCM channel volume 109 | data_bytes_length = 1 110 | elif status_byte == 0xFD: 111 | # Enable channel grouping 112 | data_bytes_length = 0 113 | elif status_byte == 0xFE: 114 | # Compensation of Alternative CC 115 | byte = stream.read(1) 116 | if len(byte) < 1: 117 | raise ValueError("Too less read bytes.") 118 | stream.seek(-1, os.SEEK_CUR) 119 | byte = byte[0] 120 | if byte & 0xF0 == 0xA0: 121 | # Polyphonic key pressure 122 | data_bytes_length = 3 123 | elif byte & 0xF0 == 0xC0: 124 | # Program change 125 | data_bytes_length = 2 126 | else: 127 | raise ValueError( 128 | 
f"Unknown Compensation of Alternative CC detected. data_bytes[0]={format(byte, "02X")}" 129 | ) 130 | else: 131 | raise ValueError( 132 | f"Unknown Status byte detected. status_byte={format(status_byte, "02X")}" 133 | ) 134 | 135 | data_bytes: bytes = stream.read(data_bytes_length) 136 | data_bytes_validate = data_bytes[1:] if status_byte == 0xFE else data_bytes 137 | if not is_data_bytes(data_bytes_validate): 138 | raise ValueError( 139 | f"Invalid Data Byte detected. data_bytes=`{data_bytes.hex(" ").upper()}`" 140 | ) 141 | 142 | duration = None 143 | if status_type == 0x80 or status_type == 0x90: 144 | duration = read_variable_int(stream) 145 | 146 | return cls(status_byte, data_bytes, delta_time, duration) 147 | 148 | def write(self, stream: BinaryIO) -> None: 149 | """Write 150 | 151 | Args: 152 | stream (BinaryIO): Output stream 153 | """ 154 | write_extended_variable_int(stream, self.delta_time) 155 | stream.write(self.status_byte.to_bytes()) 156 | stream.write(self.data_bytes) 157 | if self.duration is not None: 158 | write_variable_int(stream, self.duration) 159 | 160 | 161 | @dataclass 162 | class PTrackAbsoluteTimeEvent(MidiEvent): 163 | """P-Track Absolute Time Event""" 164 | 165 | port: int 166 | track: int 167 | time: int 168 | 169 | 170 | @dataclass 171 | class PTrackChunk(ChunkBase): 172 | """P-Track Chunk""" 173 | 174 | PORTS = 4 175 | CHANNELS_PER_PORT = 16 176 | TOTAL_CHANNELS = CHANNELS_PER_PORT * PORTS 177 | 178 | CHUNK_NUMBER_PORT_MAP = [0, 1, 2, 2, 3] 179 | 180 | events: list[PTrackEvent] 181 | 182 | @classmethod 183 | def from_generic(cls, generic: GenericChunk) -> Self: 184 | """From Generic Chunk 185 | 186 | Args: 187 | generic (GenericChunk): Generic Chunk 188 | 189 | Returns: 190 | Self: Instance of this class 191 | """ 192 | stream = BytesIO(generic.payload) 193 | events: list[PTrackEvent] = [] 194 | while True: 195 | message = PTrackEvent.read(stream) 196 | if message is None: 197 | # End of Track 198 | break 199 | 
events.append(message) 200 | return cls(generic.id, events) 201 | 202 | @staticmethod 203 | def __relocate_event( 204 | track_info_entry: PTrackInfoEntry | ExtendedPTrackInfoEntry | P3TrackInfoChunk, 205 | status_byte: int, 206 | data_bytes: bytes, 207 | time: int, 208 | group_channel: bool, 209 | ) -> list[PTrackAbsoluteTimeEvent]: 210 | status_type = status_byte & 0xF0 211 | 212 | if status_byte == 0xFE: 213 | # Compensation of Alternative CC 214 | status_byte = data_bytes[0] 215 | status_type = status_byte & 0xF0 216 | data_bytes = data_bytes[1:] 217 | 218 | relocated_events: list[PTrackAbsoluteTimeEvent] = [] 219 | 220 | if status_type == 0xF0: 221 | # System messages 222 | for port in range(PTrackChunk.PORTS): 223 | if (track_info_entry.system_ex_ports >> port) & 0x0001 != 0x0001: 224 | continue 225 | 226 | track = port * PTrackChunk.CHANNELS_PER_PORT 227 | relocated_events.append( 228 | PTrackAbsoluteTimeEvent( 229 | status_byte, 230 | data_bytes, 231 | port, 232 | track, 233 | time, 234 | ) 235 | ) 236 | return relocated_events 237 | 238 | channel = status_byte & 0x0F 239 | channel_info_entry = track_info_entry.channel_info[channel] 240 | 241 | default_channel_group = track_info_entry.default_channel_groups[channel] 242 | # Fill default channel group 243 | if default_channel_group == 0x0000: 244 | default_channel_group = 0x0001 << channel 245 | 246 | for port in range(PTrackChunk.PORTS): 247 | if (channel_info_entry.ports >> port) & 0x0001 != 0x0001: 248 | continue 249 | 250 | for grouped_channel in range(PTrackChunk.CHANNELS_PER_PORT): 251 | if group_channel: 252 | if ( 253 | track_info_entry.channel_groups[channel] >> grouped_channel 254 | ) & 0x0001 != 0x0001: 255 | continue 256 | else: 257 | if (default_channel_group >> grouped_channel) & 0x0001 != 0x0001: 258 | continue 259 | 260 | track = (port * PTrackChunk.CHANNELS_PER_PORT) + grouped_channel 261 | relocated_status_byte = status_type | grouped_channel 262 | relocated_events.append( 263 | 
PTrackAbsoluteTimeEvent( 264 | relocated_status_byte, 265 | data_bytes, 266 | port, 267 | track, 268 | time, 269 | ) 270 | ) 271 | 272 | return relocated_events 273 | 274 | def track_number(self) -> int: 275 | """Track Number 276 | 277 | Returns: 278 | int: Track Number 279 | """ 280 | return self.id[3] 281 | 282 | def exists_channel_message(self, channel: int) -> bool: 283 | """Check if there exists a message for a specific channel in the P-Track chunk 284 | 285 | Args: 286 | channel: Channel number 287 | 288 | Returns: 289 | bool: True if a message exists for the specified channel, False otherwise 290 | """ 291 | return any( 292 | (event.status_byte & 0xF0) != 0xF0 and (event.status_byte & 0x0F) == channel 293 | for event in self.events 294 | ) 295 | 296 | def _payload_buffer(self) -> bytes: 297 | stream = BytesIO() 298 | for message in self.events: 299 | message.write(stream) 300 | stream.seek(0) 301 | return stream.read() 302 | 303 | def to_json_serializable(self): 304 | json_events = [] 305 | for message in self.events: 306 | json_events.append( 307 | { 308 | "delta_time": message.delta_time, 309 | "status_byte": format(message.status_byte, "02X"), 310 | "data": message.data_bytes.hex(" ").upper(), 311 | "duration": message.duration, 312 | } 313 | ) 314 | return {"events": json_events} 315 | 316 | def absolute_time_track( 317 | self, 318 | track_info: PTrackInfoChunk | ExtendedPTrackInfoChunk | P3TrackInfoChunk, 319 | ) -> list[PTrackAbsoluteTimeEvent]: 320 | if isinstance(track_info, (PTrackInfoChunk, ExtendedPTrackInfoChunk)): 321 | track_info_list = track_info.data 322 | elif isinstance(track_info, P3TrackInfoChunk): 323 | track_info_list = [track_info] 324 | else: 325 | raise ValueError( 326 | "Argument `track_info` must be PTrackInfoChunk, ExtendedPTrackInfoChunk or P3TrackInfoChunk." 
327 | ) 328 | 329 | absolute_time_track: list[PTrackAbsoluteTimeEvent] = [] 330 | track_info_entry = next( 331 | ( 332 | entry 333 | for entry in track_info_list 334 | if entry.track_number == self.track_number() 335 | ), 336 | None, 337 | ) 338 | if track_info_entry is None: 339 | raise ValueError(f"P-Track Info for track {self.track_number()} not found.") 340 | 341 | is_lossless_track = track_info_entry.is_lossless_track() 342 | 343 | absolute_time_track: list[PTrackAbsoluteTimeEvent] = [] 344 | absolute_time = 0 345 | channel_grouping_enabled = False 346 | for event in self.events: 347 | absolute_time += event.delta_time 348 | 349 | status_type = event.status_byte_type() 350 | if status_type == 0x80: 351 | duration = event.duration 352 | if duration is None: 353 | continue 354 | 355 | channel = event.channel() 356 | note_number = event.data_bytes[0] 357 | note_on_velocity = event.data_bytes[1] 358 | note_off_velocity = event.data_bytes[2] 359 | if not is_lossless_track: 360 | duration <<= 2 361 | # Note on 362 | absolute_time_track += PTrackChunk.__relocate_event( 363 | track_info_entry, 364 | 0x90 | channel, 365 | bytes([note_number, note_on_velocity]), 366 | absolute_time, 367 | channel_grouping_enabled, 368 | ) 369 | # Note off 370 | absolute_time_track += PTrackChunk.__relocate_event( 371 | track_info_entry, 372 | 0x80 | channel, 373 | bytes([note_number, note_off_velocity]), 374 | absolute_time + duration, 375 | channel_grouping_enabled, 376 | ) 377 | elif status_type == 0x90: 378 | duration = event.duration 379 | if duration is None: 380 | continue 381 | 382 | channel = event.channel() 383 | note_number = event.data_bytes[0] 384 | note_on_velocity = event.data_bytes[1] 385 | if not is_lossless_track: 386 | duration <<= 2 387 | # Note on 388 | absolute_time_track += PTrackChunk.__relocate_event( 389 | track_info_entry, 390 | event.status_byte, 391 | event.data_bytes, 392 | absolute_time, 393 | channel_grouping_enabled, 394 | ) 395 | # Note off 396 | 
absolute_time_track += PTrackChunk.__relocate_event( 397 | track_info_entry, 398 | 0x80 | channel, 399 | bytes([note_number, 0x40]), 400 | absolute_time + duration, 401 | channel_grouping_enabled, 402 | ) 403 | elif status_type == 0xA0: 404 | # CC: channel_info_entry.control_change_ax 405 | channel = event.channel() 406 | channel_info_entry = track_info_entry.channel_info[channel] 407 | absolute_time_track += PTrackChunk.__relocate_event( 408 | track_info_entry, 409 | 0xB0 | channel, 410 | bytes([channel_info_entry.control_change_ax, event.data_bytes[0]]), 411 | absolute_time, 412 | channel_grouping_enabled, 413 | ) 414 | elif status_type == 0xC0: 415 | # CC: channel_info_entry.control_change_cx 416 | channel = event.channel() 417 | channel_info_entry = track_info_entry.channel_info[channel] 418 | absolute_time_track += PTrackChunk.__relocate_event( 419 | track_info_entry, 420 | 0xB0 | channel, 421 | bytes([channel_info_entry.control_change_cx, event.data_bytes[0]]), 422 | absolute_time, 423 | channel_grouping_enabled, 424 | ) 425 | else: 426 | absolute_time_track += PTrackChunk.__relocate_event( 427 | track_info_entry, 428 | event.status_byte, 429 | event.data_bytes, 430 | absolute_time, 431 | channel_grouping_enabled, 432 | ) 433 | 434 | channel_grouping_enabled = event.status_byte == 0xFD 435 | 436 | absolute_time_track.sort( 437 | key=lambda absolute_time_event: absolute_time_event.time 438 | ) 439 | 440 | return absolute_time_track 441 | -------------------------------------------------------------------------------- /okd/okd_file.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from dataclasses import dataclass 3 | from io import BytesIO 4 | from logging import getLogger 5 | from typing import Self, Union, BinaryIO 6 | 7 | from sprc.header import SprcHeader 8 | from .chunks import OkdChunk, ChunkBase, read_chunk 9 | from .okd_file_scramble import ( 10 | 
choose_scramble_pattern_index, 11 | scramble, 12 | detect_scramble_pattern_index, 13 | descramble, 14 | ) 15 | 16 | 17 | @dataclass 18 | class OkdHeaderBase(ABC): 19 | """OKD Header Base Class""" 20 | 21 | MAGIC_BYTES = b"YKS1" 22 | FIXED_PART_LENGTH = 40 23 | 24 | length: int 25 | version: str 26 | id_karaoke: int 27 | adpcm_offset: int 28 | encryption_mode: int 29 | 30 | @staticmethod 31 | def _read_common( 32 | stream: BinaryIO, 33 | scramble_pattern_index: int | None = None, 34 | ) -> tuple[int, str, int, int, int, bytes]: 35 | """Read Common Part 36 | 37 | Args: 38 | stream (BinaryIO): Input stream 39 | scramble_pattern_index (int): Scramble pattern index 40 | 41 | Raises: 42 | ValueError: Invalid `magic_bytes` 43 | 44 | Returns: 45 | tuple[int, str, int, int, int, bytes]: length, version, id_karaoke, adpcm_offset, encryption_mode and optional_data 46 | """ 47 | if scramble_pattern_index is None: 48 | fixed_part_buffer = stream.read(OkdHeaderBase.FIXED_PART_LENGTH) 49 | else: 50 | fixed_part_stream = BytesIO() 51 | scramble_pattern_index = descramble( 52 | stream, 53 | fixed_part_stream, 54 | scramble_pattern_index, 55 | OkdHeaderBase.FIXED_PART_LENGTH, 56 | ) 57 | fixed_part_stream.seek(0) 58 | fixed_part_buffer = fixed_part_stream.read() 59 | if len(fixed_part_buffer) < OkdHeaderBase.FIXED_PART_LENGTH: 60 | raise ValueError("Too less read bytes.") 61 | 62 | magic_bytes = fixed_part_buffer[0:4] 63 | if magic_bytes != OkdHeaderBase.MAGIC_BYTES: 64 | raise ValueError("Invalid `magic_bytes`.") 65 | length = int.from_bytes(fixed_part_buffer[4:8], "big") 66 | version = fixed_part_buffer[8:24].decode("ascii") 67 | id_karaoke = int.from_bytes(fixed_part_buffer[24:28], "big") 68 | adpcm_offset = int.from_bytes(fixed_part_buffer[28:32], "big") 69 | encryption_mode = int.from_bytes(fixed_part_buffer[32:36], "big") 70 | optional_data_length = int.from_bytes(fixed_part_buffer[36:40], "big") 71 | 72 | if scramble_pattern_index is None: 73 | variable_part_buffer = 
stream.read(optional_data_length) 74 | else: 75 | variable_part_stream = BytesIO() 76 | descramble( 77 | stream, 78 | variable_part_stream, 79 | scramble_pattern_index, 80 | optional_data_length, 81 | ) 82 | variable_part_stream.seek(0) 83 | variable_part_buffer = variable_part_stream.read() 84 | if len(variable_part_buffer) < optional_data_length: 85 | raise ValueError("Too less read bytes.") 86 | 87 | optional_data = variable_part_buffer 88 | 89 | return ( 90 | length, 91 | version, 92 | id_karaoke, 93 | adpcm_offset, 94 | encryption_mode, 95 | optional_data, 96 | ) 97 | 98 | @staticmethod 99 | @abstractmethod 100 | def optional_data_buffer_size() -> int: 101 | """Size of Optional Data Buffer 102 | 103 | Returns: 104 | bytes: Size of Optional Data Buffer 105 | """ 106 | pass 107 | 108 | @abstractmethod 109 | def _optional_data_buffer(self) -> bytes: 110 | """Optional Data Buffer 111 | 112 | Returns: 113 | bytes: Optional Data Buffer 114 | """ 115 | pass 116 | 117 | def write(self, stream: BinaryIO) -> None: 118 | """Write 119 | 120 | Args: 121 | stream (BinaryIO): Output stream 122 | """ 123 | stream.write(OkdHeaderBase.MAGIC_BYTES) 124 | stream.write(self.length.to_bytes(4, "big")) 125 | stream.write(self.version.encode("ascii").ljust(16, b"\x00")) 126 | stream.write(self.id_karaoke.to_bytes(4, "big")) 127 | stream.write(self.adpcm_offset.to_bytes(4, "big")) 128 | stream.write(self.encryption_mode.to_bytes(4, "big")) 129 | optional_data_buffer = self._optional_data_buffer() 130 | stream.write(len(optional_data_buffer).to_bytes(4, "big")) 131 | stream.write(optional_data_buffer) 132 | 133 | 134 | @dataclass 135 | class OkdGenericHeader(OkdHeaderBase): 136 | """OKD Generic Header""" 137 | 138 | optional_data: bytes 139 | 140 | @classmethod 141 | def read( 142 | cls, 143 | stream: BinaryIO, 144 | scramble_pattern_index: int | None = None, 145 | ) -> Self: 146 | """Read 147 | 148 | Args: 149 | stream (BinaryIO): Input stream 150 | scramble_pattern_index (int): 
Scramble pattern index 151 | 152 | Returns: 153 | Self: Instance of this class 154 | """ 155 | length, version, id_karaoke, adpcm_offset, encryption_mode, optional_data = ( 156 | OkdHeaderBase._read_common(stream, scramble_pattern_index) 157 | ) 158 | return cls( 159 | length, 160 | version, 161 | id_karaoke, 162 | adpcm_offset, 163 | encryption_mode, 164 | optional_data, 165 | ) 166 | 167 | @staticmethod 168 | def optional_data_buffer_size() -> int: 169 | raise NotImplementedError() 170 | 171 | def _optional_data_buffer(self) -> bytes: 172 | return self.optional_data 173 | 174 | 175 | @dataclass 176 | class YksOkdHeader(OkdHeaderBase): 177 | """YKS OKD Header""" 178 | 179 | @classmethod 180 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 181 | """From Generic OKD Header 182 | 183 | Args: 184 | generic (OkdGenericHeader): Generic OKD Header 185 | 186 | Returns: 187 | Self: Instance of this class 188 | """ 189 | return cls( 190 | generic.length, 191 | generic.version, 192 | generic.id_karaoke, 193 | generic.adpcm_offset, 194 | generic.encryption_mode, 195 | ) 196 | 197 | @staticmethod 198 | def optional_data_buffer_size() -> int: 199 | return 0 200 | 201 | def _optional_data_buffer(self) -> bytes: 202 | return b"" 203 | 204 | 205 | @dataclass 206 | class MmtOkdHeader(OkdHeaderBase): 207 | """MMT OKD Header""" 208 | 209 | yks_chunks_length: int 210 | mmt_chunks_length: int 211 | yks_chunks_crc: int 212 | crc: int 213 | 214 | @classmethod 215 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 216 | """From Generic OKD Header 217 | 218 | Args: 219 | generic (OkdGenericHeader): Generic OKD Header 220 | 221 | Returns: 222 | Self: Instance of this class 223 | """ 224 | yks_chunks_length = int.from_bytes(generic.optional_data[0:4], "big") 225 | mmt_chunks_length = int.from_bytes(generic.optional_data[4:8], "big") 226 | yks_chunks_crc = int.from_bytes(generic.optional_data[8:10], "big") 227 | crc = int.from_bytes(generic.optional_data[10:12], "big") 228 
| return cls( 229 | generic.length, 230 | generic.version, 231 | generic.id_karaoke, 232 | generic.adpcm_offset, 233 | generic.encryption_mode, 234 | yks_chunks_length, 235 | mmt_chunks_length, 236 | yks_chunks_crc, 237 | crc, 238 | ) 239 | 240 | @staticmethod 241 | def optional_data_buffer_size() -> int: 242 | return 12 243 | 244 | def _optional_data_buffer(self) -> bytes: 245 | buffer = self.yks_chunks_length.to_bytes(4, "big") 246 | buffer += self.mmt_chunks_length.to_bytes(4, "big") 247 | buffer += self.yks_chunks_crc.to_bytes(2, "big") 248 | buffer += self.crc.to_bytes(2, "big") 249 | return buffer 250 | 251 | 252 | @dataclass 253 | class MmkOkdHeader(OkdHeaderBase): 254 | """MMK OKD Header""" 255 | 256 | yks_chunks_length: int 257 | mmt_chunks_length: int 258 | mmk_chunks_length: int 259 | yks_chunks_crc: int 260 | yks_mmt_chunks_crc: int 261 | crc: int 262 | 263 | @classmethod 264 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 265 | """From Generic OKD Header 266 | 267 | Args: 268 | generic (OkdGenericHeader): Generic OKD Header 269 | 270 | Returns: 271 | Self: Instance of this class 272 | """ 273 | yks_chunks_length = int.from_bytes(generic.optional_data[0:4], "big") 274 | mmt_chunks_length = int.from_bytes(generic.optional_data[4:8], "big") 275 | mmk_chunks_length = int.from_bytes(generic.optional_data[8:12], "big") 276 | yks_chunks_crc = int.from_bytes(generic.optional_data[12:14], "big") 277 | yks_mmt_chunks_crc = int.from_bytes(generic.optional_data[14:16], "big") 278 | crc = int.from_bytes(generic.optional_data[16:18], "big") 279 | return cls( 280 | generic.length, 281 | generic.version, 282 | generic.id_karaoke, 283 | generic.adpcm_offset, 284 | generic.encryption_mode, 285 | yks_chunks_length, 286 | mmt_chunks_length, 287 | mmk_chunks_length, 288 | yks_chunks_crc, 289 | yks_mmt_chunks_crc, 290 | crc, 291 | ) 292 | 293 | @staticmethod 294 | def optional_data_buffer_size() -> int: 295 | return 20 296 | 297 | def _optional_data_buffer(self) 
-> bytes: 298 | buffer = self.yks_chunks_length.to_bytes(4, "big") 299 | buffer += self.mmt_chunks_length.to_bytes(4, "big") 300 | buffer += self.mmk_chunks_length.to_bytes(4, "big") 301 | buffer += self.yks_chunks_crc.to_bytes(2, "big") 302 | buffer += self.yks_mmt_chunks_crc.to_bytes(2, "big") 303 | buffer += self.crc.to_bytes(2, "big") 304 | # Padding 305 | buffer += b"\x00" * 2 306 | return buffer 307 | 308 | 309 | @dataclass 310 | class SprOkdHeader(OkdHeaderBase): 311 | """SPR OKD Header""" 312 | 313 | yks_chunks_length: int 314 | mmt_chunks_length: int 315 | mmk_chunks_length: int 316 | spr_chunks_length: int 317 | yks_chunks_crc: int 318 | yks_mmt_chunks_crc: int 319 | yks_mmt_mmk_chunks_crc: int 320 | crc: int 321 | 322 | @classmethod 323 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 324 | """From Generic OKD Header 325 | 326 | Args: 327 | generic (OkdGenericHeader): Generic OKD Header 328 | 329 | Returns: 330 | Self: Instance of this class 331 | """ 332 | yks_chunks_length = int.from_bytes(generic.optional_data[0:4], "big") 333 | mmt_chunks_length = int.from_bytes(generic.optional_data[4:8], "big") 334 | mmk_chunks_length = int.from_bytes(generic.optional_data[8:12], "big") 335 | spr_chunks_length = int.from_bytes(generic.optional_data[12:16], "big") 336 | yks_chunks_crc = int.from_bytes(generic.optional_data[16:18], "big") 337 | yks_mmt_chunks_crc = int.from_bytes(generic.optional_data[18:20], "big") 338 | yks_mmt_mmk_chunks_crc = int.from_bytes(generic.optional_data[20:22], "big") 339 | crc = int.from_bytes(generic.optional_data[22:24], "big") 340 | 341 | return cls( 342 | generic.length, 343 | generic.version, 344 | generic.id_karaoke, 345 | generic.adpcm_offset, 346 | generic.encryption_mode, 347 | yks_chunks_length, 348 | mmt_chunks_length, 349 | mmk_chunks_length, 350 | spr_chunks_length, 351 | yks_chunks_crc, 352 | yks_mmt_chunks_crc, 353 | yks_mmt_mmk_chunks_crc, 354 | crc, 355 | ) 356 | 357 | @staticmethod 358 | def 
optional_data_buffer_size() -> int: 359 | return 24 360 | 361 | def _optional_data_buffer(self) -> bytes: 362 | buffer = self.yks_chunks_length.to_bytes(4, "big") 363 | buffer += self.mmt_chunks_length.to_bytes(4, "big") 364 | buffer += self.mmk_chunks_length.to_bytes(4, "big") 365 | buffer += self.spr_chunks_length.to_bytes(4, "big") 366 | buffer += self.yks_chunks_crc.to_bytes(2, "big") 367 | buffer += self.yks_mmt_chunks_crc.to_bytes(2, "big") 368 | buffer += self.yks_mmt_mmk_chunks_crc.to_bytes(2, "big") 369 | buffer += self.crc.to_bytes(2, "big") 370 | return buffer 371 | 372 | 373 | @dataclass 374 | class DioOkdHeader(OkdHeaderBase): 375 | """DIO OKD Header""" 376 | 377 | yks_chunks_length: int 378 | mmt_chunks_length: int 379 | mmk_chunks_length: int 380 | spr_chunks_length: int 381 | dio_chunks_length: int 382 | yks_chunks_crc: int 383 | yks_mmt_chunks_crc: int 384 | yks_mmt_mmk_chunks_crc: int 385 | yks_mmt_mmk_spr_chunks_crc: int 386 | crc: int 387 | 388 | @classmethod 389 | def from_generic(cls, generic: OkdGenericHeader) -> Self: 390 | """From Generic OKD Header 391 | 392 | Args: 393 | generic (OkdGenericHeader): Generic OKD Header 394 | 395 | Returns: 396 | Self: Instance of this class 397 | """ 398 | yks_chunks_length = int.from_bytes(generic.optional_data[0:4], "big") 399 | mmt_chunks_length = int.from_bytes(generic.optional_data[4:8], "big") 400 | mmk_chunks_length = int.from_bytes(generic.optional_data[8:12], "big") 401 | spr_chunks_length = int.from_bytes(generic.optional_data[12:16], "big") 402 | dio_chunks_length = int.from_bytes(generic.optional_data[16:20], "big") 403 | yks_chunks_crc = int.from_bytes(generic.optional_data[20:22], "big") 404 | yks_mmt_chunks_crc = int.from_bytes(generic.optional_data[22:24], "big") 405 | yks_mmt_mmk_chunks_crc = int.from_bytes(generic.optional_data[24:26], "big") 406 | yks_mmt_mmk_spr_chunks_crc = int.from_bytes(generic.optional_data[26:28], "big") 407 | crc = int.from_bytes(generic.optional_data[28:30], 
"big") 408 | return cls( 409 | generic.length, 410 | generic.version, 411 | generic.id_karaoke, 412 | generic.adpcm_offset, 413 | generic.encryption_mode, 414 | yks_chunks_length, 415 | mmt_chunks_length, 416 | mmk_chunks_length, 417 | spr_chunks_length, 418 | dio_chunks_length, 419 | yks_chunks_crc, 420 | yks_mmt_chunks_crc, 421 | yks_mmt_mmk_chunks_crc, 422 | yks_mmt_mmk_spr_chunks_crc, 423 | crc, 424 | ) 425 | 426 | @staticmethod 427 | def optional_data_buffer_size() -> int: 428 | return 32 429 | 430 | def _optional_data_buffer(self) -> bytes: 431 | buffer = self.yks_chunks_length.to_bytes(4, "big") 432 | buffer += self.mmt_chunks_length.to_bytes(4, "big") 433 | buffer += self.mmk_chunks_length.to_bytes(4, "big") 434 | buffer += self.spr_chunks_length.to_bytes(4, "big") 435 | buffer += self.dio_chunks_length.to_bytes(4, "big") 436 | buffer += self.yks_chunks_crc.to_bytes(2, "big") 437 | buffer += self.yks_mmt_chunks_crc.to_bytes(2, "big") 438 | buffer += self.yks_mmt_mmk_chunks_crc.to_bytes(2, "big") 439 | buffer += self.yks_mmt_mmk_spr_chunks_crc.to_bytes(2, "big") 440 | buffer += self.crc.to_bytes(2, "big") 441 | # Padding 442 | buffer += b"\x00" * 2 443 | return buffer 444 | 445 | 446 | OkdHeader = Union[OkdGenericHeader, YksOkdHeader, MmtOkdHeader, MmkOkdHeader, SprOkdHeader, DioOkdHeader] 447 | 448 | 449 | def read_okd_header( 450 | stream: BinaryIO, scramble_pattern_index: int | None = None 451 | ) -> OkdHeader: 452 | """Read OKD Header 453 | 454 | Args: 455 | stream (BinaryIO): Input stream 456 | scramble_pattern_index (int | None, optional): Scramble pattern index. Defaults to None. 
@dataclass
class OkdFile:
    """OKD File

    A parsed OKD: one header plus the list of chunks that follow it.
    """

    __logger = getLogger(__name__)

    # header: parsed OKD header (any concrete OkdHeader variant)
    header: OkdHeader
    # chunks: chunks read from the (descrambled) chunk area
    chunks: list[OkdChunk]

    @classmethod
    def read(cls, stream: BinaryIO) -> Self:
        """Read

        Args:
            stream (BinaryIO): Input stream

        Raises:
            ValueError: Invalid `magic_bytes`

        Returns:
            Self: Instance of this class
        """
        if SprcHeader.has_sprc_header(stream):
            # Validate SPRC Header
            OkdFile.__logger.info("SPRC Header detected.")
            sprc_header = SprcHeader.read(stream)
            if not sprc_header.validate_crc(stream):
                raise ValueError("SPRC Header CRC validation failed.")
            OkdFile.__logger.info("SPRC Header CRC validation succeeded.")

        # None means the file is plaintext; an index selects the pattern
        # used to descramble the header and chunk area.
        scramble_pattern_index = detect_scramble_pattern_index(
            stream, OkdHeaderBase.MAGIC_BYTES
        )

        # Header
        header = read_okd_header(stream, scramble_pattern_index)
        # `header.length + 8` mirrors `write`, which sets `length` to the
        # total size minus 8 — presumably excluding the magic and length
        # fields themselves (TODO confirm against the format spec).
        if header.adpcm_offset == 0:
            # No ADPCM part: the entire remainder is (possibly) scrambled.
            scrambled_length = (header.length + 8) - (
                OkdHeaderBase.FIXED_PART_LENGTH + header.optional_data_buffer_size()
            )
            plaintext_length = 0
        else:
            # Scrambling stops at `adpcm_offset`; the rest is plaintext.
            scrambled_length = header.adpcm_offset - (
                OkdHeaderBase.FIXED_PART_LENGTH + header.optional_data_buffer_size()
            )
            plaintext_length = (header.length + 8) - header.adpcm_offset
        chunks_stream = BytesIO()
        if scramble_pattern_index is None:
            chunks_stream.write(stream.read())
        else:
            descramble(stream, chunks_stream, scramble_pattern_index, scrambled_length)
            # Plaintext part
            chunks_stream.write(stream.read(plaintext_length))

        chunks: list[OkdChunk] = []
        chunks_stream.seek(0)
        while True:
            if ChunkBase.peek_header(chunks_stream) is None:
                # Reached to End of File
                break
            chunk = read_chunk(chunks_stream)
            chunks.append(chunk)

        return cls(header, chunks)

    def write(self, stream: BinaryIO, should_scramble: bool = False) -> None:
        """Write

        Args:
            stream (BinaryIO): Output stream
            should_scramble (bool, optional): Scramble. Defaults to False.
        """
        # Make chunks buffer
        chunks_stream = BytesIO()
        for chunk in self.chunks:
            chunk.write(chunks_stream)
        # Header `length` counts everything after the magic + length fields
        # (hence the -8); it is recomputed so it stays consistent with the
        # chunks actually being written.
        self.header.length = (
            OkdHeaderBase.FIXED_PART_LENGTH
            + len(self.header._optional_data_buffer())
            + chunks_stream.tell()
            - 8
        )
        self.header.encryption_mode = 1 if should_scramble else 0
        chunks_stream.seek(0)

        # Make header buffer
        header_stream = BytesIO()
        self.header.write(header_stream)
        header_stream.seek(0)

        if should_scramble:
            # Header and chunks are scrambled with the same pattern index.
            scramble_pattern_index = choose_scramble_pattern_index()
            scramble(header_stream, stream, scramble_pattern_index)
            scramble(chunks_stream, stream, scramble_pattern_index)
        else:
            stream.write(header_stream.read())
            stream.write(chunks_stream.read())
        # End of file
        stream.write(b"\x00\x00\x00\x00")