├── .github └── workflows │ └── main.yml ├── .gitignore ├── LICENSE ├── README.md ├── poetry.lock ├── pyproject.toml ├── src └── nutcracker │ ├── __init__.py │ ├── chiper │ ├── __init__.py │ └── xor.py │ ├── codex │ ├── __init__.py │ ├── base.py │ ├── bomp.py │ ├── bpp_codec.py │ ├── bpp_cost.py │ ├── codex.py │ ├── codex1.py │ ├── codex37_np.py │ ├── codex47_np.py │ ├── nutfont.py │ ├── rle.py │ └── smap.py │ ├── decode_san_audio.py │ ├── earwax │ ├── __init__.py │ ├── older.py │ ├── older_room.py │ ├── older_sizeonly.py │ ├── preset.py │ ├── resource.py │ ├── room.py │ ├── room_older.py │ ├── windex_v3.py │ ├── windex_v3_older.py │ └── windex_v4.py │ ├── graphics │ ├── __init__.py │ ├── frame.py │ ├── grid.py │ └── image.py │ ├── kernel │ ├── __init__.py │ ├── align.py │ ├── buffer.py │ ├── chunk.py │ ├── element.py │ ├── index.py │ ├── iterchunk.py │ ├── preset.py │ ├── resource.py │ ├── runner.py │ ├── settings.py │ ├── structured.py │ └── tree.py │ ├── kernel2 │ ├── __init__.py │ ├── chunk.py │ ├── element.py │ ├── fileio.py │ ├── preset.py │ └── tree.py │ ├── py.typed │ ├── runner.py │ ├── smush │ ├── __init__.py │ ├── ahdr.py │ ├── anim.py │ ├── compress.py │ ├── decode.py │ ├── element.py │ ├── encode.py │ ├── encode_san_seq.py │ ├── fobj.py │ ├── preset.py │ ├── runner.py │ ├── saud.py │ └── schema.py │ ├── sputm │ ├── __init__.py │ ├── build.py │ ├── char │ │ ├── decode.py │ │ └── encode.py │ ├── costume │ │ ├── __init__.py │ │ ├── akos.py │ │ ├── akos_encode.py │ │ ├── awiz.py │ │ ├── awiz_encode.py │ │ ├── banner.py │ │ └── cost.py │ ├── index.py │ ├── preset.py │ ├── resource.py │ ├── room │ │ ├── encode_image.py │ │ ├── orgroom.py │ │ ├── pproom.py │ │ ├── proom.py │ │ └── runner.py │ ├── runner.py │ ├── schema.py │ ├── script │ │ ├── __init__.py │ │ ├── bytecode.py │ │ ├── opcodes.py │ │ ├── opcodes_v5.py │ │ ├── parser.py │ │ └── shared.py │ ├── strings.py │ ├── tree.py │ ├── windex │ │ ├── __init__.py │ │ ├── runner.py │ │ └── scu.py │ ├── 
windex_v5.py │ └── windex_v6.py │ ├── sputm_old │ ├── song.py │ ├── sound.py │ └── tlkb.py │ └── utils │ ├── __init__.py │ ├── copyio.py │ ├── fileio.py │ ├── funcutils.py │ └── libio.py ├── test.bat ├── tests ├── __init__.py └── test_nutcracker.py └── vocal.yml /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | 3 | name: Release Binary 4 | 5 | on: 6 | push: 7 | branches: [ develop ] 8 | 9 | # Allows you to run this workflow manually from the Actions tab 10 | workflow_dispatch: 11 | 12 | jobs: 13 | build: 14 | runs-on: ${{ matrix.os }} 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | os: 19 | - windows-latest 20 | - ubuntu-latest 21 | - macos-latest 22 | - macos-13 23 | defaults: 24 | run: 25 | shell: bash 26 | 27 | steps: 28 | - uses: actions/checkout@v4 29 | - uses: actions/setup-python@v5 30 | with: 31 | python-version: 3.12.4 32 | - name: Install Poetry 33 | uses: snok/install-poetry@v1 34 | 35 | - run: | 36 | poetry install --no-root 37 | poetry build 38 | poetry run pip install --no-index --find-links=dist nutcracker 39 | mv dist dist_old 40 | poetry run pyinstaller --onefile -n nutcracker src/nutcracker/runner.py 41 | # Optionally verify that it works (provided that it does not need user interaction) 42 | - run: ./dist/nutcracker --help 43 | - run: cp README.md ./dist/README.md 44 | - run: poetry run pip-licenses --from=all --format=plain-vertical --with-license-file --no-license-path --with-urls --with-description --output-file ./dist/LICENSE 45 | 46 | - uses: actions/upload-artifact@v4 47 | with: 48 | name: nutcracker-${{ runner.os }}_${{ runner.arch }} 49 | path: dist/* 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 
4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | # VS Code 107 | .vscode/ 108 | 109 | tr.txt 110 | *.png 111 | *.pyc 112 | *.nut 113 | *.san 114 | *.xcf 115 | .vscode 116 | *.txt 117 | *.raw 118 | *.wav 119 | hebtest.bat 120 | test_he.bat 121 | samples/ 122 | CHARS/ 123 | ROOMS/ 124 | SCRIPTS/ 125 | SOUNDS/ 126 | AKOS/ 127 | DIGIS/ 128 | RMIMS/ 129 | RMDAS/ 130 | *LECF*/ 131 | rpdump.xml 132 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NUTCracker 2 | Tools for editing resources in SCUMM games. 3 | 4 | ## Features: 5 | * Extract and Edit fonts for v5-v7 + HE 6 | * Extract and Edit NUT fonts - v7-v8 7 | * Extract and Replace SMUSH video frames 8 | * Compress SMUSH videos (like scummvm-tools) 9 | * Extract and Rebuild game resources - v5-v8 + HE 10 | * Extract and Inject text strings - v5-v8 + HE 11 | * Extract and Replace background and objects images - v5-v8 + HE (option to extract EGA backgrounds) 12 | * Decompile game scripts to Windex[*]-like syntax - v5-v8 + HE 13 | 14 | [*] SCUMM debugger from https://quickandeasysoftware.net/monkey-island-2-talkie-prototype and HE demos for DOS 15 | 16 | ## Installation 17 | Latest development binaries can be downloaded here 18 | 19 | Linux: https://nightly.link/BLooperZ/nutcracker/workflows/main/develop/nutcracker-Linux_X64.zip 20 | 21 | macOS (Apple silicon) https://nightly.link/BLooperZ/nutcracker/workflows/main/develop/nutcracker-macOS_ARM64.zip 22 | 23 | macOS (Intel-based): https://nightly.link/BLooperZ/nutcracker/workflows/main/develop/nutcracker-macOS_X64.zip 24 | 25 | Windows: https://nightly.link/BLooperZ/nutcracker/workflows/main/develop/nutcracker-Windows_X64.zip 26 | 27 | 28 | Thanks to https://nightly.link/ 29 | 30 | ## Resources 31 | 32 | ### Extract and rebuild 33 | 34 | Supported games: V5-V8, HE 35 | 36 | Extract game resources to patch files using: 37 | ``` 38 | nutcracker sputm extract PATH/TO/GAME.000 39 | ``` 40 | *Replace `PATH/TO/GAME.000` to actual game index file (Usually ends with `.000`, `.LA0` or `.HE0`) 41 | 42 | This also creates XML-like file `rpdump.xml` to show which files were extracted. 
43 | 44 | Rebuild game resources from patches (using original resource as reference): 45 | ``` 46 | nutcracker sputm build --ref PATH/TO/GAME.000 GAME 47 | ``` 48 | 49 | ## Fonts 50 | 51 | ### SPUTM Font (`CHAR` chunks) 52 | 53 | Supported games: V5-V7, HE 54 | 55 | Extract the fonts using: 56 | ``` 57 | nutcracker sputm fonts_extract PATH/TO/GAME.000 58 | ``` 59 | 60 | *Replace `PATH/TO/GAME.000` to actual game index file (Usually ends with `.000`, `.LA0` or `.HE0`) 61 | 62 | fonts will be extracted as PNG images to directory `GAME/chars` relative to workdir. 63 | 64 | *Replace `GAME` with name of the game (e.g. `ATLANTIS` if game index file is `ATLANTIS.000`) 65 | 66 | Modify the font images with any image editor. 67 | 68 | Create patch files for the modified font: 69 | ``` 70 | nutcracker sputm fonts_inject --ref PATH/TO/GAME.000 GAME 71 | ``` 72 | Rebuild game resources 73 | ``` 74 | nutcracker sputm build --ref PATH/TO/GAME.000 GAME 75 | ``` 76 | 77 | ### NUT Fonts 78 | 79 | Supported games: V7-V8 80 | 81 | #### Decoding 82 | Decode all NUT files in given directory DATADIR 83 | ``` 84 | nutcracker smush decode DATADIR/*.NUT --nut --target OUTDIR 85 | ``` 86 | Creates a font image file named chars.png in OUTDIR which can be edited using regular image editing software (e.g. GIMP) 87 | 88 | #### Encoding 89 | Encode given font image (PNG_FILE) with given codec number (CODEC) using REF_NUT_FILE as reference 90 | ``` 91 | python -m nutcracker.smush.encode PNG_FILE --target NEW_NUT_FILE --ref REF_NUT_FILE --codec CODEC [--fake CODEC] 92 | ``` 93 | This will convert font image file back to font file (NEW_NUT_FILE) which can be used in game. 94 | 95 | Available codecs: 96 | * 21 (FT + The Dig*) 97 | * 44 (COMI*) 98 | 99 | *FONT3.NUT and the fonts in The Dig was actually encoded using codec 21 method but marked as 44. 100 | It can be achieved using `--codec 21 --fake 44`. 
101 | see examples in [test.bat](test.bat) 102 | 103 | ## SMUSH Videos 104 | 105 | ### Decode and Re-encode 106 | 107 | Supported games: V7-V8 108 | 109 | Decode frames using 110 | ``` 111 | nutcracker smush decode DATADIR/*.SAN --target OUTDIR 112 | ``` 113 | Frames will be extracted as PNG files to `OUTDIR/VIDEO.SAN` 114 | where `VIDEO.SAN` matches the filename of the video. 115 | 116 | Re-encode the video using: 117 | ``` 118 | python -m nutcracker.smush.encode_san_seq DATADIR/VIDEO.SAN 119 | ``` 120 | where DATADIR/VIDEO.SAN is path to original SMUSH video file 121 | 122 | The new video will be created as `NEW_VIDEO2.SAN` in workdir 123 | 124 | *To reduce result file size, it is recommended to only re-encode modified frames, this can be done by removing unaltered frames from `OUTDIR/VIDEO.SAN` 125 | 126 | ### Compress 127 | 128 | Supported games: V7-V8 129 | 130 | Compress video frames using zlib compression, as in scummvm-tools 131 | ``` 132 | nutcracker smush compress DATADIR/*.SAN 133 | ``` 134 | 135 | ## Text 136 | 137 | ### Extract and Inject script text 138 | 139 | Supported games: V5-V8, HE 140 | 141 | Extract all texts from game to text file using: 142 | ``` 143 | nutcracker sputm strings_extract --textfile strings.txt PATH/TO/GAME.000 144 | ``` 145 | *Replace `PATH/TO/GAME.000` to actual game index file (Usually ends with `.000`, `.LA0` or `.HE0`) 146 | 147 | Edit the text file using regular text editor. 
148 | 149 | Inject the modified text in game resources using: 150 | ``` 151 | nutcracker sputm strings_inject --textfile strings.txt PATH/TO/GAME.000 152 | ``` 153 | 154 | ### Decompile game script 155 | 156 | Supported games: V5-V8, HE 157 | 158 | Decompile game scripts to script file with Windex-like syntax: 159 | 160 | ``` 161 | nutcracker sputm script decompile PATH/TO/GAME.000 162 | ``` 163 | *Replace `PATH/TO/GAME.000` to actual game index file (Usually ends with `.000`, `.LA0` or `.HE0`) 164 | 165 | 166 | ## Graphics 167 | 168 | ### Room background and object images 169 | 170 | Supported games: V5-V8, HE 171 | 172 | Extract room background and object images using: 173 | 174 | ``` 175 | nutcracker sputm room decode [--ega] PATH/TO/GAME.000 176 | ``` 177 | *Replace `PATH/TO/GAME.000` to actual game index file (Usually ends with `.000`, `.LA0` or `.HE0`) 178 | 179 | *Use the `--ega` if you wish to simulate EGA graphics on games with EGA backward compatibility mode, don't use it if you wish to modify the graphics for injecting modified graphics later 180 | 181 | Room backgrounds and Object images will be extracted as PNG images in `GAME/backgrounds` and `GAME/objects` respectively, where `GAME` is replaced with the name of the game. 182 | 183 | Modify the image files, it's recommended to use image editor without palette optimization, such as GraphicsGale. 
184 | 185 | Create patch files for the modified images using: 186 | ``` 187 | nutcracker sputm room encode --ref PATH/TO/GAME.000 GAME 188 | ``` 189 | Rebuild game resources 190 | ``` 191 | nutcracker sputm build --ref PATH/TO/GAME.000 GAME 192 | ``` 193 | 194 | Source available at https://github.com/blooperz/nutcracker 195 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "nutcracker" 3 | version = "0.3.141591" 4 | description = "Tools for editing resources in SCUMM games." 5 | authors = ["BLooperZ "] 6 | license = "GPL-3.0-or-later" 7 | readme = "README.md" 8 | classifiers = [ 9 | "Development Status :: 4 - Beta", 10 | "Programming Language :: Python :: 3", 11 | "Environment :: Console", 12 | "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", 13 | "Intended Audience :: Developers", 14 | "Operating System :: OS Independent", 15 | "Topic :: Software Development :: Build Tools", 16 | "Topic :: Games/Entertainment", 17 | "Topic :: Utilities", 18 | ] 19 | keywords = ["scumm", "sputm", "smush", "lucasarts", "humongous"] 20 | repository = "https://github.com/blooperz/nutcracker" 21 | 22 | [tool.poetry.dependencies] 23 | python = ">=3.12" 24 | numpy = "^2.0.0" 25 | parse = "^1.20.1" 26 | Pillow = "^10.2.0.20240520" 27 | typer = "^0.12.3" 28 | PyYAML = "^6.0.1" 29 | deal = "^4.24.4" 30 | 31 | [tool.poetry.scripts] 32 | nutcracker = "nutcracker.runner:app" 33 | smush = "nutcracker.smush.runner:app" 34 | 35 | [tool.poetry.group.dev.dependencies] 36 | mypy = "^1.10.0" 37 | ruff = "^0.4.9" 38 | pytest = "^8.1.1" 39 | pyinstaller = { version = "^6.13.0", python = "<3.14" } 40 | pip-licenses = "^5.0.0" 41 | types-pillow = "^10.2.0.20240520" 42 | 43 | [build-system] 44 | requires = ["poetry-core>=1.0.0"] 45 | build-backend = "poetry.core.masonry.api" 46 | 47 | [tool.mypy] 48 | strict = true 49 | plugins = 
['numpy.typing.mypy_plugin'] 50 | 51 | [tool.ruff] # https://beta.ruff.rs/docs/settings/ 52 | target-version = "py310" 53 | line-length = 88 54 | fix = false 55 | src = ["src", "tests"] 56 | 57 | [tool.ruff.lint] 58 | 59 | select = [ # https://beta.ruff.rs/docs/rules/ 60 | "BLE", # flake8-blind-except 61 | "FBT", # flake8-boolean-trap 62 | "A", # flake8-builtins 63 | "COM", # flake8-commas 64 | "DTZ", # flake8-datetimez 65 | "T10", # flake8-debugger 66 | # "DJ", # flake8-django 67 | # "EM", # flake8-errmsg 68 | "EXE", # flake8-executable 69 | "ISC", # flake8-implicit-str-concat 70 | "ICN", # flake8-import-conventions 71 | "G", # flake8-logging-format 72 | "INP", # flake8-no-pep420 73 | "PIE", # flake8-pie 74 | "T20", # flake8-print 75 | "PYI", # flake8-pyi 76 | "RSE", # flake8-raise 77 | "RET", # flake8-return 78 | "SLF", # flake8-self 79 | "ARG", # flake8-unused-arguments 80 | "PTH", # flake8-use-pathlib 81 | "ERA", # eradicate 82 | # "PD", # pandas-vet 83 | "PGH", # pygrep-hooks 84 | "PL", # Pylint 85 | "PLC", # Convention 86 | "PLE", # Error 87 | "PLR", # Refactor 88 | "PLW", # Warning 89 | "TRY", # tryceratops 90 | "NPY", # NumPy-specific rules 91 | "ANN", # flake8-annotations 92 | "YTT", # flake8-2020 93 | "C90", # mccabe 94 | "E", # pycodestyle 95 | "W", # pycodestyle 96 | "F", # Pyflakes 97 | "B", # flake8-bugbear 98 | "C4", # flake8-comprehensions 99 | "D", # pydocstyle 100 | "I", # isort 101 | "N", # pep8-naming 102 | "S", # flake8-bandit 103 | "SIM", # flake8-simplify 104 | "TCH", # flake8-type-checking 105 | "TID", # flake8-tidy-imports 106 | "Q", # flake8-quotes 107 | "UP", # pyupgrade 108 | "PT", # flake8-pytest-style 109 | "RUF", # Ruff-specific rules 110 | ] 111 | 112 | fixable = ["I", "COM"] 113 | unfixable = ["ERA001", "F401", "F841", "T201", "T203"] 114 | 115 | ignore = [ 116 | "RET504", # Allow assigning before return to ease debugging 117 | "S101", # Allow using assert 118 | "UP015", # Prefer explicitly configuring open mode 119 | "ANN101", 
"ANN102", # Do not require annotating self and cls 120 | "PTH103", # `os.mkdirs` is more readable 121 | "D107", # No need to docstring `__init__` method 122 | "D202", # No blank lines allowed after function docstring -> clashes with Black 123 | "ARG001", "ARG002", # Unused arguments are generally used for API compatibility 124 | "D", # TODO: Add docstrings 125 | "COM812", "ISC001", # conflicts with formatter 126 | "BLE001", # Expecting `Exception` is totally fine 127 | ] 128 | 129 | [tool.ruff.lint.pycodestyle] 130 | max-doc-length = 88 131 | 132 | [tool.ruff.lint.flake8-annotations] 133 | allow-star-arg-any = true 134 | 135 | [tool.ruff.lint.flake8-bugbear] 136 | # Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`. 137 | extend-immutable-calls = ["typer.Option", "typer.Argument"] 138 | 139 | [tool.ruff.lint.flake8-boolean-trap] 140 | extend-allowed-calls = ["typer.Option"] 141 | 142 | [tool.ruff.lint.flake8-tidy-imports] 143 | ban-relative-imports = "parents" 144 | 145 | [tool.ruff.lint.flake8-type-checking] 146 | strict = true 147 | 148 | [tool.ruff.lint.flake8-unused-arguments] 149 | ignore-variadic-names = true 150 | 151 | [tool.ruff.lint.flake8-quotes] 152 | docstring-quotes = "double" 153 | inline-quotes = "single" 154 | multiline-quotes = "double" 155 | 156 | [tool.ruff.lint.pydocstyle] 157 | # Use Google-style docstrings. 158 | convention = "google" 159 | 160 | [tool.ruff.lint.per-file-ignores] 161 | "__init__.py" = ["D104"] 162 | "tests/**/*.py" = ["D100"] 163 | 164 | [tool.ruff.format] 165 | # Prefer single quotes over double quotes. 
166 | quote-style = "single" 167 | -------------------------------------------------------------------------------- /src/nutcracker/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.3.141' 2 | -------------------------------------------------------------------------------- /src/nutcracker/chiper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BLooperZ/nutcracker/7822fc97f3ddad84dbb875298a87b12a15c5de45/src/nutcracker/chiper/__init__.py -------------------------------------------------------------------------------- /src/nutcracker/chiper/xor.py: -------------------------------------------------------------------------------- 1 | from typing import IO 2 | 3 | CHIPER_KEY = 0x69 4 | 5 | 6 | def read(stream: IO[bytes], size: int | None = None, key: int = CHIPER_KEY) -> bytes: 7 | # None reads until EOF 8 | return bytes(b ^ key for b in stream.read(size)) # type: ignore[arg-type] 9 | 10 | 11 | def write(stream: IO[bytes], data: bytes, key: int = CHIPER_KEY) -> int: 12 | return stream.write(bytes(b ^ key for b in data)) 13 | 14 | 15 | if __name__ == '__main__': 16 | import argparse 17 | from functools import partial 18 | 19 | from nutcracker.utils import copyio 20 | 21 | parser = argparse.ArgumentParser(description='read smush file') 22 | parser.add_argument('filename', help='filename to read from') 23 | parser.add_argument('output', help='filename to read from') 24 | parser.add_argument('-c', '--chiper-key', default='0x69', type=str, help='xor key') 25 | args = parser.parse_args() 26 | 27 | with open(args.filename, 'rb') as infile, open(args.output, 'wb') as outfile: 28 | for buffer in copyio.buffered( 29 | partial(read, infile, key=int(args.chiper_key, 16)), 30 | ): 31 | outfile.write(buffer) 32 | -------------------------------------------------------------------------------- /src/nutcracker/codex/__init__.py: 
import struct
from functools import partial
from typing import IO

# Little-endian unsigned 16-bit length prefix used throughout the codecs.
UINT16LE = struct.Struct('<H')


def wrap(structure: struct.Struct, data: bytes) -> bytes:
    """Prefix *data* with its length packed according to *structure*."""
    header = structure.pack(len(data))
    return header + data


def unwrap(structure: struct.Struct, stream: IO[bytes]) -> bytes:
    """Read a length header packed as *structure* from *stream*, then the payload."""
    header = stream.read(structure.size)
    (length,) = structure.unpack(header)
    return stream.read(length)


# Common case: uint16-LE length-prefixed chunks.
wrap_uint16le = partial(wrap, UINT16LE)
unwrap_uint16le = partial(unwrap, UINT16LE)
def encode_groups(
    groups: Iterable[Sequence[int]],
    buf: Sequence[int] = (),
    limit: int = 4,
    carry: bool = True,
    end_limit: int = 1,
    seps: bytes | None = None,
) -> Iterator[tuple[int, Sequence[int]]]:
    """Yield BOMP ``(code, payload)`` pairs for one line of pre-grouped pixel runs.

    Each input group is a run of equal pixel values (as produced by
    ``itertools.groupby``).  Short runs (shorter than ``limit``) are merged
    into a pending raw buffer ``buf``; long runs are emitted as compressed
    (repeat) groups via ``compressed_group``.  Codes follow the BOMP scheme:
    odd code = repeat of one byte, even code = literal run (see
    ``compressed_group`` / ``raw_group``).

    Args:
        groups: Runs of equal pixel values for a single image line.
        buf: Initial pending raw buffer (used by the recursive call).
        limit: Minimum run length worth emitting as a compressed group.
        carry: Move the first pixel of a long run into the pending raw
            buffer before flushing it (matches some original encoders).
        end_limit: Minimum trailing-run length to compress when flushing.
        seps: Optional separator byte sequence that forces a compressed
            group when matched exactly — TODO confirm exact intent; appears
            to replicate a quirk of specific original encoders (see PARAMS
            in codex1.py).

    Yields:
        ``(code, payload)`` tuples; callers serialize them as
        ``bytes([code, *payload])``.
    """
    buf = list(buf)
    # print('GROUPS', [tuple(g) for g in groups])
    # Materialize as an iterator so the recursive call below can consume
    # the remaining groups.
    groups = iter(groups)
    for group in groups:
        # Pending buffer collapsed to a single repeated value: emit it
        # compressed rather than raw.
        if len(set(buf)) == 1 and len(buf) > 1:
            yield compressed_group(buf)
            buf = []

        # Pending buffer is exactly the separator sequence: force-compress
        # it, then emit the current group immediately (raw if short,
        # otherwise first byte raw + rest compressed).
        if seps and bytes(buf) == seps:
            yield compressed_group(buf)
            if len(group) <= limit:
                yield raw_group(group)
            else:
                yield raw_group(group[:1])
                yield compressed_group(group[1:])
            buf = []
            continue

        assert isinstance(buf, list)
        # Short run (or buffer close to capacity): accumulate into the raw
        # buffer instead of emitting a compressed group.
        if len(group) < limit or len(buf) + limit > BUFFER_LIMIT:
            if seps and bytes(group) == seps:
                # Separator run: flush pending raw data first, then hold
                # the separator as the new pending buffer.
                if buf:
                    yield raw_group(buf)
                buf = group
                continue

            buf += group

            # Raw payloads are capped at BUFFER_LIMIT bytes per code.
            if len(buf) > BUFFER_LIMIT:
                yield raw_group(buf[:BUFFER_LIMIT])
                buf = buf[BUFFER_LIMIT:]

            continue

        # Long run: flush the pending raw buffer, optionally carrying the
        # run's first pixel into it (mimics original encoder behavior).
        if buf:
            if carry:
                buf += group[:1]
                group = group[1:]
            yield raw_group(buf)
            buf = []

        if len(group) > BUFFER_LIMIT:
            # Run exceeds one code's capacity: emit the first chunk and
            # recurse on the remainder plus all unconsumed groups.
            yield compressed_group(group[:BUFFER_LIMIT])
            group = group[BUFFER_LIMIT:]
            assert not buf
            yield from encode_groups(
                [group, *groups],
                buf=(),
                limit=limit,
                carry=carry,
                end_limit=end_limit,
                seps=seps,
            )
        else:
            # print('AAA 1', (2 * (len(group) - 1) + 1, group[:1]))
            # Single leftover pixel is emitted raw unless it is a zero run.
            if len(group) > 1 or set(group) == {0}:
                # print('AAA 1', (2 * (len(group) - 1) + 1, group[:1]))
                yield compressed_group(group)
            else:
                # print('AAA 2', (2 * (len(group) - 1), list(group)))
                yield raw_group(group)
    # Flush whatever is still pending once all groups are consumed.
    if buf:
        if len(set(buf)) == 1 and len(buf) > end_limit:
            yield compressed_group(buf)
        elif seps and bytes(buf) == seps:
            yield compressed_group(buf)
        else:
            yield raw_group(buf)
def decode_bpp_char(
    data: bytes,
    width: int,
    height: int,
    bpp: int = 1,
) -> Sequence[Sequence[int]]:
    """Decode a packed bits-per-pixel glyph into rows of pixel values.

    The glyph data is a continuous bit stream, MSB first: each byte
    contributes 8 bits, consumed ``bpp`` bits at a time, one value per
    pixel, row by row.

    Args:
        data: Packed glyph bitmap.
        width: Glyph width in pixels (must be non-zero).
        height: Glyph height in pixels (must be non-zero).
        bpp: Bits per pixel (1, 2, 4, ...); pixel values are in
            ``range(2 ** bpp)``.

    Returns:
        ``height`` rows of ``width`` pixel values each.
    """
    assert width != 0 and height != 0
    # print([f'{x:08b}' for x in data])
    # Expand every byte to its 8-character binary representation, then
    # regroup the combined bit string into bpp-sized pixel values.
    bits = ''.join(f'{x:08b}' for x in data)
    gbits = grouper(bits, bpp)
    bmap = [int(''.join(next(gbits)), 2) for _ in range(height * width)]

    char = list(grouper(bmap, width))

    left = [int(''.join(gb), 2) for gb in gbits]  # why there is still data left?
    # print('DATA', data)
    # print('CHAR', char)
    # NOTE(review): trailing bits beyond height*width pixels are decoded but
    # discarded — presumably byte-alignment padding, but see the open
    # question above; the debug print below tracks occurrences.
    print('LEFT', left, height, width)

    # char += list(grouper(left, width, fillvalue=0))

    # char = list(grouper(bmap, width, fillvalue=0))
    # encoded = encode_bpp_char(char, bpp=bpp)
    # assert encoded == data, (encoded, data)
    return char
def encode1(
    image: np.ndarray[Any, np.uint8],
    num_colors: int,
) -> bytes:
    """RLE-encode a costume image for the v1 costume codec.

    Pixels are traversed column-major (matching ``decode1``'s
    ``order='F'``).  Each run is emitted as ``color << shift`` with the run
    length in the low ``mask`` bits; a zero low-bit field means the length
    follows in the next byte (runs longer than 255 are split).

    Args:
        image: 2-D uint8 image; every value must be below *num_colors*.
        num_colors: Palette size — one of 16, 32 or 64.

    Returns:
        The encoded byte stream.

    Raises:
        ValueError: If a pixel value is out of range for the palette.
    """
    masks = {16: (4, 0x0F), 32: (3, 0x07), 64: (2, 0x03)}
    assert num_colors in masks, num_colors
    shift, mask = masks[num_colors]

    encoded = bytearray()
    # Column-major byte stream, grouped into runs of equal color.
    for _, run_iter in itertools.groupby(image.T.tobytes()):
        run = list(run_iter)
        color = run[0]
        if color >= num_colors:
            raise ValueError(f'Invalid color value: {color} >= {num_colors}')
        head = color << shift
        remaining = len(run)
        # Oversized runs: emit full-length (255) chunks first.
        while remaining > 255:
            encoded += bytes([head, 255])
            remaining -= 255
        if remaining > mask:
            # Length does not fit in the code byte: use the two-byte form.
            encoded += bytes([head, remaining])
        else:
            # Short run: pack the length into the code byte itself.
            encoded += bytes([head | remaining])
    return bytes(encoded)
def decode1(width, height, f, verify=False):
    """Decode a BOMP-compressed image, optionally verifying re-encodability.

    Args:
        width: Image width in pixels.
        height: Image height in pixels.
        f: Raw BOMP data (uint16le-length-prefixed lines).
        verify: When True, re-encode the decoded image with every known
            encoder parameter set (``PARAMS``) and assert at least one
            reproduces a prefix of the original data — a regression check
            for the encoder, with verbose debug prints.

    Returns:
        The decoded image as a 2-D array (see ``bomp.decode_image``).
    """
    # BG = 39

    # print(mat)
    mat = bomp.decode_image(f, width, height)

    if verify:
        # Dump the per-line code streams for manual inspection.
        with io.BytesIO(f) as stream:
            lines = [base.unwrap_uint16le(stream) for _ in range(height)]
            print([list(bomp.iter_decode(line)) for line in lines])

        print()

        # Re-group decoded pixels into runs, mirroring the encoder's input.
        g = [[list(group) for c, group in itertools.groupby(line)] for line in mat]

        encs = []

        # Try every known encoder parameter preset (see PARAMS above);
        # different games were encoded with slightly different settings.
        for limit, carry, end_limit, seps in PARAMS:
            encs.append(
                bomp.encode_image(
                    mat,
                    limit=limit,
                    carry=carry,
                    end_limit=end_limit,
                    seps=seps,
                ),
            )
            print(
                list(
                    list(
                        bomp.encode_groups(
                            l,
                            limit=limit,
                            carry=carry,
                            end_limit=end_limit,
                            seps=seps,
                        ),
                    )
                    for l in g
                ),
            )

        # Prefix comparison (not equality): original data may carry
        # trailing padding beyond the encoded image.
        assert any(x == f[: len(x)] for x in encs), (encs, f)

    # mat = np.where(mat==0, BG, mat)
    return mat
def split_segments_base(line, bg):
    """Split a pixel line into (skip, run) segments for NUT font encoding.

    Yields ``(offset, segment)`` pairs where ``offset`` is the number of
    background pixels preceding the segment and ``segment`` is the raw
    bytes of the following non-background run.  A line that ends with a
    background run additionally yields ``(run_length, b'')`` so encoders
    can represent the trailing skip explicitly.

    Args:
        line: iterable of pixel values (ints / bytes).
        bg: the background color value.
    """
    off = 0
    # Fix: previously, an empty `line` left `is_bg` unbound and the
    # trailing check raised NameError; now an empty line yields nothing.
    is_bg = False
    # groupby with a boolean key alternates between background and
    # non-background runs.
    for is_bg, group in itertools.groupby(line, key=lambda val: val == bg):
        lst = bytes(group)
        if not is_bg:
            yield off, lst
        # The next segment's skip count is the length of this run.
        off = len(lst)
    if is_bg:
        # Line ended on background: emit an explicit empty segment.
        yield off, b''
def decode_rle_group(line, width):
    """Decode one RLE-compressed line into a list of pixel values.

    Codes: odd byte = skip (code >> 1) background pixels (left as 0);
    even byte with bit 1 set = repeat the next byte (code >> 2) + 1
    times; even byte with bit 1 clear = copy (code >> 2) + 1 raw bytes.
    """
    pixels = [0] * width
    pos = 0  # read cursor into the encoded line
    x = 0  # write cursor into the output line
    total = len(line)
    while pos < total and x < width:
        code = line[pos]
        pos += 1
        if code & 1:
            # Skip: leave background zeros in place.
            x += code >> 1
            continue
        count = (code >> 2) + 1
        if code & 2:
            # Fill: repeat the next byte `count` times.
            run = line[pos : pos + 1] * count
            pos += 1
        else:
            # Raw copy of `count` literal bytes.
            run = line[pos : pos + count]
            pos += count
        pixels[x : x + count] = run
        x += count
    return pixels
def encode_rle_groups(groups, buf=()):
    """Encode pixel runs into (code, payload) RLE tuples.

    Input is a list of runs of equal pixel values (as produced by
    itertools.groupby).  Output codes mirror decode_rle_group:

    - odd code ``2*n + 1``: skip *n* background (zero) pixels;
    - even code ``4*(n-1) + 2``: fill, *n* repeats of one byte
      (payload is a 1-element sample);
    - even code ``4*(n-1)``: raw copy of *n* literal bytes.

    ``buf`` accumulates short runs that are cheaper to emit as one raw
    copy than as individual fills; it is flushed before skips/fills.

    NOTE(review): in the overflow path below (`len(buf) > 64`, remainder
    not uniform), the recursion is handed `buf` but the outer loop's
    trailing flush still sees a non-empty `buf` — this looks like it
    could emit the remainder twice; confirm against real data.
    """
    buf = list(buf)
    groups = iter(groups)
    for group in groups:
        if set(group) == {0}:
            # Background run: flush any pending raw buffer first.
            if buf:
                # if len(set(buf)) == 1:
                #     yield (4 * (len(buf) - 1) + 2, buf[:1])
                # else:
                yield (4 * (len(buf) - 1), list(buf))
                buf = []

            if len(group) > 127:
                # Max skip per code is 127; split long skips, padding
                # with a 1-pixel skip before recursing on the rest.
                yield (2 * 127 + 1, group[:1])
                group = group[127:]
                assert not buf
                if group:
                    yield (2 * 1 + 1, group[:1])
                    group = group[1:]
                if group:
                    yield from encode_rle_groups([group, *groups])

            elif group:
                yield (2 * len(group) + 1, group[:1])
        else:
            # Cost of appending this run to the raw buffer vs. flushing
            # the buffer and emitting the run as a 2-byte fill.
            raw = 1 + len(buf) + len(group)
            encoded = 1 + len(buf) + 2
            if raw < encoded or (raw == encoded and buf):
                buf += group

                if len(buf) > 64:
                    # Raw copies carry at most 64 bytes per code.
                    yield (4 * (64 - 1), buf[:64])
                    buf = buf[64:]
                    if len(set(buf)) == 1:
                        # Uniform remainder: re-encode it as a fill run.
                        yield from encode_rle_groups([buf, *groups])
                        buf = []
                    else:
                        yield from encode_rle_groups(groups, buf=buf)

            else:
                if buf:
                    yield (4 * (len(buf) - 1), list(buf))
                    buf = []

                if len(group) > 64:
                    # Max fill per code is 64 pixels; split long fills.
                    yield (4 * (64 - 1) + 2, group[:1])
                    group = group[64:]
                    assert not buf
                    if len(group) == 1:
                        yield (2, group)
                    else:
                        yield from encode_rle_groups([group, *groups])
                else:
                    yield (4 * (len(group) - 1) + 2, group[:1])
    # Trailing flush for any bytes still buffered at end of line.
    if buf:
        yield (4 * (len(buf) - 1), list(buf))
if __name__ == '__main__':
    import argparse

    from nutcracker.smush import anim, smush

    parser = argparse.ArgumentParser(description='read smush file')
    parser.add_argument('filename', help='filename to read from')
    parser.add_argument('--target', '-t', help='target directory', default='sound')
    args = parser.parse_args()

    # Output goes to <target>/<input basename>/, one .SAD file per track.
    basename = os.path.basename(args.filename)
    output_dir = os.path.join(args.target, basename)
    os.makedirs(output_dir, exist_ok=True)
    print(f'Decoding file: {basename}')
    with open(args.filename, 'rb') as res:
        header, frames = anim.parse(res)

        # NOTE(review): frame iteration kept inside the file context in
        # case anim.parse yields frames lazily — confirm against anim.parse.
        for idx, frame in enumerate(frames):
            for _, (tag, chunk) in smush.print_chunks(frame, level=1):
                if tag == 'PSAD':
                    # PSAD chunks carry streamed audio; handle_sound_frame
                    # parses the per-chunk header and payload.
                    (
                        track_id,
                        index,
                        max_frames,
                        flags,
                        vol,
                        pan,
                        chunk,
                        frame_no,
                    ) = handle_sound_frame(chunk, idx)
                    fname = os.path.join(output_dir, f'PSAD_{track_id:04d}.SAD')
                    # index 0 starts a new track file; later chunks append.
                    mode = 'ab' if index != 0 else 'wb'
                    with open(fname, mode) as aud:
                        aud.write(chunk)
                else:
                    continue
def open_game_resource(filename: str, chiper_key: int = 0x00) -> Iterator[Element]:
    """Parse a v3 index file (00.LFL) and yield one element tree per room.

    Reads the directory tables (rooms/costumes/scripts/sounds) from the
    index, then opens each referenced NN.LFL room file next to it and
    attaches the blocks found there (RO, CO, SC, SO, or UN for unknown)
    as children of an LF element.

    Args:
        filename: path to the index file; its basename must be '00.LFL'.
        chiper_key: XOR key applied when reading the resource files.

    Raises:
        ValueError: if the basename is wrong or the magic word is not 0x100.

    Yields:
        One LF Element per room, with nested RO/CO/SC/SO/UN children.
    """
    index = read_file(filename, key=chiper_key)

    basename = os.path.basename(filename)
    print(basename)

    if basename != '00.LFL':
        raise ValueError(basename)

    # print(index)

    with io.BytesIO(index) as stream:
        magic = UINT16LE.unpack(stream.read(UINT16LE.size))[0]
        if magic != 0x100:
            raise ValueError(f'bad magic: {magic}')

        # Object table precedes the directories.
        # NOTE(review): `objects` is parsed but never used below.
        num_objects = UINT16LE.unpack(stream.read(UINT16LE.size))[0]
        objects = [
            UINT32LE.unpack(stream.read(UINT32LE.size))[0] for _ in range(num_objects)
        ]

        # Each directory maps resource id -> (room number, offset in room file).
        rooms = dict(read_dir(stream))
        costumes = dict(read_dir(stream))
        scripts = dict(read_dir(stream))
        sounds = dict(read_dir(stream))

    print(rooms)
    print(costumes)
    print(scripts)
    print(sounds)

    # Invert the directories: room number -> [(offset, resource id, tag)].
    ind = defaultdict(list)
    for cost, (rm, off) in costumes.items():
        ind[rm].append((off, cost, 'CO'))
    for scr, (rm, off) in scripts.items():
        ind[rm].append((off, scr, 'SC'))
    for sou, (rm, off) in sounds.items():
        ind[rm].append((off, sou, 'SO'))

    for rm_id, reses in ind.items():
        print(rm_id, reses)

    room_pattern = '{room:02d}.LFL'
    for room_id, rm_info in rooms.items():
        # Room numbers outside 1..98 are placeholders.
        if not 0 < room_id < 99:
            continue
        fname = room_pattern.format(room=room_id)

        fullname = os.path.join(os.path.dirname(filename), fname)
        if not os.path.exists(fullname):
            print(f'warning: {fname} does not exist, {rm_info}')
            continue
        room_data = read_file(fullname, key=chiper_key)

        print(fname, rm_info)
        room = create_element(
            0,
            earwax.mktag('LF', room_data),
            gid=room_id,
            path=f'LFv3_{room_id:04d}',
        )
        with io.BytesIO(room_data) as stream:
            # First block in a room file is always the RO room block.
            rm_block = read_block(stream)
            rm_elem = create_element(
                0,
                earwax.mktag('RO', mkblock(rm_block)),
                path=os.path.join(room.attribs['path'], 'ROv3'),
            )
            room.add_child(rm_elem)

            rm_elem = read_room(rm_elem)

            # Remaining blocks are identified by offset via the inverted
            # index; unmatched offsets become UN (unknown) elements.
            while stream.tell() < len(room_data):
                offset = stream.tell()
                idx = next(
                    (
                        (off, bid, tag)
                        for off, bid, tag in ind[room_id]
                        if off == offset
                    ),
                    None,
                )
                block = read_block(stream)

                if not idx:
                    idx = (offset, None, 'UN')
                # print(offset, idx, block[:16], ind[room_id])
                assert offset == idx[0], (offset, idx[0])
                room.add_child(
                    create_element(
                        offset,
                        earwax.mktag(idx[2], mkblock(block)),
                        path=os.path.join(
                            room.attribs['path'],
                            f'{idx[2]}v3_{idx[1]:04d}'
                            if idx[1]
                            else f'UN_{offset:04x}',
                        ),
                        **({'gid': idx[1]} if idx[1] else {}),
                    ),
                )

        yield room
def decode_smap(height: int, width: int, data: bytes) -> Sequence[Sequence[int]]:
    """Decode an EGA SMAP image block into a height x width pixel matrix.

    The block starts with a 2-byte header, then one uint16 offset per
    8-pixel-wide strip, then the strip payloads.  Strips are decoded
    individually and stacked horizontally.

    Returns None for a degenerate (zero-sized) image.
    """
    strip_w = 8

    if not (width and height):
        return None

    payload = data[2:]
    count = width // strip_w

    # One offset per strip; offsets include the 2-byte header we dropped.
    with io.BytesIO(payload) as stream:
        starts = [read_uint16les(stream) - 2 for _ in range(count)]

    # Each strip runs until the next strip's offset (last one to the end).
    bounds = list(zip(starts, starts[1:] + [len(payload)]))
    columns = [parse_strip_ega(height, strip_w, payload[lo:hi]) for lo, hi in bounds]
    return np.hstack(columns)
if __name__ == '__main__':
    import argparse

    from .preset import earwax

    parser = argparse.ArgumentParser(description='read smush file')
    parser.add_argument('files', nargs='+', help='files to read from')
    parser.add_argument('--chiper-key', default='0xFF', type=str, help='xor key')
    args = parser.parse_args()

    files = set(flatten(glob.iglob(r) for r in args.files))
    for filename in files:
        # NOTE(review): `rnam` is assigned but never used below.
        rnam = {}
        root = open_game_resource(filename, chiper_key=int(args.chiper_key, 16))

        # Images are written under the game directory's name.
        basename = pathlib.Path(os.path.basename(os.path.dirname(filename)))
        os.makedirs(basename / 'backgrounds', exist_ok=True)
        os.makedirs(basename / 'objects', exist_ok=True)

        for room in root:
            room_id = room.attribs['gid']
            ro = room.children[0]
            assert ro.tag == 'RO', ro.tag
            # Room header (HD) carries the background dimensions.
            hd = earwax.find('HD', ro)
            width = read_uint16le(hd.data, 0)
            height = read_uint16le(hd.data, 2)

            # Decode and save the room background image.
            im = earwax.find('IM', ro)
            bgim = decode_smap(height, width, im.data)
            imx = convert_to_pil_image(bgim)
            imx.putpalette(EGA_PALETTE)
            imx.save(basename / 'backgrounds' / f'room_{room_id:02d}.png')
            # assert np.array_equal(decode_smap(height, width, im.data[:2] + encode_smap(bgim)), bgim)

            # Pair each object image (OI) with its code block (OC) by gid;
            # the OC block carries the object's dimensions.
            for oi in earwax.findall('OI', ro):
                for oc in earwax.findall('OC', ro):
                    obj_id = oi.attribs['gid']
                    if oc.attribs['gid'] == obj_id:
                        assert read_uint16le(oc.data, 4) == obj_id, (
                            read_uint16le(oc.data, 4),
                            obj_id,
                        )
                        # Width is stored in 8-pixel strips; height is
                        # rounded down to a multiple of 8.
                        width = oc.data[9] * 8
                        height = oc.data[15] & 0xF8
                        oiim = decode_smap(height, width, oi.data)
                        imx = convert_to_pil_image(oiim)
                        imx.putpalette(EGA_PALETTE)
                        imx.save(
                            basename
                            / 'objects'
                            / f'room_{room_id:02d}_object_{obj_id:04d}.png',
                        )
def get_bg_color(
    row_size: int,
    f: Callable[[int], int],
    bgs: Sequence[bytes] = BGS,
) -> Callable[[int], int]:
    """Build a checkerboard background-color picker.

    Given an index-mapping function *f* and a cycle of single-byte
    background colors *bgs*, returns a function that maps a tile index
    to the ordinal value of its background color.

    NOTE(review): *row_size* is currently unused by the implementation.
    """
    cycle = len(bgs)

    def pick(idx: int) -> int:
        # f() folds the index into a position; modulo cycles the colors.
        return ord(bgs[f(idx) % cycle])

    return pick
def count_in_row(pred: Callable[[T], bool], row: Iterable[T]) -> int:
    """Count the length of the leading run of items satisfying *pred*."""
    total = 0
    for item in row:
        if not pred(item):
            break
        total += 1
    return total
def convert_to_pil_image(
    char: Sequence[Sequence[int]],
    size: tuple[int, int] | None = None,
) -> TImage:
    """Build a palette-mode ('P') PIL image from a matrix of pixel indices.

    When *size* is given as (width, height), the pixel buffer is resized
    in place to that geometry before the image is created.
    """
    # print('CHAR:', char)
    pixels = np.array(list(char), dtype=np.uint8)
    if size:
        target_width, target_height = size
        # np.resize pads with zeros / truncates if the element count
        # differs from the source — presumably intended; confirm callers.
        pixels.resize(target_height, target_width)
    return Image.fromarray(pixels, mode='P')
class NonZeroPaddingError(ValueError):
    """Raised when the gap between aligned chunks contains non-zero bytes."""

    def __init__(self, pad: BufferLike) -> None:
        # Keep the offending padding bytes around for diagnostics.
        self.pad = pad
        super().__init__(f'non-zero padding between chunks: {str(pad)}')
@deal.chain(
    deal.pre(lambda _: _.align >= 1),
    deal.ensure(lambda _: _.result.startswith(_.buffer)),
    deal.ensure(
        lambda _: len(_.buffer) % _.align == 0 or set(_.result[len(_.buffer) :]) == {0},
    ),
    deal.ensure(lambda _: len(_.buffer) <= len(_.result) < len(_.buffer) + _.align),
    deal.ensure(lambda _: len(_.result) % _.align == 0),
    deal.pure,
)
def align_write(buffer: BufferLike, align: int = 1) -> bytes:
    """Align given write stream to next aligned position.
    Pad skipped bytes with zero.

    Args:
        buffer: the bytes written so far.
        align: alignment unit in bytes (must be >= 1).

    Returns:
        `buffer` extended with zero bytes so its length is a multiple
        of `align` (the deal contracts above verify these properties).
    """
    pos = len(buffer)
    # bytes(n) yields n zero bytes; calc_align gives the distance to the
    # next aligned offset.
    return bytes(buffer) + bytes(calc_align(pos, align))
@dataclass(frozen=True)
class Splicer:
    """A reusable (offset, size) window that can be applied to buffers.

    Calling the instance slices the given buffer at the stored window,
    validating that the buffer actually contains it.

    Raises:
        NegativeSliceError: on construction, if offset or size is negative.
    """

    offset: int
    size: int

    def __post_init__(self) -> None:
        # Reject invalid windows eagerly, at construction time.
        if min(self.offset, self.size) < 0:
            raise NegativeSliceError(self.offset, self.size)

    def __call__(self, buffer: BufferLike) -> BufferLike:
        return splice(buffer, self.offset, self.size)
@dataclass(frozen=True)
class StructuredChunk(ChunkFactory, _StructuredChunkHeader):
    """Chunk codec whose header layout comes from a Structured struct."""

    def untag(self, buffer: BufferLike, offset: int = 0) -> Chunk:
        """Parse the chunk starting at *offset* into a Chunk view."""
        etag, total = self.unpack_from(buffer, offset)
        # The data window starts right after the header and spans the
        # remainder of the chunk.
        body = Splicer(self.size, total - self.size)
        raw = splice(buffer, offset, total)
        return Chunk(etag.decode('ascii'), raw, body)

    def mktag(self, tag: str, data: bytes) -> bytes:
        """Serialize *data* prefixed with a chunk header carrying *tag*."""
        header = ChunkHeader(tag.encode('ascii'), self.size + len(data))
        return self.pack(header) + data
@dataclass(frozen=True)
class OldSputmChunk(StructuredChunk):
    """Chunk-header flavor used by older SPUTM resources.

    All-zero tags are exposed as NULL_TAG on read and written back as two
    NUL bytes; `size_fix` adjusts the stored size on both paths.
    """

    size_fix: int = 0

    def unpack_from(self, buffer: BufferLike, offset: int = 0) -> ChunkHeader:
        etag, size = self._struct.unpack_from(buffer, offset)
        # An all-zero tag marks a null chunk; surface it under the NULL_TAG alias.
        normalized = NULL_TAG if set(etag) == {0} else etag
        return ChunkHeader(normalized, size + self.size_fix)

    def pack(self, data: ChunkHeader) -> bytes:
        etag = b'\0\0' if data.etag == NULL_TAG else data.etag
        return self._struct.pack(ChunkHeader(etag, data.size - self.size_fix))
def _format_children(
    root: Iterable[Element],
    max_show: int | None = None,
) -> Iterator[str]:
    """Yield compact per-tag summaries ('TAG' or 'TAG*count') for repr output.

    At most `max_show` distinct tags are emitted; any remainder collapses
    into a single '...' marker.
    """
    tally = Counter(child.tag for child in root)
    for shown, (tag, count) in enumerate(tally.items()):
        if max_show is not None and shown >= max_show:
            yield '...'
            return
        if count > 1:
            yield f'{tag}*{count}'
        else:
            yield tag
def map_chunks(
    cfg: _IndexSetting,
    data: bytes,
    parent: Element | None = None,
    level: int = 0,
    extra: Callable[[Element | None, Chunk, int], dict[str, Any]] | None = None,
    offset: int = 0,
) -> Iterator[Element]:
    """Lazily build an Element tree from the chunks found in `data`.

    cfg: indexing settings (chunk format, schema, max_depth, strictness).
    data: raw buffer to scan for chunks.
    parent: element whose payload `data` is; None at the top level.
    level: current recursion depth, compared against cfg.max_depth.
    extra: optional callback producing extra attribs for each element.
    offset: position in `data` to start reading from.

    Yields one Element per chunk at this level; children are produced by a
    recursive (and still lazy) call wired in through Element.content.
    """
    ptag = parent.tag if parent else None
    # Depth limiting: a falsy max_depth (0/None) disables the check entirely.
    if cfg.max_depth and level >= cfg.max_depth:
        return
    # A parent tag with an empty/missing schema entry is treated as a leaf.
    if parent and not cfg.schema.get(parent.tag):
        return
    data = memoryview(data)  # zero-copy slicing for nested reads
    # Exceptions raised below get tagged with the parent tag for context.
    with exception_ptag_context(ptag):
        for offset, chunk in read_chunks(cfg, data, offset=offset):
            check_schema(cfg, ptag, chunk.tag)

            elem = create_element(
                offset,
                chunk,
                **(extra(parent, chunk, offset) if extra else {}),
            )
            # Recurse into the chunk payload; children stay a lazy iterator.
            yield elem.content(
                map_chunks(
                    cfg,
                    chunk.slice(chunk.buffer),
                    parent=elem,
                    level=level + 1,
                    extra=extra,
                ),
            )
def assert_tag(target: str, chunk: Chunk) -> bytes:
    """Verify that `chunk` carries the expected 4CC tag.

    Returns the chunk payload when the tag matches; raises ValueError otherwise.
    """
    actual = chunk.tag
    if actual == target:
        return chunk.data
    raise ValueError(f'expected tag to be {target} but got {actual}')
@dataclass(frozen=True)
class _DefaultOverride:
    """Mixin letting a frozen settings object be "called" to derive a copy.

    `setting(**overrides)` returns a new instance with the named fields
    replaced, leaving the original untouched.
    """

    def __call__(self: _SettingT, **overrides: Any) -> _SettingT:
        return replace(self, **overrides)
def workaround_x80(cfg: _ChunkSetting, buffer: BufferLike, offset: int = 0) -> int:
    """WORKAROUND: in Pajama Sam 2, some DIGI chunks are off by 1.

    Their header appears as '\\x80DIG' while the index says the chunk starts
    one byte later. Since real header tags are ASCII, skipping a configured
    cfg.skip_byte value is low risk: return offset + 1 when the byte at
    `offset` equals cfg.skip_byte, otherwise return offset unchanged.
    """
    skip = cfg.skip_byte
    if skip is None or buffer[offset] != skip:
        return offset
    # cfg may not carry a logger attribute; fall back to the logging module.
    getattr(cfg, 'logger', logging).warning(
        f'found \\x{skip:02x} between chunks, skipping 1 byte...',
    )
    return offset + 1
import index, settings, tree 13 | from .chunk import SizeFixedChunk 14 | 15 | HEX_BASE = 16 16 | 17 | if __name__ == '__main__': 18 | parser = argparse.ArgumentParser(description='read smush file') 19 | parser.add_argument('filename', help='filename to read from') 20 | parser.add_argument('--size-fix', default=0, type=int, help='header size fix') 21 | parser.add_argument('--align', default=1, type=int, help='alignment between chunks') 22 | parser.add_argument('--schema', type=str, help='load saved schema from file') 23 | parser.add_argument('--chiper-key', default='0x00', type=str, help='xor key') 24 | parser.add_argument('--max-depth', default=None, type=int, help='max depth') 25 | parser.add_argument('--schema-dump', type=str, help='save schema to file') 26 | args = parser.parse_args() 27 | 28 | cfg = settings._IndexSetting( 29 | chunk=SizeFixedChunk( 30 | settings.IFF_CHUNK_HEADER, 31 | size_fix=args.size_fix, 32 | ), 33 | align=args.align, 34 | max_depth=args.max_depth, 35 | ) 36 | 37 | schema = None 38 | if args.schema: 39 | with open(args.schema, 'r') as schema_in: 40 | schema = yaml.safe_load(schema_in) 41 | 42 | data = read_file(args.filename, key=int(args.chiper_key, HEX_BASE)) 43 | 44 | schema = schema or index.generate_schema(cfg, data) 45 | 46 | pprint(schema) 47 | 48 | if args.schema_dump: 49 | with open(args.schema_dump, 'w') as schema_out: 50 | yaml.dump(schema, schema_out) 51 | 52 | def update_element_path( 53 | parent: Element | None, 54 | chunk: Chunk, 55 | offset: int, 56 | ) -> dict[str, str]: 57 | dirname = parent.attribs['path'] if parent else '' 58 | return {'path': os.path.join(dirname, chunk.tag)} 59 | 60 | root = index.map_chunks( 61 | replace(cfg, schema=schema), 62 | data, 63 | extra=update_element_path, 64 | ) 65 | for elem in root: 66 | tree.render(elem) 67 | -------------------------------------------------------------------------------- /src/nutcracker/kernel/settings.py: 
@dataclass(frozen=True)
class _ChunkSetting(ChunkFactory):
    """Setting for resource chunks

    align: int (default 2) -
        data alignment for chunk start offsets.

    chunk: stream <-> Chunk (default IFF_CHUNK_EX) -
        factory to read/write chunk header

    skip_byte: optional byte value tolerated (and skipped) between chunks.

    logger: destination for round-trip mismatch warnings.
    """

    align: int = 2
    chunk: ChunkFactory = IFF_CHUNK_EX
    skip_byte: int | None = None
    logger: logging.Logger = logging.root

    def untag(self, buffer: BufferLike, offset: int = 0) -> Chunk:
        """Read chunk from given buffer."""
        chunk = self.chunk.untag(buffer, offset=offset)
        # Sanity check: re-encoding the parsed chunk should reproduce its bytes.
        reencoded = self.chunk.mktag(chunk.tag, chunk.data)
        if reencoded != bytes(chunk):
            self.logger.warning(f'Possible mismatch when re-encoding {chunk}')
        return chunk

    def mktag(self, tag: str, data: bytes) -> bytes:
        """Create chunk bytes from given tag and data."""
        buffer = self.chunk.mktag(tag, data)
        # Round-trip guard: the encoded buffer must parse back to (tag, data).
        parsed = self.chunk.untag(buffer)
        assert (parsed.tag, parsed.data) == (tag, data)
        return buffer
class Structured(Protocol[T_Struct]):
    """Structural interface for fixed-size binary (de)serializers.

    Implementations convert between a raw byte representation and a value
    of type T_Struct (e.g. a chunk header tuple).
    """

    # Number of bytes consumed by unpack/unpack_from and produced by pack.
    @property
    def size(self) -> int: ...

    # Read `size` bytes from a binary stream and decode them.
    def unpack(self, stream: IO[bytes]) -> T_Struct: ...

    # Decode from an in-memory buffer starting at `offset`.
    def unpack_from(self, data: bytes, offset: int = 0) -> T_Struct: ...

    # Encode `data` back to its byte representation.
    def pack(self, data: T_Struct) -> bytes: ...
@dataclass(frozen=True)
class StructuredTuple(Structured, Generic[T_Struct]):
    """Structured codec driven by a struct format plus a field-name tuple.

    Unpacked values are matched positionally to `_fields` and handed to
    `_factory` as keywords; packing reads the same attributes back off the
    value object.
    """

    _fields: Sequence[str]
    _structure: struct.Struct
    _factory: Callable[..., T_Struct]

    @property
    def size(self) -> int:
        return self._structure.size

    def unpack(self, stream: IO[bytes]) -> T_Struct:
        return self.unpack_from(stream.read(self._structure.size))

    def unpack_from(self, data: bytes, offset: int = 0) -> T_Struct:
        values = self._structure.unpack_from(data, offset=offset)
        kwargs = dict(zip(self._fields, values))
        # cast: narrow the broadly-typed factory for the type checker.
        factory = cast(Callable[..., T_Struct], self._factory)
        return factory(**kwargs)

    def pack(self, data: T_Struct) -> bytes:
        return self._structure.pack(*(getattr(data, field) for field in self._fields))
def renders(element: Element | None) -> str:
    """Render the element tree to a string instead of a stream."""
    buffer = io.StringIO()
    try:
        render(element, stream=buffer)
        return buffer.getvalue()
    finally:
        buffer.close()
children: Iterable['Element']) -> None: 41 | children = list(children) 42 | # children should have been mapped already to avoid index offset issues 43 | assert self._children is not None 44 | self._children = children 45 | self.update_raw( 46 | write_chunks( 47 | self.cfg, 48 | (mktag(self.cfg, child.tag, child.data) for child in self.children()), 49 | ) 50 | ) 51 | 52 | def update_raw(self, value: bytes) -> None: 53 | self._data = value 54 | 55 | def children(self) -> Iterator['Element']: 56 | schema = self.cfg.schema.get(self.tag) 57 | if self._children is None: 58 | if not schema: 59 | return 60 | self._children = list(map_chunks(self.cfg, self.data, parent=self)) 61 | yield from self._children 62 | 63 | def add_child(self, child: 'Element') -> None: 64 | if self._children is None: 65 | self._children = list(self.children()) 66 | self._children.append(child) 67 | 68 | @property 69 | def tag(self) -> str: 70 | return self.chunk.tag 71 | 72 | @property 73 | def data(self) -> ArrayBuffer: 74 | if self._data is None: 75 | return self.chunk.data 76 | return memoryview(self._data) 77 | 78 | def __repr__(self) -> str: 79 | attribs = ' '.join(f'{key}={val}' for key, val in self.attribs.items()) 80 | children = ','.join(_format_children(self.children(), max_show=4)) 81 | return f'Element<{self.tag}>[{attribs}, children={{{children}}}]' 82 | 83 | 84 | def _format_children( 85 | root: Iterable[Element], 86 | max_show: int | None = None, 87 | ) -> Iterator[str]: 88 | counts = Counter(str(child.tag) for child in root) 89 | for idx, (tag, count) in enumerate(counts.items()): 90 | if not (max_show is None or idx < max_show): 91 | yield '...' 
class MissingSchemaEntryError(Exception):
    """Raised when a child tag is not listed in its parent's schema entry.

    `tag` is the parent (container) tag; `child_tag` is the offending child.
    """

    def __init__(self, tag: str, child_tag: str) -> None:
        # BUG FIX: the message had parent and child transposed. Callers raise
        # MissingSchemaEntryError(parent.tag, elem.tag), so the entry that is
        # missing is `child_tag` within the `tag` schema — matching the wording
        # of the kernel1 counterpart, MissingSchemaEntry.
        super().__init__(f'Missing entry for {child_tag} in {tag} schema')
        self.tag = tag
        self.child_tag = child_tag
161 | schema = defaultdict(set) 162 | 163 | for _, chunk in read_chunks(cfg, buffer): 164 | tag = chunk.tag 165 | 166 | if parent_tag is not None: 167 | schema[parent_tag].add(tag) 168 | 169 | if tag in schema and not schema[tag]: 170 | continue 171 | 172 | try: 173 | generate_schema(cfg, chunk.data, tag, schema) 174 | except Exception: 175 | schema[tag] = set() 176 | 177 | return dict(schema) 178 | -------------------------------------------------------------------------------- /src/nutcracker/kernel2/fileio.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | from collections.abc import Iterator 3 | from contextlib import AbstractContextManager, contextmanager 4 | from types import TracebackType 5 | from typing import cast, overload 6 | 7 | import numpy as np 8 | from numpy.typing import ArrayLike 9 | 10 | 11 | class ResourceFile(AbstractContextManager[memoryview]): 12 | __slots__ = ('buffer', 'closed') 13 | 14 | def __init__(self, buffer: ArrayLike) -> None: 15 | self.buffer = memoryview(buffer) # type: ignore[arg-type] 16 | self.closed = False 17 | 18 | def __len__(self) -> int: 19 | return len(self.buffer) 20 | 21 | def __buffer__(self, _flags: int) -> memoryview: 22 | return self.buffer 23 | 24 | def __exit__( 25 | self, 26 | __exc_type: type[BaseException] | None, 27 | __exc_value: BaseException | None, 28 | __traceback: TracebackType | None, 29 | ) -> bool | None: 30 | self.close() 31 | return None 32 | 33 | @overload 34 | def __getitem__(self, index: slice) -> ArrayLike: ... 35 | @overload 36 | def __getitem__(self, index: int) -> int: ... 
def read_file(file_path: str, key: int = 0x00) -> bytes:
    """Read a whole file into memory, XOR-decrypted with `key` if non-zero."""
    with ResourceFile.load(file_path, key=key) as view:
        return bytes(view)
| findall = staticmethod(tree.findall) 38 | findpath = staticmethod(tree.findpath) 39 | render = staticmethod(tree.render) 40 | renders = staticmethod(tree.renders) 41 | assert_tag = staticmethod(assert_tag) 42 | 43 | 44 | shell = Preset( 45 | header_dtype=IFFChunkHeader, 46 | alignment=2, 47 | inclheader=True, 48 | skip_byte=None, 49 | ) 50 | -------------------------------------------------------------------------------- /src/nutcracker/kernel2/tree.py: -------------------------------------------------------------------------------- 1 | import io 2 | import os 3 | import sys 4 | from collections.abc import Iterable, Iterator 5 | from typing import IO 6 | 7 | from parse import parse # type: ignore[import-untyped] 8 | 9 | from nutcracker.kernel2.element import Element 10 | 11 | 12 | def findall(tag: str, root: Iterable[Element] | Element | None) -> Iterator[Element]: 13 | if not root: 14 | return 15 | if isinstance(root, Element): 16 | root = root.children() 17 | for elem in root: 18 | if parse(tag, elem.tag, evaluate_result=False): 19 | yield elem 20 | 21 | 22 | def find(tag: str, root: Iterable[Element] | Element | None) -> Element | None: 23 | return next(findall(tag, root), None) 24 | 25 | 26 | def findpath( 27 | path: str, root: Iterable[Element] | Element | None 28 | ) -> Iterable[Element] | Element | None: 29 | path = os.path.normpath(path) 30 | if not path or path == '.': 31 | return root 32 | dirname, basename = os.path.split(path) 33 | return find(basename, findpath(dirname, root)) 34 | 35 | 36 | def render( 37 | element: Element, 38 | level: int = 0, 39 | stream: IO[str] = sys.stdout, 40 | ) -> None: 41 | attribs = ''.join( 42 | f' {key}="{value}"' 43 | for key, value in element.attribs.items() 44 | if value is not None 45 | ) 46 | indent = ' ' * level 47 | children = list(element.children()) 48 | closing = '' if children else ' /' 49 | print(f'{indent}<{element.tag}{attribs}{closing}>', file=stream) 50 | if children: 51 | for elem in children: 52 | 
render(elem, level=level + 1, stream=stream) 53 | print(f'{indent}', file=stream) 54 | 55 | 56 | def renders(element: Element) -> str: 57 | with io.StringIO() as stream: 58 | render(element, stream=stream) 59 | return stream.getvalue() 60 | -------------------------------------------------------------------------------- /src/nutcracker/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BLooperZ/nutcracker/7822fc97f3ddad84dbb875298a87b12a15c5de45/src/nutcracker/py.typed -------------------------------------------------------------------------------- /src/nutcracker/runner.py: -------------------------------------------------------------------------------- 1 | import typer 2 | 3 | from nutcracker.smush import runner as smush 4 | from nutcracker.sputm import runner as sputm 5 | 6 | app = typer.Typer() 7 | app.add_typer(smush.app, name='smush') 8 | app.add_typer(sputm.app, name='sputm') 9 | 10 | if __name__ == '__main__': 11 | app() 12 | -------------------------------------------------------------------------------- /src/nutcracker/smush/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BLooperZ/nutcracker/7822fc97f3ddad84dbb875298a87b12a15c5de45/src/nutcracker/smush/__init__.py -------------------------------------------------------------------------------- /src/nutcracker/smush/ahdr.py: -------------------------------------------------------------------------------- 1 | import io 2 | import struct 3 | from dataclasses import dataclass, replace 4 | 5 | from nutcracker.kernel.structured import StructuredTuple 6 | 7 | PALETTE_SIZE = 0x300 8 | 9 | 10 | @dataclass(frozen=True) 11 | class AnimationHeaderV2: 12 | framerate: int | None = None 13 | maxframe: int | None = None 14 | samplerate: int | None = None 15 | dummy2: int | None = None 16 | dummy3: int | None = None 17 | 18 | NO_AHDR_V2 = AnimationHeaderV2() 19 | 20 | 
def from_bytes(data: bytes) -> AnimationHeader:
    """Parse a raw AHDR chunk payload into an AnimationHeader.

    Reads the fixed V1 header first; when it declares version 2, the
    extended V2 trailer is read as well and attached via ``replace``.

    Raises:
        ValueError: if bytes remain after parsing, or if the V2 dummy
            fields carry non-zero values.
    """
    with io.BytesIO(data) as stream:
        header = AHDR_V1.unpack(stream)
        if header.version == 2:
            # Version 2 appends an extended header right after the V1 fields.
            header = replace(header, v2=AHDR_V2.unpack(stream))
        if stream.read():
            raise ValueError('got extra trailing data')
        if header.v2.dummy2 or header.v2.dummy3:
            raise ValueError('non-zero value in header dummies')
        return header
def verify_maxframe(
    frames: Iterator[Element],
    limit: int | None,
) -> Iterator[Element]:
    """Pass frames through unchanged while tracking the largest frame size.

    After the stream is exhausted, raise ValueError when a limit was
    supplied and the observed maximum exceeds it.
    """
    largest = 0
    for frame in frames:
        size = frame.attribs['size']
        if size > largest:
            largest = size
        yield frame
    if limit and largest > limit:
        raise ValueError(f'expected maxframe of {limit} but got {largest}')
def clip(lower: int, upper: int, value: int) -> int:
    """Clamp *value* into the inclusive range [lower, upper]."""
    if value < lower:
        return lower
    if value > upper:
        return upper
    return value
@dataclass(frozen=True)
class FrameGenCtx:
    """Accumulated decoding state threaded through SMUSH frame generation.

    Instances are immutable; each frame-component handler returns an
    updated copy via ``dataclasses.replace`` (see DECODE_FRAME_IMAGE).
    """

    # Current colour palette buffer; seeded from the animation header.
    palette: ArrayBuffer
    # Last decoded frame object: (position on screen, pixel matrix).
    screen: tuple[image.ImagePosition, image.Matrix] = (
        image.ImagePosition(),
        (),
    )
    # Per-channel deltas from a large XPAL chunk, applied by small XPAL chunks.
    delta_pal: Sequence[int] = ()
    # The FRME element currently being decoded (included in assert messages).
    frame: Element | None = None
def generate_frames(
    header: AnimationHeader,
    frames: Iterator[Element],
    parser: Mapping[str, Callable[[FrameGenCtx, bytes], FrameGenCtx]],
) -> Iterator[FrameGenCtx]:
    """Fold each frame's components through their tag handlers.

    Args:
        header: animation header; its palette seeds the initial context.
        frames: FRME elements to decode.
        parser: maps component tags to handler callables; tags without a
            handler fall back to ``unsupported_frame_comp`` (a no-op).

    Yields:
        The context after all components of each frame were applied.
    """
    ctx = FrameGenCtx(header.palette)
    for frame in frames:
        ctx = replace(ctx, frame=frame)
        for comp in frame.children():
            # BUGFIX: dispatch through the supplied mapping. Previously the
            # module-level DECODE_FRAME_IMAGE was consulted and `parser` was
            # silently ignored; behavior is identical for existing callers,
            # which all pass DECODE_FRAME_IMAGE.
            ctx = parser.get(comp.tag, unsupported_frame_comp)(
                ctx,
                comp.data,
            )
        assert ctx.screen is not None
        yield ctx
def convert_fobj(datam: bytes) -> tuple[image.ImagePosition, bytes] | None:
    """Decode a FOBJ payload into (screen position, decoded pixel data).

    Returns None when no decoder is implemented for the frame's codec.
    """
    meta, data = unobj(datam)
    # For codec 1 the x2/y2 fields are used directly as dimensions instead
    # of coordinates relative to x1/y1.
    width = meta.x2 - meta.x1 if meta.codec != 1 else meta.x2
    height = meta.y2 - meta.y1 if meta.codec != 1 else meta.y2
    decode = get_decoder(meta.codec)
    if decode == NotImplemented:
        print(f'Codec not implemented: {meta.codec}')
        return None

    # assert len(datam) % 2 == 0, (basename, meta['codec'])

    # NOTE(review): a non-zero origin has apparently not been observed in
    # the wild; this print requests sample files exhibiting it.
    if meta.x1 != 0 or meta.y1 != 0:
        print('TELL ME')

    print(meta)

    locs = image.ImagePosition(x1=meta.x1, y1=meta.y1, x2=meta.x2, y2=meta.y2)
    return locs, decode(width, height, data)
def encode_frame_objects(
    frames: Iterable[tuple[ImagePosition, TImage]],
    codec: int,
    fake: int,
) -> Iterator[Chunk]:
    """Encode positioned frames as FOBJ chunks.

    Pixels are compressed with *codec*, while the FOBJ header advertises
    *fake* — this allows writing data encoded with one codec under another
    codec id (the --fake option of the CLI below).
    """
    for loc, frame in frames:
        # The header carries the advertised codec id plus the position fields.
        meta = FrameObjectHeader(codec=fake, **asdict(loc))
        print(meta)

        encode = get_encoder(codec)

        width = meta.x2 - meta.x1
        height = meta.y2 - meta.y1

        encoded_frame = encode(width, height, frame.tolist())

        fobj = mkobj(meta, encoded_frame)
        # print(mktag('FOBJ', fobj))

        yield smush.mktag('FOBJ', memoryview(fobj))
parser = argparse.ArgumentParser(description='read smush file') 55 | parser.add_argument('filename', help='filename to read from') 56 | parser.add_argument( 57 | '--codec', 58 | '-c', 59 | action='store', 60 | type=int, 61 | required=True, 62 | help='codec for encoding', 63 | choices=[21, 44], 64 | ) 65 | parser.add_argument( 66 | '--fake', 67 | '-f', 68 | action='store', 69 | type=int, 70 | help='fake codec for FOBJ header', 71 | choices=[21, 44], 72 | ) 73 | parser.add_argument( 74 | '--ref', 75 | '-r', 76 | action='store', 77 | type=str, 78 | help='reference SMUSH file', 79 | ) 80 | parser.add_argument('--target', '-t', help='target file', default='out/NEWFONT.NUT') 81 | 82 | args = parser.parse_args() 83 | 84 | if args.fake is None: 85 | args.fake = args.codec 86 | 87 | frames = grid.read_image_grid(args.filename) 88 | frames = (grid.resize_frame(frame) for frame in frames) 89 | frames = [frame for frame in frames if frame is not None] 90 | 91 | num_frames = len(frames) 92 | print(num_frames) 93 | 94 | fobjs = list(encode_frame_objects(frames, args.codec, args.fake)) 95 | 96 | root = anim.from_path(args.ref) 97 | header, _ = anim.parse(root) 98 | 99 | os.makedirs(os.path.dirname(args.target), exist_ok=True) 100 | write_file(args.target, make_nut_file(header, len(fobjs), fobjs)) 101 | -------------------------------------------------------------------------------- /src/nutcracker/smush/encode_san_seq.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import glob 4 | import os 5 | import struct 6 | from collections import deque 7 | from collections.abc import Iterable, Iterator, Sequence 8 | from dataclasses import asdict, dataclass, replace 9 | from itertools import chain 10 | 11 | import numpy as np 12 | from PIL import Image 13 | 14 | from nutcracker.codex.codex import get_encoder 15 | from nutcracker.graphics.image import ImagePosition 16 | from nutcracker.kernel2.element import Element 17 | 
def decode_frame(header: ahdr.AnimationHeader, idx: int, frame: Element) -> FrameGenCtx:
    """Scan one FRME element and record its sequence index in the context.

    The sequence index is read from the frame's (possibly compressed)
    frame-object header via convert_fobj_meta; frames with neither FOBJ
    nor ZFOB leave seq_ind at its default.
    NOTE(review): *header* is currently unused here — presumably kept for
    signature symmetry with the other per-frame helpers; confirm.
    """
    ctx = FrameGenCtx(idx=idx, frame=frame)
    for comp in frame.children():
        if comp.tag == 'FOBJ':
            decoded = convert_fobj_meta(comp.data)
            ctx = replace(ctx, seq_ind=decoded)
        elif comp.tag == 'ZFOB':
            # ZFOB is a zlib-compressed FOBJ; inflate before reading its header.
            data = fobj.decompress(comp.data)
            decoded = convert_fobj_meta(data)
            ctx = replace(ctx, seq_ind=decoded)
    return ctx
def check_dirty(frame_range: Iterable[int], files: set[str]) -> bool:
    """Return True when any frame number in *frame_range* has an extracted
    image (named ``FRME_NNNNN.png``) present in *files*."""
    for num in frame_range:
        if f'FRME_{num:05d}.png' in files:
            return True
    return False
def encode_san(root: Element, directory: str) -> bytes:
    """Rebuild a SMUSH animation, re-encoding sequences whose frames were edited.

    Frame images are looked up as ``FRME_NNNNN.png`` files inside
    *directory*; sequences with no edited frames are copied through
    verbatim (see replace_dirty_sequences).
    """
    header, frames = anim.parse(root)
    frames = replace_dirty_sequences(header, frames, directory)
    return anim.compose(header, (smush.mktag('FRME', frame) for frame in frames))
def decompress(data: bytes) -> bytes:
    """Inflate a ZFOB payload: a 4-byte big-endian size prefix followed by
    a zlib stream. The stated size is checked against the inflated length."""
    expected_size = int.from_bytes(data[:4], byteorder='big')
    inflated = zlib.decompress(data[4:])
    assert len(inflated) == expected_size
    return inflated
@app.command('decode')
def decode(
    files: list[str] = typer.Argument(..., help='Files to read from'),
    nut: bool = typer.Option(False, '--nut', help='Decode to grid image'),
    target_dir: str = typer.Option('out', '--target', '-t', help='Target directory'),
) -> None:
    """Decode SMUSH animations to images under ``<target_dir>/<basename>``.

    With --nut, decode as a NUT font into a single character-grid image;
    otherwise decode each frame to a separate image file.
    """
    for filename in get_files(files):
        basename = os.path.basename(filename)
        print(f'Decoding file: {basename}')
        root = anim.from_path(filename)
        output_dir = os.path.join(target_dir, basename)
        if nut:
            decode_nut(root, output_dir)
        else:
            decode_san(root, output_dir)
def char_from_bytes(data: bytes, decoder: Callable[[bytes, int, int], bytes]) -> DataFrame:
    """Parse a single glyph record: a fixed header followed by encoded pixels.

    *decoder* receives ``(payload, width, height)`` and returns the raw
    pixel bytes for the glyph.
    """
    # Header fields, per the unpack below: width, height, x-offset, y-offset.
    width, cheight, xoff, yoff = CHAR_HEADER.unpack(data[: CHAR_HEADER.size])
    data = decoder(data[CHAR_HEADER.size :], width, cheight)
    return DataFrame(
        width=width,
        height=cheight,
        xoff=xoff,
        yoff=yoff,
        data=image.convert_to_pil_image(data, size=(width, cheight)),
    )
def get_chars(root: Iterable[Element]) -> Iterator[Element]:
    """Recursively yield every CHAR element, descending only through the
    LECF/LFLF container elements; all other tags are skipped."""
    for elem in root:
        tag = elem.tag
        if tag == 'CHAR':
            yield elem
        elif tag in {'LECF', 'LFLF'}:
            yield from get_chars(elem.children())
def calc_bpp(x: int) -> int:
    """Smallest power-of-two bits-per-pixel able to represent *x* distinct values."""
    highest_bit = max((x - 1).bit_length() - 1, 0)
    return 1 << highest_bit.bit_length()
                            ref)
    with io.BytesIO(data) as stream:
        # CHAR payload starts with a 32-bit size field that excludes itself;
        # dataend_diff preserves the original (sometimes off-by-a-few) delta.
        stream.seek(0, io.SEEK_END)
        dataend_real = stream.tell() - 4
        stream.seek(0, io.SEEK_SET)
        dataend = int.from_bytes(stream.read(4), byteorder='little', signed=False)
        dataend_diff = dataend_real - dataend
        version = ord(stream.read(1))
        color_map = stream.read(16)
        bpp = ord(stream.read(1))
        height = ord(stream.read(1))
        print(dataend_diff, version, color_map, bpp, height)

    frames = grid.read_image_grid(filename)
    frames = list(filter_empty_frames(frames))
    # Drop trailing missing frames so nchars matches the last real glyph.
    while not frames[-1]:
        frames = frames[:-1]
    nchars = len(frames)
    print(nchars)
    frames = (grid.resize_frame(frame) for frame in frames)
    frames, bpps = zip(*bind(get_frame_bpp, frames), strict=True)

    # Encoded glyphs must fit in the reference font's bit depth.
    v_bpp = max(val for val in bpps if val)
    print(f'{v_bpp}v_bpp, {bpp}bpp')
    assert v_bpp <= bpp, (bpp, v_bpp)
    encoder = (
        partial(encode_bpp_char, bpp=bpp) if bpp in (1, 2, 4) else encode_lined_rle
    )
    frames = list(encode_frames(frames, encoder))
    assert nchars == len(frames)
    with io.BytesIO() as data_stream, io.BytesIO() as idx_stream:
        # Rebuild header: version, 16-byte palette, bpp, height, glyph count.
        idx_stream.write(version.to_bytes(1, byteorder='little', signed=False))
        idx_stream.write(color_map)
        idx_stream.write(bpp.to_bytes(1, byteorder='little', signed=False))
        idx_stream.write(height.to_bytes(1, byteorder='little', signed=False))
        idx_stream.write(nchars.to_bytes(2, byteorder='little', signed=False))
        # Offsets are relative to 17 bytes before the offset table end
        # (presumably the version byte position) — TODO confirm against decoder.
        offset = idx_stream.tell() - 17 + 4 * nchars
        for frame in frames:
            if not frame:
                idx_stream.write(b'\00\00\00\00')  # zero offset = missing glyph
            else:
                print(frame)
                data_stream.write(frame)
                idx_stream.write(offset.to_bytes(4, byteorder='little', signed=False))
                offset += len(frame)
        out_data = idx_stream.getvalue() + data_stream.getvalue()
        # Re-apply the original size-field delta so round-trips are byte-stable.
        out = (len(out_data) - dataend_diff).to_bytes(
            4,
            byteorder='little',
            signed=False,
        ) + out_data
        return out
--------------------------------------------------------------------------------
/src/nutcracker/sputm/costume/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BLooperZ/nutcracker/7822fc97f3ddad84dbb875298a87b12a15c5de45/src/nutcracker/sputm/costume/__init__.py
--------------------------------------------------------------------------------
/src/nutcracker/sputm/costume/awiz.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3\
"""Decode AWIZ image resources to PNG using room/default palettes."""
import os
from struct import Struct
from typing import NamedTuple

from nutcracker.kernel.structured import StructuredTuple
from nutcracker.sputm.costume.akos import decode32
from nutcracker.sputm.room.pproom import get_rooms, read_room_settings
from nutcracker.sputm.tree import open_game_resource
from nutcracker.utils.funcutils import flatten

from ..preset import sputm


class WizHeader(NamedTuple):
    # WIZH chunk payload: compression type, image width, image height.
    comp: int
    width: int
    height: int


WIZ_HEADER = StructuredTuple(('comp', 'width', 'height'), Struct('<3I'), WizHeader)


def read_wiz_header(data: bytes):
    """Unpack the 12-byte (3 x uint32-le) WIZH header from *data*."""
    return WIZ_HEADER.unpack_from(data)


def read_awiz_resource(awiz, room_palette):
    """Decode one AWIZ element to a paletted PIL image.

    Uses the element's own RGBS palette when present, else *room_palette*.
    Only compression type 1 is supported; anything else raises ValueError.
    """
    print(awiz.children)
    # awiz = iter(awiz)
    rgbs = sputm.find('RGBS', awiz)
    cnvs = sputm.find('CNVS', awiz)
    relo = sputm.find('RELO', awiz)
    wizh = sputm.find('WIZH', awiz)
    wizd = sputm.find('WIZD', awiz)
    comp, width, height = read_wiz_header(wizh.data)
    print(comp, width, height)
    palette = rgbs.data if rgbs is not None else room_palette
    if comp == 1:
        im = decode32(width, height, None, wizd.data)
        im.putpalette(palette)
        return im
    else:
        raise ValueError(comp)


if __name__ == '__main__':
    import argparse
    import glob
    import os

    parser = argparse.ArgumentParser(description='read smush file')
    parser.add_argument('files', nargs='+', help='files to read from')
    args = parser.parse_args()

    # Expand globs, de-duplicate, and process each game index file.
    files = sorted(set(flatten(glob.iglob(r) for r in args.files)))
    print(files)
    for filename in files:
        print(filename)

        gameres = open_game_resource(filename)
        basename = gameres.basename

        root = gameres.read_resources(
            # schema=narrow_schema(
            #     SCHEMA, {'LECF', 'LFLF', 'RMDA', 'ROOM', 'PALS'}
            # )
        )

        os.makedirs(f'AWIZ_out/{basename}', exist_ok=True)

        for t in root:
            for lflf in get_rooms(t):
                print(lflf, lflf.attribs['path'])
                _, palette, _, _ = read_room_settings(lflf)

                # Standalone AWIZ images use the room palette.
                for awiz in sputm.findall('AWIZ', lflf):
                    print(awiz, awiz.attribs['path'])

                    im = read_awiz_resource(awiz, palette)
                    im.save(
                        f'AWIZ_out/{basename}/{os.path.basename(lflf.attribs["path"])}_{os.path.basename(awiz.attribs["path"])}.png',
                    )

                # MULT containers may carry their own default (DEFA/RGBS) palette.
                for mult in sputm.findall('MULT', lflf):
                    rgbs = None
                    defa = sputm.find('DEFA', mult)
                    if defa:
                        rgbs = sputm.find('RGBS', defa)
                    wrap = sputm.find('WRAP', mult)
                    # First WRAP child is skipped — presumably the OFFS table; confirm.
                    for awiz in wrap.children[1:]:
                        im = read_awiz_resource(awiz, rgbs.data if rgbs else palette)
                        im.save(
                            f'AWIZ_out/{basename}/{os.path.basename(lflf.attribs["path"])}_{os.path.basename(mult.attribs["path"])}_{os.path.basename(awiz.attribs["path"])}.png',
                        )
--------------------------------------------------------------------------------
/src/nutcracker/sputm/costume/awiz_encode.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3\
"""Re-encode edited AWIZ PNGs back into AWIZ chunks (lined-RLE WIZD)."""
import os
import pathlib
from struct import Struct
from typing import NamedTuple

import numpy as np
from PIL import Image

from nutcracker.codex.rle import encode_lined_rle
from nutcracker.kernel.structured import
                                        StructuredTuple
from nutcracker.sputm.costume.akos import decode32
from nutcracker.sputm.room.pproom import get_rooms, read_room_settings
from nutcracker.sputm.tree import open_game_resource
from nutcracker.utils.fileio import write_file
from nutcracker.utils.funcutils import flatten

from ..preset import sputm


# NOTE(review): WizHeader / read_wiz_header / read_awiz_resource below
# duplicate nutcracker.sputm.costume.awiz — consider importing instead.
class WizHeader(NamedTuple):
    # WIZH chunk payload: compression type, image width, image height.
    comp: int
    width: int
    height: int


WIZ_HEADER = StructuredTuple(('comp', 'width', 'height'), Struct('<3I'), WizHeader)


def read_wiz_header(data: bytes):
    """Unpack the 12-byte (3 x uint32-le) WIZH header from *data*."""
    return WIZ_HEADER.unpack_from(data)


def read_awiz_resource(awiz, room_palette):
    """Decode one AWIZ element to a paletted PIL image (comp type 1 only)."""
    print(awiz.children)
    # awiz = iter(awiz)
    rgbs = sputm.find('RGBS', awiz)
    cnvs = sputm.find('CNVS', awiz)
    relo = sputm.find('RELO', awiz)
    wizh = sputm.find('WIZH', awiz)
    wizd = sputm.find('WIZD', awiz)
    comp, width, height = read_wiz_header(wizh.data)
    print(comp, width, height)
    palette = rgbs.data if rgbs is not None else room_palette
    if comp == 1:
        im = decode32(width, height, None, wizd.data)
        im.putpalette(palette)
        return im
    else:
        raise ValueError(comp)


if __name__ == '__main__':
    import argparse
    import glob
    import os

    parser = argparse.ArgumentParser(description='read smush file')
    parser.add_argument('files', nargs='+', help='files to read from')
    args = parser.parse_args()

    files = sorted(set(flatten(glob.iglob(r) for r in args.files)))
    print(files)
    for filename in files:
        print(filename)

        gameres = open_game_resource(filename)
        basename = gameres.basename

        root = gameres.read_resources(
            # schema=narrow_schema(
            #     SCHEMA, {'LECF', 'LFLF', 'RMDA', 'ROOM', 'PALS'}
            # )
        )

        os.makedirs(f'AWIZ_out/{basename}', exist_ok=True)

        for t in root:
            for lflf in get_rooms(t):
                print(lflf, lflf.attribs['path'])
                _, palette, _, _ = read_room_settings(lflf)

                for awiz in sputm.findall('AWIZ', lflf):
                    print(awiz, awiz.attribs['path'])

                    # Decode the original; if an edited PNG exists in AWIZ_out,
                    # re-encode that instead of the decoded original.
                    im = read_awiz_resource(awiz, palette)

                    imname = f'{os.path.basename(lflf.attribs["path"])}_{os.path.basename(awiz.attribs["path"])}.png'
                    fullpath = pathlib.Path('AWIZ_out', basename, imname)
                    if fullpath.exists():
                        im = Image.open(fullpath)

                    encoded = encode_lined_rle(np.asarray(im))

                    # Write the patched AWIZ chunk, replacing only WIZD data.
                    os.makedirs(
                        os.path.dirname(f'{basename}/{awiz.attribs["path"]}'),
                        exist_ok=True,
                    )
                    write_file(
                        f'{basename}/{awiz.attribs["path"]}',
                        sputm.mktag(
                            awiz.tag,
                            sputm.write_chunks(
                                sputm.mktag(e.tag, encoded)
                                if e.tag == 'WIZD'
                                else sputm.mktag(e.tag, e.data)
                                for e in awiz
                            ),
                        ),
                    )
--------------------------------------------------------------------------------
/src/nutcracker/sputm/costume/banner.py:
--------------------------------------------------------------------------------
"""Typer CLI to decode/encode standalone *.wiz banner images."""
import glob
import pathlib
import struct

import numpy as np
import typer
from PIL import Image

from nutcracker.codex.rle import encode_lined_rle
from nutcracker.kernel2.fileio import ResourceFile
from nutcracker.sputm.costume.awiz import read_awiz_resource
from nutcracker.sputm.preset import sputm
from nutcracker.sputm.room.orgroom import make_wrap
from nutcracker.utils.fileio import write_file
from nutcracker.utils.funcutils import flatten

app = typer.Typer()


@app.command()
def decode(
    files: list[str] = typer.Argument(..., help='*.wiz files to read from'),
) -> None:
    """Decode AWIZ images inside MULT chunks of *.wiz files to PNG."""
    files = sorted(set(flatten(glob.iglob(r) for r in files)))
    for filename in files:
        with ResourceFile.load(filename) as resource:
            chunks = list(sputm.map_chunks(resource))

        for chunk in chunks:
            sputm.render(chunk)

            if chunk.tag == 'MULT':
                wrap = sputm.find('WRAP', chunk)
                defa = sputm.find('DEFA', chunk)
                rgbs = sputm.find('RGBS', defa)
                assert wrap
                assert defa
                assert rgbs
                # NOTE(review): children is called here but accessed as an
                # attribute (wrap.children[1:]) in costume/awiz.py — one of the
                # two is likely stale against the kernel2 Element API; confirm.
                children = list(wrap.children())
                for awiz in children[1:]:
                    im = read_awiz_resource(awiz, rgbs.data)
                    im.save(f'{pathlib.Path(filename).stem}.png')


@app.command()
def encode(
    files: list[str] = typer.Argument(..., help='*.png files to read from'),
) -> None:
    """Encode PNGs into MULT/AWIZ *.wiz files using lined-RLE compression."""
    files = sorted(set(flatten(glob.iglob(r) for r in files)))
    for filename in files:
        im = Image.open(filename)
        width, height = im.size
        palette = bytes(im.getpalette())
        encoded = encode_lined_rle(np.asarray(im))

        # DEFA carries the default palette; WIZH declares comp=1 + dimensions.
        defa = sputm.mktag('DEFA', sputm.mktag('RGBS', palette))
        wizh = struct.pack('<3I', 1, width, height)
        awiz = sputm.untag(
            sputm.mktag(
                'AWIZ',
                sputm.write_chunks(
                    [
                        sputm.mktag('WIZH', wizh),
                        # Hard-coded SPOT payload — origin of these bytes is
                        # not visible here; presumably a fixed hotspot. Confirm.
                        sputm.mktag('SPOT', b'\xbf\x00\x00\x00Q\xff\xff\xff'),
                        sputm.mktag('WIZD', encoded),
                    ],
                ),
            ),
        )
        wrap = make_wrap(
            [(awiz, awiz.data)],
        )
        write_file(
            f'{pathlib.Path(filename).stem}.wiz',
            sputm.mktag('MULT', sputm.write_chunks([defa, wrap])),
        )


if __name__ == '__main__':
    app()
--------------------------------------------------------------------------------
/src/nutcracker/sputm/preset.py:
--------------------------------------------------------------------------------
"""Preconfigured SPUTM chunk-parsing shell (IFF-style, size-inclusive headers)."""
from nutcracker.kernel2 import preset
from nutcracker.kernel2.chunk import IFFChunkHeader

from .schema import SCHEMA

sputm = preset.shell(
    header_dtype=IFFChunkHeader,
    alignment=1,
    inclheader=True,
    skip_byte=0x80,  # padding byte value to skip between chunks
    schema=SCHEMA,
    errors='ignore',
)
--------------------------------------------------------------------------------
/src/nutcracker/sputm/resource.py:
--------------------------------------------------------------------------------
"""Locate and open SCUMM game resources; detect engine version from the index."""
import glob
import os
from collections.abc import Sequence
from dataclasses import asdict, dataclass, field

from nutcracker.kernel2.fileio import ResourceFile
from nutcracker.kernel2.element import Element

from .index import read_directory_leg, read_directory_leg_v8
from .preset import sputm

# (index extension, MAXS chunk size incl. 8-byte header) -> (version, HE version)
version_by_ext_maxs = {
    ('.LA0', 176): (8, 0),
    ('.LA0', 138): (7, 0),
    ('.000', 138): (7, 0),
    ('.HE0', 52): (6, 99),
    ('.HE0', 46): (6, 90),  # (6, 90), (6, 98)
    ('.HE0', 40): (6, 80),  # (6, 72), (6, 73), (6, 80),
    ('.HE0', 38): (6, 71),  # (6, 60), (6, 70), (6, 71),
    ('.000', 38): (6, 0),
    ('.SM0', 38): (6, 0),
    ('.000', 26): (5, 0),
    ('.LFL', 26): (5, 0),
}

# Default XOR cipher key per index-file extension.
chiper_keys = {
    '.000': 0x69,
    '.SM0': 0x69,
    '.HE0': 0x69,
    '.LA0': 0x00,
}


@dataclass
class _GameMeta:
    # Static facts about a game installation, before the index is attached.
    basedir: str
    basename: str
    ext: str
    version: int
    he_version: int
    chiper_key: int


@dataclass
class Game(_GameMeta):
    # Parsed index elements and resolved disk file names.
    index: Sequence[Element] = field(repr=False)
    disks: Sequence[str] = field(repr=False)


def get_disk(game: _GameMeta, num: int) -> str:
    """Return the data-disk filename for disk number *num*, per naming scheme."""
    if game.ext == '.000':
        return f'{game.basename}.{num:03d}'
    if game.ext == '.SM0':
        return f'{game.basename}.SM{num:d}'
    if game.ext == '.HE0':
        # HE >= 98 uses (a), (b), ... suffixes for disks beyond the first.
        if game.he_version >= 98 and num > 0:
            return f"{game.basename}.({chr(ord('`') + num)})"
        return f'{game.basename}.HE{num:d}'
    if game.ext == '.LA0':
        return f'{game.basename}.LA{num:d}'
    assert game.ext == '.LFL'
    return f'DISK{num:02d}.LEC' if num > 0 else '000.LFL'


def load_resource(index_file: str | os.PathLike[str], chiper_key: int | None = None) -> Game:
    """Open *index_file*, detect game version, and return a populated Game.

    When *chiper_key* is None the XOR key is guessed from the extension.
    """
    print(index_file)
    basename, ext = os.path.splitext(os.path.basename(index_file))
    ext = ext.upper()
    basedir = os.path.dirname(index_file)

    if chiper_key is None:
        chiper_key = chiper_keys.get(ext, 0x00)

    with ResourceFile.load(index_file, key=chiper_key) as index:
        schema = sputm.generate_schema(index)
        index_root = list(sputm(schema=schema).map_chunks(index))

    # Detect version from index: the MAXS chunk size is version-specific.
    maxs = sputm.find('MAXS', index_root)
    version, he_version = version_by_ext_maxs[(ext, len(maxs.data) + 8)]
    if sputm.find('INIB', index_root):
        assert he_version >= 90
        he_version = max(98, he_version)
    if 0 < he_version < 72 and sputm.find('DROO', index_root):
        he_version = 60
    # TODO: can differentiate he80 from he72 by size of RNAM?

    room_pattern = '{room:03d}.LFL'  # noqa: F841

    if ext == '.LFL':
        basename = os.path.basename(basedir)

    disk_elem = sputm.find('DROO', index_root) or sputm.find('DISK', index_root)
    read_dir = read_directory_leg_v8 if version == 8 else read_directory_leg

    # Fall back to a 2-disk stub layout when no directory chunk exists.
    disk_data = read_dir(disk_elem.data) if disk_elem else ((0, (0, 0)), (1, (1, 0)))

    disks = sorted(set(disk for _room_id, (disk, _) in disk_data))

    game = _GameMeta(basedir, basename, ext, version, he_version, chiper_key)

    return Game(
        **(asdict(game)),
        index=index_root,
        disks=tuple(get_disk(game, disk) for disk in disks),
    )


if __name__ == '__main__':
    import argparse

    from nutcracker.utils.funcutils import flatten

    parser = argparse.ArgumentParser(description='read smush file')
    parser.add_argument('files', nargs='+', help='files to read from')
    args = parser.parse_args()

    files = set(flatten(glob.iglob(r) for r in args.files))
    print(files)
    for filename in files:
        print(load_resource(filename))
--------------------------------------------------------------------------------
/src/nutcracker/sputm/room/encode_image.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Encode room background images into SMAP/BOMP/BMAP blocks (v5-v8)."""

import struct

import numpy as np
from PIL import Image

from nutcracker.codex.codex import decode1, encode1
from nutcracker.codex.smap import (
    decode_smap,
    encode_he,
    encode_smap,
    extract_smap_codes,
)
from nutcracker.kernel2.element import Element
from nutcracker.utils.fileio import write_file

from ..preset import sputm


def encode_block_v8(
    filename: str,
    blocktype: str,
    version: int = 8,
    ref: Element | None = None,
) -> bytes:
    """Encode the image in *filename* as a *blocktype* chunk payload.

    *ref* is the original chunk element; it supplies strip codes (SMAP),
    header bytes (BOMP, version < 8) or the compression code byte (BMAP)
    so the re-encoded block matches the original format.
    Raises ValueError for unsupported block types / BMAP codes.
    """
    im = Image.open(filename)
    npim = np.asarray(im, dtype=np.uint8)

    if blocktype == 'SMAP':
        ref_data = ref.data if ref else None
        if version == 8 and ref_data:
            # v8 wraps the strip table in BSTR/WRAP; unwrap to get raw SMAP data.
            chunk = bytes(sputm.mktag(blocktype, ref_data))
            s = sputm.generate_schema(chunk)
            image = next(sputm(schema=s).map_chunks(chunk))

            bstr = sputm.findpath('BSTR/WRAP', image)
            sputm.render(bstr)
            ref_data = bstr.data[8:] if bstr else None

        # Reuse the original per-strip codecs so output stays format-compatible.
        codes = extract_smap_codes(*npim.shape, ref_data) if ref_data else None
        smap = encode_smap(npim, codes=codes)
        assert np.array_equal(npim, decode_smap(*npim.shape, smap))
        # TODO: detect version, older games should return here
        if version < 8:
            return smap

        # v8: split offsets table from strip data and rewrap as BSTR/WRAP/OFFS.
        num_strips = im.width // 8
        offs = smap[: num_strips * 4]
        data = smap[4 * num_strips :]
        smap_v8 = bytes(sputm.mktag(
            'BSTR',
            bytes(sputm.mktag('WRAP', bytes(sputm.mktag('OFFS', offs)) + data)),
        ))

        # verify round-trip before returning
        chunk = bytes(sputm.mktag(blocktype, smap_v8))
        s = sputm.generate_schema(chunk)
        image = next(sputm(schema=s).map_chunks(chunk))

        bstr = sputm.findpath('BSTR/WRAP', image)
        assert np.array_equal(npim, decode_smap(*npim.shape, bstr.data[8:]))

        return smap_v8

    if blocktype == 'BOMP':
        bomp = encode1(npim)
        assert np.array_equal(npim, decode1(*npim.shape[::-1], bomp))
        if version < 8:
            # pre-v8 BOMP keeps its original 10-byte header verbatim
            header = ref.data[:10]
            print(header, struct.pack('<5H', 0, *npim.shape[::-1], 0, 0))
            return bytes(header) + bomp
        return struct.pack('<2I', *npim.shape[::-1]) + bomp

    if blocktype == 'BMAP':
        assert ref
        # First byte of the reference data selects the HE codec; palen = code % 10.
        code = ref.data[0]
        palen = code % 10
        if 134 <= code <= 138:
            return bytes([code]) + encode_he(bytes(npim.ravel()), palen)
        elif 144 <= code <= 148:
            # TODO: handle transparency
            # tr = TRANSPARENCY
            return bytes([code]) + encode_he(bytes(npim.ravel()), palen)
        elif code == 150:
            # solid fill: whole image is a single color byte
            assert len(set(npim.ravel())) == 1
            return bytes([code, npim.ravel()[0]])

    raise ValueError(blocktype)


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='read smush file')
    parser.add_argument('filename', help='filename to read from')
    parser.add_argument('-f', '--format', default='SMAP', help='filename to read from')
    args = parser.parse_args()

    im = Image.open(args.filename)
    npim = np.asarray(im, dtype=np.uint8)

    smap = encode_smap(npim)
    assert np.array_equal(npim, decode_smap(*npim.shape, smap))

    write_file('SMAP', sputm.mktag('SMAP', smap))
--------------------------------------------------------------------------------
/src/nutcracker/sputm/room/proom.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Parse room headers and decode room background images (v5-v8)."""

import io
from dataclasses import dataclass

import numpy as np

from nutcracker.codex.codex import decode1
from nutcracker.codex.smap import decode_he, decode_smap, read_uint16le, read_uint32le

from ..preset import sputm


def read_room_background_v8(image, width, height, zbuffers, transparency=None):
    """Decode a v8 room background (SMAP in BSTR/WRAP, or BOMP) to a numpy array."""
    if image.tag == 'SMAP':
        sputm.render(image)
        bstr = sputm.findpath('BSTR/WRAP', image)
        if not bstr:
            return None
        # skip 8-byte OFFS chunk header before the strip table
        return decode_smap(height, width, bstr.data[8:], transparency=transparency)
    elif image.tag == 'BOMP':
        with io.BytesIO(image.data) as s:
            # v8 BOMP header: two uint32-le dimensions, then RLE data
            width = read_uint32le(s)
            height = read_uint32le(s)
            im = decode1(width, height, s.read())
            return np.asarray(im, dtype=np.uint8)
    else:
        raise ValueError(f'Unknown image codec: {image.tag}')


def read_room_background(image, width, height, zbuffers, transparency=None):
    """Decode a pre-v8 room background (SMAP / BOMP / HE BMAP) to a numpy array.

    NOTE(review): returns None (after printing) for unrecognized tags and for
    BMAP codes outside the handled ranges — callers must tolerate None.
    """
    if image.tag == 'SMAP':
        return decode_smap(height, width, image.data, transparency)
    elif image.tag == 'BOMP':
        with io.BytesIO(image.data) as s:
            # pylint: disable=unused-variable
            # pre-v8 BOMP header: 5 x uint16-le (unk, width, height, pad, pad)
            unk = read_uint16le(s)
            width = read_uint16le(s)
            height = read_uint16le(s)
            # TODO: check if x,y or y,x
            xpad, ypad = read_uint16le(s), read_uint16le(s)
            im = decode1(width, height, s.read())
            return np.asarray(im, dtype=np.uint8)
    elif image.tag == 'BMAP':
        with io.BytesIO(image.data) as s:
            # first byte selects the HE codec; palen = code % 10
            code = s.read(1)[0]
            palen = code % 10
            if 134 <= code <= 138:
                res = decode_he(s, width * height, palen)
                return np.frombuffer(res, dtype=np.uint8).reshape((height, width))
            elif 144 <= code <= 148:
                # TODO: handle transparency
                # tr = TRANSPARENCY
                res = decode_he(s, width * height, palen)
                return np.frombuffer(res, dtype=np.uint8).reshape((height, width))
            elif code == 150:
                # solid fill: single color byte for the whole image
                return np.full((height, width), s.read(1)[0], dtype=np.uint8)
    else:
        print(image.tag, image.data)
        # raise ValueError(f'Unknown image codec: {tag}')


@dataclass
class RoomHeader:
    # Parsed RMHD chunk; version/zbuffers/transparency only exist in v7/v8.
    width: int
    height: int
    robjs: int
    version: int | None = None
    zbuffers: int | None = None
    transparency: int | None = None


def read_rmhd_structured(data) -> RoomHeader:
    """Parse an RMHD payload; its length (6/10/24) implies the game version."""
    version = None
    zbuffers = None
    transparency = None
    if len(data) == 6:
        # 'Game Version < 7': 3 x uint16-le
        rwidth = int.from_bytes(data[:2], signed=False, byteorder='little')
        rheight = int.from_bytes(data[2:4], signed=False, byteorder='little')
        robjs = int.from_bytes(data[4:], signed=False, byteorder='little')
    elif len(data) == 10:
        # 'Game Version == 7': uint32 version + 3 x uint16-le
        version = int.from_bytes(data[:4], signed=False, byteorder='little')
        rwidth = int.from_bytes(data[4:6], signed=False, byteorder='little')
        rheight = int.from_bytes(data[6:8], signed=False, byteorder='little')
        robjs = int.from_bytes(data[8:], signed=False, byteorder='little')
    else:
        # 'Game Version == 8': 6 x uint32-le
        assert len(data) == 24
        version = int.from_bytes(data[:4], signed=False, byteorder='little')
        rwidth = int.from_bytes(data[4:8], signed=False, byteorder='little')
        rheight = int.from_bytes(data[8:12], signed=False, byteorder='little')
        robjs = int.from_bytes(data[12:16], signed=False, byteorder='little')
        zbuffers = int.from_bytes(data[16:20], signed=False, byteorder='little')
        transparency = int.from_bytes(data[20:24], signed=False, byteorder='little')
    return RoomHeader(
        width=rwidth,
        height=rheight,
        robjs=robjs,
        version=version,
        zbuffers=zbuffers,
        transparency=transparency,
    )


def read_imhd(data):
    """Parse a pre-v7 IMHD object-image header; returns (id, h, w, x, y)."""
    # pylint: disable=unused-variable
    with io.BytesIO(data) as stream:
        obj_id = read_uint16le(stream)
        obj_num_imnn = read_uint16le(stream)
        # should be per imnn, but at least 1
        obj_nums_zpnn = read_uint16le(stream)
        obj_flags = stream.read(1)[0]
        obj_unknown = stream.read(1)[0]
        obj_x = read_uint16le(stream)
        obj_y = read_uint16le(stream)
        obj_width = read_uint16le(stream)
        obj_height = read_uint16le(stream)
        obj_hotspots = stream.read()
        if obj_hotspots:
            # TODO: read hotspots
            pass
        return obj_id, obj_height, obj_width, obj_x, obj_y


def read_imhd_v7(data):
    """Parse a v7 IMHD object-image header; returns (id, h, w, x, y)."""
    # pylint: disable=unused-variable
    with io.BytesIO(data) as stream:
        version = read_uint32le(stream)
        obj_id = read_uint16le(stream)
        image_count = read_uint16le(stream)
        obj_x = read_uint16le(stream)
        obj_y = read_uint16le(stream)
        obj_width = read_uint16le(stream)
        obj_height = read_uint16le(stream)
        obj_unknown = stream.read(3)
        actor_dir = stream.read(1)[0]
        num_hotspots = read_uint16le(stream)
        obj_hotspots = stream.read()
        if obj_hotspots:
            # TODO: read hotspots
            pass
        return obj_id, obj_height, obj_width, obj_x, obj_y


def read_imhd_v8(data):
    """Parse a v8 IMHD header; returns (name, h, w, x, y) — name, not id."""
    # pylint: disable=unused-variable
    with io.BytesIO(data) as stream:
        # 40-byte NUL-padded object name
        name = stream.read(40).split(b'\0')[0].decode()
        version = read_uint32le(stream)
        image_count = read_uint32le(stream)
        obj_x = read_uint32le(stream)
        obj_y = read_uint32le(stream)
        obj_width = read_uint32le(stream)
        obj_height = read_uint32le(stream)
        actor_dir = read_uint32le(stream)
        flags = read_uint32le(stream)
        obj_hotspots = stream.read()
        if obj_hotspots:
            # TODO: read hotspots
            pass
        return name, obj_height, obj_width, obj_x, obj_y
--------------------------------------------------------------------------------
/src/nutcracker/sputm/room/runner.py:
--------------------------------------------------------------------------------
"""Typer CLI for extracting and patching room background images."""
import os
from pathlib import Path

import typer

from nutcracker.sputm.room.orgroom import make_room_images_patch
from nutcracker.sputm.room.pproom import extract_room_images
from nutcracker.utils.fileio import write_file

from ..tree import open_game_resource

app = typer.Typer()


@app.command('decode')
def decode(
    filename: Path = typer.Argument(..., help='Game resource index file'),
    ega_mode: bool = typer.Option(False, '--ega',
                                  help='Simulate EGA images decoding'),
) -> None:
    """Extract backgrounds, objects and object layers to <basename>/IMAGES."""
    gameres = open_game_resource(filename)
    basename = gameres.basename

    root = gameres.read_resources(
        # schema=narrow_schema(
        #     SCHEMA, {'LECF', 'LFLF', 'RMDA', 'ROOM', 'PALS'}
        # )
    )

    rnam = gameres.rooms
    version = gameres.game.version

    basedir = os.path.join(basename, 'IMAGES')
    os.makedirs(basedir, exist_ok=True)

    os.makedirs(os.path.join(basedir, 'backgrounds'), exist_ok=True)
    os.makedirs(os.path.join(basedir, 'objects'), exist_ok=True)
    os.makedirs(os.path.join(basedir, 'objects_layers'), exist_ok=True)

    extract_room_images(root, basedir, rnam, version, ega_mode=ega_mode)


@app.command('encode')
def encode(
    dirname: Path = typer.Argument(..., help='Patch directory'),
    ref: Path = typer.Option(..., '--ref', help='Reference resource index'),
) -> None:
    """Re-encode edited images from <dirname>/IMAGES into patch resources."""
    gameres = open_game_resource(ref)
    basename = os.path.basename(os.path.normpath(dirname))

    print(f'Creating patch images for: {basename}')

    root = gameres.read_resources(
        # schema=narrow_schema(
        #     SCHEMA, {'LECF', 'LFLF', 'RMDA', 'ROOM', 'PALS'}
        # )
    )

    for path, content in make_room_images_patch(
        root,
        os.path.join(basename, 'IMAGES'),
        gameres.rooms,
        gameres.game.version,
    ):
        res_path = os.path.join(dirname, path)
        os.makedirs(os.path.dirname(res_path), exist_ok=True)
        write_file(res_path, content)


if __name__ == '__main__':
    app()
--------------------------------------------------------------------------------
/src/nutcracker/sputm/runner.py:
--------------------------------------------------------------------------------
"""Top-level SPUTM CLI: resource extract/build, strings, fonts, rooms, scripts."""
import glob
import os
from pathlib import Path

import typer

from nutcracker.sputm.build import rebuild_resources, update_element
from nutcracker.sputm.char.decode import decode_all_fonts, get_chars
from nutcracker.sputm.char.encode import encode_char
from nutcracker.sputm.schema import SCHEMA
from nutcracker.sputm.strings import (
    RAW_ENCODING,
    get_all_scripts,
    get_optable,
    get_script_map,
    msg_to_print,
    print_to_msg,
    update_element_strings,
)
from nutcracker.sputm.tree import dump_resources, narrow_schema, open_game_resource
from nutcracker.utils.fileio import write_file

from .preset import sputm
from .room import runner as room_image
from .windex import runner as script_windex

app = typer.Typer()
app.add_typer(room_image.app, name='room')
app.add_typer(script_windex.app, name='script')

# ## RESOURCE


@app.command()
def extract(
    filename: Path = typer.Argument(..., help='Game resource index file'),
) -> None:
    """Dump all game resources under a directory named after the game."""
    gameres = open_game_resource(filename)
    basename = gameres.basename
    print(f'Extracting game resources: {basename}')
    dump_resources(gameres, basename)


@app.command()
def build(
    dirname: Path = typer.Argument(..., help='Patch directory'),
    ref: Path = typer.Option(..., '--ref', help='Reference resource index'),
) -> None:
    """Rebuild game resources from a patch directory against a reference index."""
    gameres = open_game_resource(ref)
    basename = os.path.basename(os.path.normpath(dirname))
    print(f'Rebuilding game resources: {basename}')

    files = set(glob.iglob(f'{dirname}/**/*', recursive=True))
    assert None not in files

    root = gameres.read_resources(
        # schema=narrow_schema(
        #     SCHEMA, {'LECF', 'LFLF', 'ROOM', 'RMIM'}
        # )
    )

    updated_resource = list(update_element(dirname, root, files))
    rebuild_resources(gameres, basename, updated_resource)


# ## STRINGS


@app.command('strings_extract')
def extract_strings(
    filename: Path = typer.Argument(..., help='Game resource index file'),
    textfile: Path = typer.Option(
        'strings.txt',
        '--textfile',
        '-t',
        help='save strings to file',
    ),
) -> None:
    """Dump all in-game strings (scripts, objects, TLKE) to a text file."""
    gameres = open_game_resource(filename)
    basename = os.path.basename(os.path.normpath(filename))
    print(f'Extracting strings from game resources: {basename}')

    script_ops = get_optable(gameres.game)
    script_map = get_script_map(gameres.game)

    root = gameres.read_resources(
        schema=narrow_schema(
            SCHEMA,
            {'LECF', 'LFLF', 'RMDA', 'ROOM', 'OBCD', 'TLKE', *script_map},
        ),
    )

    # v8 scripts use 4-byte variable references; earlier versions use 2.
    var_size = 4 if gameres.game.version >= 8 else 2

    with open(textfile, 'w', **RAW_ENCODING) as f:
        for msg in get_all_scripts(root, script_ops, script_map):
            print(msg_to_print(msg, var_size=var_size), file=f)


@app.command('strings_inject')
def inject_strings(
    filename: Path = typer.Argument(..., help='Game resource index file'),
    textfile: Path = typer.Option(
        'strings.txt',
        '--textfile',
        '-t',
        help='save strings to file',
    ),
) -> None:
    """Inject edited strings from a text file back into the game resources."""
    gameres = open_game_resource(filename)
    basename = gameres.basename
    print(f'Injecting strings into game resources: {basename}')

    script_ops = get_optable(gameres.game)
    script_map = get_script_map(gameres.game)

    root = gameres.read_resources(
        schema=narrow_schema(
            SCHEMA,
            {'LECF', 'LFLF', 'RMDA', 'ROOM', 'OBCD', 'TLKE', *script_map},
        ),
    )

    with open(textfile, 'r', **RAW_ENCODING) as f:
        fixed_lines = (print_to_msg(line) for line in f)
        updated_resource = list(
            update_element_strings(root, fixed_lines, script_ops, script_map),
        )

    rebuild_resources(gameres, basename, updated_resource)


# ## FONTS


@app.command('fonts_extract')
def extract_fonts(
    filename: Path = typer.Argument(..., help='Game resource index file'),
) -> None:
    """Decode all CHAR font resources to PNG glyph grids in <basename>/chars."""
    gameres = open_game_resource(filename)
    basename = gameres.basename
    print(f'Extracting fonts from game resources: {basename}')

    root = gameres.read_resources(
        schema=narrow_schema(SCHEMA, {'LECF', 'LFLF', 'CHAR'}),
    )

    outdir = os.path.join(basename, 'chars')
    os.makedirs(outdir, exist_ok=True)

    for fname, bim in decode_all_fonts(root):
        bim.save(os.path.join(outdir, f'{fname}.png'))
        print(f'saved {basename}-{fname}.png')


@app.command('fonts_inject')
def inject_fonts(
    dirname: Path = typer.Argument(..., help='Patch directory'),
    ref: Path = typer.Option(..., '--ref', help='Reference resource index'),
) -> None:
    """Re-encode edited glyph-grid PNGs in <dirname>/chars into CHAR patches."""
    gameres = open_game_resource(ref)
    basename = os.path.basename(os.path.normpath(dirname))
    print(f'Creating path for game fonts: {basename}')

    root = gameres.read_resources(
        schema=narrow_schema(SCHEMA, {'LECF', 'LFLF', 'CHAR'}),
    )

    base = os.path.join(dirname, 'chars')

    for elem in get_chars(root):
        path = elem.attribs['path']
        fname = os.path.basename(path)
        patch_file = os.path.join(base, f'{fname}.png')
        if os.path.exists(patch_file):
            base_out = os.path.join(dirname, path)
            os.makedirs(os.path.dirname(base_out), exist_ok=True)
            write_file(base_out, bytes(sputm.mktag('CHAR', memoryview(encode_char(elem, patch_file)))))


if __name__ == '__main__':
    app()
--------------------------------------------------------------------------------
/src/nutcracker/sputm/schema.py:
--------------------------------------------------------------------------------
"""Chunk-nesting schema for the SPUTM resource format (tag -> allowed children)."""
DATA: set[str] = set()

# Image chunks allowed inside IMxx containers.
IMXX = {
    'SMAP',
    'BMAP',
    'BOMP',
    *[f'ZP{i:02X}' for i in range(1, 5)],
}

RAWD = '____'  # Collect rest of chunk as raw data

SCHEMA: dict[str, set[str]] = {
    RAWD: DATA,
    'LECF': {
        'LOFF',
        'LFLF',
    },
    # Room offset table: raw data.
    'LOFF': DATA,
    # Disk file: one LFLF per room, holding all room-level resources.
    'LFLF': {
        'RMIM',
        'RMDA',
        'ROOM',
        'RMSC',
        'SCRP',
        'SOUN',
        'AKOS',
        'COST',
        'CHAR',
        'DIGI',
        'MULT',
        'AWIZ',
        'TALK',
        'TLKE',
        RAWD,
    },
    # Room container (older layout): header, palettes, images, objects, scripts.
    'ROOM': {
        'RMHD',
        'CYCL',
        'PALS',
        'IMAG',
        'OBIM',
        'BOXD',
        'BOXM',
        'SCAL',
        'RMSC',
        'TRNS',
        'EPAL',
        'CLUT',
        'RMIM',
        'OBCD',
        'EXCD',
        'ENCD',
        'NLSC',
        'LSCR',
    },
    # Room data container (newer layout).
    'RMDA': {
        'RMHD',
        'CYCL',
        'TRNS',
        'PALS',
        'OBIM',
        'OBCD',
        'EXCD',
        'ENCD',
        'NLSC',
        'LSC2',
        'LSCR',
        'POLD',
    },
    'RMHD': DATA,
    'RMIM': {
        'RMIH',
        'IM00',
    },
    'TRNS': DATA,
    'EPAL': DATA,
    'CYCL': DATA,
    'PALS': {
        'WRAP',
    },
    'OFFS': DATA,
    'APAL': DATA,
    'WRAP': {
        'OFFS',
        'APAL',
        'SMAP',
        'BOMP',
        'AWIZ',
        'SEQI',
    },
    'IMAG': {
        'WRAP',
    },
    # Object image: header plus numbered image states IM01..IM10 (hex).
    'OBIM': {
        'IMHD',
        'IMAG',
        *[f'IM{i:02X}' for i in range(1, 17)],
    },
    'RMSC': {
        'ENCD',
        'EXCD',
        'OBCD',
        'LSCR',
    },
    # Object code: header, name, verb scripts.
    'OBCD': {
        'CDHD',
        'OBNA',
        'VERB',
    },
    **{f'IM{i:02X}': IMXX for i in range(17)},
    **{f'ZP{i:02X}': DATA for i in range(1, 5)},
    'BOXD': DATA,
    'BOXM': DATA,
    'CLUT': DATA,
    'SCAL': DATA,
    'RMIH': DATA,
    # AKOS costume container.
    'AKOS': {
        'AKHD',
        'AKPL',
        'RGBS',
        'AKSQ',
        'AKCH',
        'AKOF',
        'AKCI',
        'AKCD',
        'AKLC',
        'AKST',
        'AKCT',
        'SP2C',
        'SPLF',
        'CLRS',
        'IMGL',
        'SQDB',
        'AKFO',
    },
    'SMAP': DATA,  # ?
    'IMHD': DATA,
    'CDHD': DATA,
    'VERB': DATA,
    'OBNA': DATA,
    'EXCD': DATA,
    'ENCD': DATA,
    'NLSC': DATA,
    'LSCR': DATA,
    'CHAR': DATA,
    'SCRP': DATA,
    'COST': DATA,
    'SOUN': DATA,
    'BOMP': DATA,
    # Index-file directory chunks: raw data.
    'RNAM': DATA,
    'MAXS': DATA,
    'DROO': DATA,
    'DSCR': DATA,
    'DSOU': DATA,
    'DCOS': DATA,
    'DCHR': DATA,
    'DOBJ': DATA,
    'BMAP': DATA,
    'LSC2': DATA,
    # Digital sound: header + sample data.
    'DIGI': {
        'HSHD',
        'SDAT',
        'SBNG',
    },
    'HSHD': DATA,
    'SDAT': DATA,
    'AKHD': DATA,
    'AKPL': DATA,
    'RGBS': DATA,
    'AKSQ': DATA,
    'AKCH': DATA,
    'AKOF': DATA,
    'AKCI': DATA,
    'AKCD': DATA,
    'AKLC': DATA,
    'AKST': DATA,
    'AKCT': DATA,
    'AKFO': DATA,
    'MULT': {
        'DEFA',
        'WRAP',
    },
    'DEFA': {
        'RGBS',
        'CNVS',
    },
    'AWIZ': {
        'WIZH',
        'WIZD',
        'CNVS',
        'SPOT',
        'RELO',
        'RGBS',
    },
    'TLKE': {
        'TEXT',
    },
    'TEXT': DATA,
    'WIZH': DATA,
    'WIZD': DATA,
    'CNVS': DATA,
    'SPOT': DATA,
    'RELO': DATA,
    'POLD': DATA,
    'SP2C': DATA,
    'SPLF': DATA,
    'CLRS': DATA,
    'IMGL': DATA,
    'SQDB': {
        'WRAP',
    },
    'SEQI': {
        'NAME',
        'STOF',
        'SQLC',
        'SIZE',
    },
    'NAME': DATA,
    'STOF': DATA,
    'SQLC': DATA,
    'SIZE': DATA,
    'SBNG': DATA,
    'TALK': {
        'HSHD',
        'SDAT',
        'SBNG',
    },
    # HE0
    'DIRI': DATA,
    'DIRR': DATA,
    'DIRS': DATA,
    'DIRN': DATA,
    'DIRC': DATA,
    'DIRF': DATA,
    'DIRM': DATA,
    'DIRT': DATA,
    'DLFL': DATA,
    'DISK': DATA,
    'SVER': DATA,
    'AARY': DATA,
    'INIB': {
        'NOTE',
    },
    'NOTE': DATA,
    # HE2
    'TLKB': {
        'SBNG',
        'TALK',
    },
    # HE4
    'SONG': {
        'SGHD',
        'SGEN',
        'DIGI',
    },
    'SGHD': DATA,
    'SGEN': DATA,
    # LA0
    'ANAM': DATA,
}


import io
from collections.abc import Iterable, Iterator, Mapping
from typing import TypeVar

from nutcracker.kernel2.element import Element
from nutcracker.sputm.script.opcodes import OpTable
from nutcracker.sputm.script.opcodes_v5 import SomeOp
from nutcracker.utils.funcutils import flatten

from .parser import CString, RefOffset, ScriptArg, Statement

# Type variable bound to ScriptArg so get_argtype returns the requested subtype.
S_Arg = TypeVar('S_Arg', bound=ScriptArg)
# A parsed script: statement keyed by its byte offset within the script data.
ByteCode = Mapping[int, Statement]


def get_argtype(args: Iterable[ScriptArg], argtype: type[S_Arg]) -> Iterable[S_Arg]:
    """Yield all arguments of the given type, descending into nested SomeOp args."""
    for arg in args:
        if isinstance(arg, SomeOp):
            yield from get_argtype(arg.args, argtype)
        elif isinstance(arg, argtype):
            yield arg


class BytecodeParseError(ValueError):
    """Raised when an opcode cannot be parsed.

    Carries the raw buffer, failing opcode, offset and the bytecode parsed so
    far, so callers (e.g. error reporting in shared.BytecodeError) can render
    a full listing of the partial parse.
    """

    def __init__(
        self,
        cause: Exception,
        buffer: bytes,
        opcode: int,
        bytecode: ByteCode,
        offset: int,
        base_offset: int = 0,
    ) -> None:
        super().__init__(
            f'Could not parse opcode 0x{opcode:02X} at offset [{base_offset + offset:08d}]: {cause!r}',
        )
        self.buffer = buffer
        self.opcode = opcode
        self.offset = offset
        self.bytecode = bytecode
        self.base_offset = base_offset


def descumm_iter(
    data: bytes,
    opcodes: OpTable,
    base_offset: int = 0,
) -> Iterable[tuple[int, Statement]]:
    """Parse script bytes into statements, yielding (offset, statement) pairs.

    Each statement is parsed by looking up its opcode byte in ``opcodes``.
    On failure, wraps the error in BytecodeParseError carrying the partial
    parse. After the stream is exhausted, validates the parse by asserting
    that re-serialization (with and without offset refresh) reproduces the
    input bytes exactly.
    """
    with io.BytesIO(data) as stream:
        bytecode = {}
        while True:
            offset = stream.tell()
            next_byte = stream.read(1)
            if not next_byte:
                break
            opcode = ord(next_byte)
            try:
                op = opcodes[opcode](opcode, stream)  # type: ignore
                bytecode[op.offset] = op
                # print(f'0x{op.offset:04x}', op)

            except Exception as e:
                print(f'{type(e)}: {str(e)}')
                print(f'0x{offset:04x}', f'0x{opcode:02x}')
                raise BytecodeParseError(
                    e,
                    data,
                    opcode,
                    bytecode,
                    offset,
                    base_offset,
                ) from e

            else:
                yield op.offset, bytecode[op.offset]

    # Every jump target must resolve to the start of a parsed statement.
    for _off, stat in bytecode.items():
        for arg in get_argtype(stat.args, RefOffset):
            assert arg.abs in bytecode, hex(arg.abs)

    # Round-trip validation: serialization must reproduce the input exactly.
    assert to_bytes(bytecode) == data
    assert to_bytes(refresh_offsets(bytecode)) == data, (
        to_bytes(refresh_offsets(bytecode)),
        data,
    )


def descumm(data: bytes, opcodes: OpTable) -> ByteCode:
    """Eagerly parse script bytes into an offset -> statement mapping."""
    return dict(descumm_iter(data, opcodes))


def print_bytecode(bytecode: ByteCode) -> None:
    """Print each statement prefixed with its hex offset."""
    for off, stat in bytecode.items():
        print(f'0x{off:04x}', stat)


def get_strings(bytecode: ByteCode) -> Iterable[CString]:
    """Yield all non-empty message (CString) arguments in statement order."""
    for _off, stat in bytecode.items():
        for arg in get_argtype(stat.args, CString):
            if arg.msg:
                yield arg


def update_strings(bytecode: ByteCode, strings: Iterable[bytes]) -> ByteCode:
    """Replace message contents in-place, then recompute statement offsets.

    Pairs messages with replacements positionally; extra messages keep their
    original content.
    """
    for orig, upd in zip(get_strings(bytecode), strings):
        orig.msg = upd
    return refresh_offsets(bytecode)


def refresh_offsets(bytecode: ByteCode) -> ByteCode:
    """Recompute statement offsets after edits changed statement sizes.

    First pass: assign new sequential offsets, shifting each RefOffset's
    endpos by the same delta as its owning statement. Second pass: remap
    jump targets from old absolute offsets to the new ones.
    NOTE: mutates the statements in place; order of the two passes matters,
    since the second pass looks up targets by their *old* offsets (keys).
    """
    updated = {}
    off = 0
    for stat in bytecode.values():
        for arg in get_argtype(stat.args, RefOffset):
            arg.endpos += off - stat.offset
        stat.offset = off
        off += len(stat.to_bytes())
    for stat in bytecode.values():
        for arg in get_argtype(stat.args, RefOffset):
            arg.abs = bytecode[arg.abs].offset
        updated[stat.offset] = stat
    return updated


def to_bytes(bytecode: ByteCode) -> bytes:
    """Serialize statements back to raw script bytes, verifying contiguity."""
    with io.BytesIO() as stream:
        for off, stat in bytecode.items():
            # Offsets must be dense: each statement starts where the last ended.
            assert off == stream.tell(), (off, stream.tell())
            stream.write(stat.to_bytes())
        return stream.getvalue()


# The following helpers split a script chunk into (header, code) pairs;
# header size depends on the chunk type / game version.

def global_script(data: bytes) -> tuple[bytes, bytes]:
    """Global scripts have no header."""
    return b'', data


def local_script(data: bytes) -> tuple[bytes, bytes]:
    """Local scripts: 1-byte script-number header."""
    return data[:1], data[1:]


def local_script_v7(data: bytes) -> tuple[bytes, bytes]:
    """Local scripts (v7): 2-byte header."""
    return data[:2], data[2:]


def local_script_v8(data: bytes) -> tuple[bytes, bytes]:
    """Local scripts (v8 / LSC2): 4-byte header."""
    return data[:4], data[4:]


def verb_script(data: bytes) -> tuple[bytes, bytes]:
    """Verb scripts: header is a (verb-key, 2-byte entry) table ending at NUL."""
    serial = b''
    with io.BytesIO(data) as stream:
        while True:
            key = stream.read(1)
            serial += key
            if key in {b'\0'}:  # , b'\xFF'}:
                break
            serial += stream.read(2)
        return serial, stream.read()


# Maps script chunk tags to their header splitter.
# NOTE(review): local_script_v7 is defined but not mapped here — presumably
# selected elsewhere per game version; confirm.
script_map = {
    'SCRP': global_script,
    'LSCR': local_script,
    'LSC2': local_script_v8,
    'VERB': verb_script,
    'ENCD': global_script,
    'EXCD': global_script,
}


def get_scripts(root: Iterable[Element]) -> Iterator[Element]:
    """Recursively yield all script-bearing elements (incl. OBCD) under root."""
    for elem in root:
        if elem.tag in {'LECF', 'LFLF', 'RMDA', 'ROOM', 'OBCD', *script_map}:
            if elem.tag in {*script_map, 'OBCD'}:
                yield elem
            else:
                yield from get_scripts(elem.children())


if __name__ == '__main__':
    import argparse
    import glob

    from nutcracker.kernel2.fileio import ResourceFile

    from ..preset import sputm
    from .opcodes import OPCODES_he80

    parser = argparse.ArgumentParser(description='read smush file')
from') 191 | parser.add_argument('--chiper-key', default='0x00', type=str, help='xor key') 192 | args = parser.parse_args() 193 | 194 | files = set(flatten(glob.iglob(r) for r in args.files)) 195 | for filename in files: 196 | with ResourceFile.load(filename, key=int(args.chiper_key, 16)) as resource: 197 | for elem in get_scripts(sputm.map_chunks(resource)): 198 | _, script_data = script_map[elem.tag](elem.data) 199 | bytecode = descumm(script_data, OPCODES_he80) 200 | print_bytecode(bytecode) 201 | -------------------------------------------------------------------------------- /src/nutcracker/sputm/script/parser.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Callable, Iterable, Iterator 2 | from typing import IO, Protocol 3 | 4 | 5 | def read_message( 6 | stream: IO[bytes], 7 | escape: bytes | None = None, 8 | var_size: int = 2, 9 | ) -> Iterator[bytes]: 10 | while True: 11 | c = stream.read(1) 12 | if c in {b'', b'\0'}: 13 | break 14 | assert c is not None 15 | if c == escape: 16 | t = stream.read(1) 17 | c += t 18 | if ord(t) not in {1, 2, 3, 8}: 19 | c += stream.read(var_size) 20 | yield c 21 | 22 | 23 | class ScriptArg(Protocol): 24 | def to_bytes(self) -> bytes: ... 
class CString(ScriptArg):
    """A NUL-terminated message argument (with \\xff escape sequences)."""

    def __init__(self, stream: IO[bytes], var_size: int = 2) -> None:
        self.msg = b''.join(read_message(stream, escape=b'\xff', var_size=var_size))

    def __repr__(self) -> str:
        return f'MSG {self.msg!r}'

    def to_bytes(self) -> bytes:
        msg = self.msg if self.msg is not None else b''
        return msg + b'\0'


class ByteValue(ScriptArg):
    """A single raw byte operand."""

    def __init__(self, stream: IO[bytes]) -> None:
        self.op = stream.read(1)

    def __repr__(self) -> str:
        return f'BYTE hex=0x{ord(self.op):02x} dec={ord(self.op)}'

    def to_bytes(self) -> bytes:
        return self.op


class WordValue(ScriptArg):
    """A 16-bit little-endian signed operand."""

    def __init__(self, stream: IO[bytes]) -> None:
        self.op = stream.read(2)

    def __repr__(self) -> str:
        val = int.from_bytes(self.op, byteorder='little', signed=True)
        return f'WORD hex=0x{val:04x} dec={val}'

    def to_bytes(self) -> bytes:
        return self.op


class DWordValue(ScriptArg):
    """A 32-bit little-endian signed operand."""

    def __init__(self, stream: IO[bytes]) -> None:
        self.op = stream.read(4)

    def __repr__(self) -> str:
        val = int.from_bytes(self.op, byteorder='little', signed=True)
        # BUGFIX: pad to 8 hex digits (32 bits). Was '04x', which under-padded
        # small values and was inconsistent with WordValue (2 bytes -> 04x)
        # and RefOffset (size 4 -> 08x).
        return f'DWORD hex=0x{val:08x} dec={val}'

    def to_bytes(self) -> bytes:
        return self.op


class RefOffset(ScriptArg):
    """A relative jump offset operand, stored internally as an absolute position.

    ``abs`` is mutated by offset-refresh passes; ``rel`` is recomputed from
    ``abs`` and ``endpos`` on serialization.
    """

    def __init__(self, stream: IO[bytes], word_size: int = 2):
        rel = int.from_bytes(stream.read(word_size), byteorder='little', signed=True)
        self.endpos = stream.tell()
        self.size = word_size
        self.abs = rel + self.endpos

    @property
    def rel(self) -> int:
        return self.abs - self.endpos

    def __repr__(self) -> str:
        if self.size == 2:
            return f'REF rel=0x{self.rel:04x} abs=0x{(self.abs):04x}'
        return f'REF rel=0x{self.rel:08x} abs=0x{(self.abs):08x}'

    def to_bytes(self) -> bytes:
        return self.rel.to_bytes(self.size, byteorder='little', signed=True)
def print_asts(indent, asts):
    """Yield a text listing of labeled statement blocks.

    Labels prefixed with an underscore are fall-through (unreferenced)
    blocks: their statements are still listed, but the label line is hidden.
    """
    for label, stats in asts.items():
        if not label.startswith('_'):  # or True:
            yield f'{label}:'
        for stat in stats:
            yield f'{indent}{stat}'


def parse_verb_meta(meta):
    """Yield (verb-key, entry-offset) pairs from a verb script header.

    Each table row is a 1-byte key followed by a 16-bit little-endian entry
    point; the table ends at a NUL key. Entry points are rebased relative to
    the end of the header (hence ``entry - len(meta)``).
    """
    with io.BytesIO(meta) as stream:
        while (key := stream.read(1)) not in {b'\0'}:
            entry = int.from_bytes(stream.read(2), byteorder='little', signed=False)
            yield key, entry - len(meta)
        # The NUL key must be the last byte of the header.
        assert stream.read() == b''
        # Render each statement's bytes as 4-byte groups, most significant
        # group first, right-justified to align columns in the dump.
        byte_width = 4
        hexdump = ' |\n\t '.join(
            bytes(x for x in part if x is not None)[::-1]
            .hex(' ')
            .upper()
            .rjust(3 * byte_width - 1)
            for part in reversed(list(grouper(stat.to_bytes()[::-1], byte_width)))
        )
        yield f'[{base_offset + off:08d}]: {hexdump} | {stat}'


class BytecodeError(ValueError):
    """Decompilation error wrapping a BytecodeParseError with full context.

    The message embeds the script path, the blocks decompiled so far, a hex
    listing of the parsed bytecode, and the bytes at the failure point.
    """

    def __init__(self, cause: BytecodeParseError, path, asts):
        block = '\n'.join(print_asts('\t', asts))
        bytecode_str = '\n\t'.join(
            canonical_bytecode(cause.bytecode, cause.base_offset),
        )
        msg = (
            '\n'
            f'Script path: {path}\n'
            f'Block:\n{block}\n'
            f'Bytecode:\n\t{bytecode_str}\n'
            f'Next:\n\t[{cause.base_offset + cause.offset:08d}]: {cause.buffer[cause.offset:cause.offset+16].hex(" ").upper()}\n'
            f'Error summary: {cause}'
        )
        super().__init__(
            msg,
        )
        self.cause = cause
        self.path = path
        self.asts = asts


class ScriptError(ValueError):
    """Decompilation error raised while transforming statements.

    Captures the statement being processed and the value stack at failure.
    """

    def __init__(self, cause, path, asts, stat, stack):
        block = '\n'.join(print_asts('\t', asts))
        msg = (
            '\n'
            f'Script path: {path}\n'
            f'Block:\n{block}\n'
            f'Next statement: {stat} called with stack: {stack}\n'
            f'Error summary: {repr(cause)}'
        )
        super().__init__(
            msg,
        )
        self.cause = cause
        self.path = path
        self.asts = asts
        self.stat = stat
        self.stack = stack


def realize_refs(srefs, hrefs, seq):
    """Group (offset, statement) pairs into labeled blocks at ref boundaries.

    ``srefs`` are soft (fall-through) offsets, ``hrefs`` hard (jump-target)
    offsets; labels for soft-only offsets are prefixed with '_'. The +8 in
    labels accounts for the chunk header, matching on-disk offsets.
    """
    refs = {label: label in hrefs for label in sorted(srefs | hrefs)}
    assert refs
    if len(refs) == 1:
        nref = next(iter(refs))
    else:
        for ref, nref in pairwise(refs):
            label = f'[{ref + 8:08d}]' if refs[ref] else f'_[{ref + 8:08d}]'
            stats = deque(stat for off, stat in seq if off < nref)
            # TODO: investigate what is the meaning of empty ref block
            if stats:
                yield label, stats
            seq = deque((off, stat) for off, stat in seq if off >= nref)
    # Emit the trailing block (everything at/after the last boundary).
    label = f'[{nref + 8:08d}]' if refs[nref] else f'_[{nref + 8:08d}]'
    stats = deque(stat for _, stat in seq)
    if stats:
        yield label, stats


import functools
import os
from enum import Enum
from pathlib import Path

import typer

from .. import windex_v5, windex_v6
from ..preset import sputm
from ..schema import SCHEMA
from ..script.bytecode import script_map
from ..strings import RAW_ENCODING
from ..tree import narrow_schema, open_game_resource
from .scu import dump_script_file

app = typer.Typer()

# CLI version tags mapped to (version, HE sub-version) pairs.
SUPPORTED_VERSION = {
    '8': (8, 0),
    '7': (7, 0),
    'he101': (6, 101),
    'he100': (6, 100),
    'he99': (6, 99),
    'he98': (6, 98),
    'he90': (6, 90),
    'he80': (6, 80),
    'he73': (6, 73),
    'he72': (6, 72),
    'he71': (6, 71),
    'he70': (6, 70),
    'he60': (6, 60),
    '6': (6, 0),
    '5': (5, 0),
}

# Enum of version tags so typer can validate/complete the --game option.
Version = Enum(  # type: ignore[misc]
    'Version',
    dict(zip(SUPPORTED_VERSION.keys(), SUPPORTED_VERSION.keys(), strict=True)),
)


@app.command('decompile')
def decompile(
    filename: Path = typer.Argument(..., help='Game resource index file'),
    gver: Version = typer.Option(
        None,
        '--game',
        '-g',
        help='Force game version',
    ),
    *,
    verbose: bool = typer.Option(False, '--verbose', help='Dump each opcode for debug'),
    chiper_key: str = typer.Option(
        None,
        '--chiper-key',
        help='XOR key for decrypting game resources',
    ),
    skip_transform: bool = typer.Option(
        False,
        '--skip-transform',
        help='Disable structure simplification',
    ),
) -> None:
    """Decompile all room scripts to .scu files under scripts/<basename>."""
    gameres = open_game_resource(
        filename,
        SUPPORTED_VERSION.get(gver.name) if gver else None,
        int(chiper_key, 16) if chiper_key else None,
    )
    basename = gameres.basename

    # Restrict traversal to containers that can hold scripts.
    root = gameres.read_resources(
        schema=narrow_schema(
            SCHEMA,
            {'LECF', 'LFLF', 'RMDA', 'ROOM', 'OBCD', *script_map},
        ),
    )

    rnam = gameres.rooms
    print(gameres.game)
    print(rnam)

    script_dir = os.path.join('scripts', basename)
    os.makedirs(script_dir, exist_ok=True)

    # Pick the decompiler backend by engine version.
    # NOTE(review): versions below 5 leave `decompile` unbound and would
    # raise NameError in the loop below — confirm v5 is the minimum supported.
    if gameres.game.version >= 6:
        decompile = functools.partial(
            windex_v6.decompile_script,
            game=gameres.game,
            verbose=verbose,
            transform=not skip_transform,
        )
    elif gameres.game.version >= 5:
        decompile = functools.partial(
            windex_v5.decompile_script,
            transform=not skip_transform,
        )

    for disk in root:
        for room in sputm.findall('LFLF', disk):
            # Prefer the room name from the index; fall back to its gid.
            room_no = rnam.get(room.attribs['gid'], f"room_{room.attribs['gid']}")
            print(
                '==========================',
                room.attribs['path'],
                room_no,
            )
            fname = f"{script_dir}/{room.attribs['gid']:04d}_{room_no}.scu"

            with open(fname, 'w', **RAW_ENCODING) as script_file:
                dump_script_file(room_no, room, decompile, script_file)


if __name__ == '__main__':
    app()


from collections.abc import Callable, Iterable, Iterator
from typing import IO

from nutcracker.kernel2.element import Element
from nutcracker.sputm.script.bytecode import script_map


def get_global_scripts(root: Iterable[Element]) -> Iterator[Element]:
    """Recursively yield script elements found outside room containers."""
    for elem in root:
        if elem.tag in {'LECF', 'LFLF', 'OBCD', *script_map}:
            if elem.tag in {*script_map}:
                yield elem
            else:
                yield from get_global_scripts(elem.children())


def get_room_scripts(root: Iterable[Element]) -> Iterator[Element]:
    """Recursively yield room-local script elements (incl. OBCD), skipping SCRP.

    Global SCRP chunks are excluded here (they are handled by
    get_global_scripts); the asserts verify SCRP never nests inside a room.
    """
    for elem in root:
        if elem.tag in {'LECF', 'LFLF', 'RMDA', 'ROOM', 'OBCD', *script_map}:
            if elem.tag == 'SCRP':
                assert 'ROOM' not in elem.attribs['path'], elem
                assert 'RMDA' not in elem.attribs['path'], elem
                continue
            elif elem.tag in {*script_map, 'OBCD'}:
                yield elem
            else:
                yield from get_room_scripts(elem.children())


def dump_script_file(
    room_no: str,
    room: Element,
    decompile: Callable[[Element], Iterator[str]],
    outfile: IO[str],
) -> None:
    """Write decompiled scripts for one room: globals first, then a room block.

    Room-local script lines are tab-indented unless they are labels
    (ending in ']:') or blank.
    """
    children = list(room.children())
    for elem in get_global_scripts(children):
        for line in decompile(elem):
            print(line, file=outfile)
        print('', file=outfile)  # end with new line
    print(f'room {room_no}', '{', file=outfile)
    for elem in get_room_scripts(children):
        print('', file=outfile)  # end with new line
        for line in decompile(elem):
            print(
                line if line.endswith(']:') or not line else f'\t{line}',
                file=outfile,
            )
    print('}', file=outfile)
    print('', file=outfile)  # end with new line
| offset=4 + (UINT32LE_x3.size + 13) * i, 48 | ), 49 | ), 50 | name=sghd[ 51 | 4 + (UINT32LE_x3.size + 13) * i + UINT32LE_x3.size : 4 52 | + (UINT32LE_x3.size + 13) * i 53 | + UINT32LE_x3.size 54 | + 13 55 | ], 56 | ), 57 | ) 58 | 59 | else: 60 | for i in range(num_songs): 61 | elem = next(children) 62 | assert elem.tag == 'SGEN', elem 63 | 64 | songs.append( 65 | dict( 66 | zip( 67 | ('song', 'offset', 'size'), 68 | UINT32LE_x3.unpack_from(elem.data), 69 | ), 70 | name=elem.data[UINT32LE_x3.size :], 71 | ), 72 | ) 73 | 74 | for s in songs: 75 | elem = next(children) 76 | assert elem.tag == 'DIGI', elem 77 | assert elem.attribs['offset'] + 8 == s['offset'], ( 78 | elem.attribs['offset'] + 8, 79 | s['offset'], 80 | ) 81 | assert elem.attribs['size'] + 8 == s['size'], ( 82 | elem.attribs['size'] + 8, 83 | s['size'], 84 | ) 85 | assert s['name'] in {b'\0', b'abcdefghijklm'}, s['name'] 86 | 87 | songid = s['song'] 88 | 89 | hshd, sdat = elem.children() 90 | 91 | with io.BytesIO(hshd.data) as hshd: 92 | unk1 = struct.unpack(' bytes: 40 | return binascii.unhexlify(hexstr.encode()) 41 | 42 | 43 | def read_streams(src_dir: str, ext: str, files: Iterable[str]) -> Iterable[bytes]: 44 | for file in files: 45 | yield read_file(os.path.join(src_dir, f'{file}.{ext}')) 46 | 47 | 48 | if __name__ == '__main__': 49 | import argparse 50 | 51 | from nutcracker.sputm.preset import sputm 52 | from nutcracker.sputm.schema import SCHEMA 53 | from nutcracker.sputm.tree import narrow_schema, open_game_resource 54 | 55 | parser = argparse.ArgumentParser(description='read smush file') 56 | group = parser.add_mutually_exclusive_group() 57 | group.add_argument('--extract', '-e', action='store_true') 58 | group.add_argument('--inject', '-i', action='store_true') 59 | parser.add_argument('filename', help='filename to read from') 60 | parser.add_argument( 61 | '--textfile', 62 | '-t', 63 | help='save strings to file', 64 | default='embedded.txt', 65 | ) 66 | args = parser.parse_args() 67 | 68 | 
    gameres = open_game_resource(args.filename)

    # Only traverse containers that can hold sound resources.
    root = gameres.read_resources(
        schema=narrow_schema(SCHEMA, {'LECF', 'LFLF', 'SOUN'}),
    )

    if args.extract:
        # Extract every sound stream as a .voc file named by its offset.
        os.makedirs('sfx_ext', exist_ok=True)
        # with open(args.textfile, 'r') as voctable:
        #     coff = next(voctable)
        for off, stream in get_all_sounds(root):
            vname = f'{off:08x}'
            # if coff.startswith(vname):
            #     vname = coff[8:-1]
            #     coff = next(voctable, '')
            # else:
            #     print(coff, 'X', vname)

            with open(os.path.join('sfx_ext', f'{vname}.voc'), 'wb') as voc:
                voc.write(stream)

    elif args.inject:
        # Re-inject sounds listed in the text table as MPEG-tagged chunks,
        # then rebuild the game resource files.
        # TODO: use actual streams
        with open(args.textfile, 'r') as voctable:
            streams = [line[8:-1] for line in voctable]
        cstreams = (
            sputm.mktag('MPEG', cont) for cont in read_streams('sfx_hq', 'mp3', streams)
        )
        updated_resource = list(inject_sound_chunks(root, cstreams))

        basename = os.path.basename(args.filename)
        rebuild_resources(gameres, basename, updated_resource)
+ 8, sputm.assert_tag('TALK', chunk)) 29 | for offset, chunk in sputm.read_chunks(tlkb) 30 | ) 31 | for idx, (offset, chunk) in enumerate(chunks): 32 | sound = b'' 33 | for _, (tag, data) in sputm.read_chunks(chunk): 34 | if tag == 'HSHD': 35 | print(len(data)) 36 | print(tag, data) 37 | with io.BytesIO(data) as hshd: 38 | unk1 = struct.unpack(' Iterator[bytes]: 10 | return iter(functools.partial(source, buffer_size), b'') 11 | -------------------------------------------------------------------------------- /src/nutcracker/utils/fileio.py: -------------------------------------------------------------------------------- 1 | __all__ = ('read_file', 'write_file') 2 | 3 | from pathlib import Path 4 | 5 | from nutcracker.chiper import xor 6 | from nutcracker.kernel2.fileio import read_file 7 | 8 | 9 | def write_file(path: str, data: bytes, key: int = 0x00) -> int: 10 | with Path(path).open('wb') as res: 11 | return xor.write(res, data, key=key) 12 | -------------------------------------------------------------------------------- /src/nutcracker/utils/funcutils.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Iterable, Iterator, Sequence 2 | from itertools import chain, zip_longest 3 | from typing import TypeVar 4 | 5 | T = TypeVar('T') 6 | 7 | 8 | def flatten(ls: Iterable[Iterable[T]]) -> Iterator[T]: 9 | # flatten(['ABC', 'DEF']) --> A B C D E F 10 | """Flatten one level of nesting.""" 11 | return chain.from_iterable(ls) 12 | 13 | 14 | def grouper( 15 | iterable: Iterable[T], 16 | n: int, 17 | fillvalue: T | None = None, 18 | ) -> Iterator[Sequence[T]]: 19 | """Collect data into fixed-length chunks or blocks.""" 20 | # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" 21 | args = [iter(iterable)] * n 22 | return zip_longest(*args, fillvalue=fillvalue) 23 | -------------------------------------------------------------------------------- /src/nutcracker/utils/libio.py: 
import os
import sys
from collections.abc import Iterator
from contextlib import contextmanager
from pathlib import Path


@contextmanager
def suppress_stdout() -> Iterator[None]:
    """Silence all print output for the duration of the context.

    ``sys.stdout`` is swapped for a writer onto ``os.devnull`` and restored
    on exit, even if the body raises.
    """
    saved = sys.stdout
    with Path(os.devnull).open('w') as sink:
        sys.stdout = sink
        try:
            yield
        finally:
            sys.stdout = saved
--ref nuts\fonts\dig\FONT2.NUT --codec 21 --fake 44 11 | python -m nutcracker.smush.encode nuts\out\dig\FONT3.NUT\chars.png --target nuts\test\dig\FONT3.NUT --ref nuts\fonts\dig\FONT3.NUT --codec 21 --fake 44 12 | 13 | python -m nutcracker.smush.runner decode nuts\fonts\comi\*.NUT --nut --target nuts\out\comi 14 | python -m nutcracker.smush.encode nuts\out\comi\FONT0.NUT\chars.png --target nuts\test\comi\FONT0.NUT --ref nuts\fonts\comi\FONT0.NUT --codec 44 15 | python -m nutcracker.smush.encode nuts\out\comi\FONT1.NUT\chars.png --target nuts\test\comi\FONT1.NUT --ref nuts\fonts\comi\FONT1.NUT --codec 44 16 | python -m nutcracker.smush.encode nuts\out\comi\FONT2.NUT\chars.png --target nuts\test\comi\FONT2.NUT --ref nuts\fonts\comi\FONT2.NUT --codec 44 17 | python -m nutcracker.smush.encode nuts\out\comi\FONT3.NUT\chars.png --target nuts\test\comi\FONT3.NUT --ref nuts\fonts\comi\FONT3.NUT --codec 21 --fake 44 18 | python -m nutcracker.smush.encode nuts\out\comi\FONT4.NUT\chars.png --target nuts\test\comi\FONT4.NUT --ref nuts\fonts\comi\FONT4.NUT --codec 44 19 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BLooperZ/nutcracker/7822fc97f3ddad84dbb875298a87b12a15c5de45/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_nutcracker.py: -------------------------------------------------------------------------------- 1 | from nutcracker import __version__ 2 | 3 | 4 | def test_version() -> None: 5 | assert __version__ == '0.3.141' 6 | -------------------------------------------------------------------------------- /vocal.yml: -------------------------------------------------------------------------------- 1 | INTROD_8.SAN: [4, 6, 8, 10, 12, 14, 16, 18, 20, 25, 28, 33, 35, 38, 42, 46, 48, 51, 54, 57, 61, 64, 67, 77, 80, 86] 2 | CREDITS: [2, 8, 27, 
30] 3 | --------------------------------------------------------------------------------