├── tpl.py ├── ymap.py ├── splitasound.py ├── brres.py ├── ghidra_scripts ├── JSUConvertOffsetToPtr.py ├── vt.py ├── jisstrings.py ├── params.py ├── to_string.py ├── annotate_virtual_call.py └── classes.py ├── .gitignore ├── unityassets.py ├── scenebin.py ├── dxt1to5.py ├── bcsv.py ├── scene2blender.py ├── dedupe.py ├── col.py ├── brfnt.py ├── README.md ├── scenario names.py ├── bti.py ├── ral.py ├── col_blender.py ├── lineblur.py ├── bfn.py ├── thp2avi.py ├── common.py ├── bmg.py ├── blo.py ├── get-obj-info.py ├── col2unity.py ├── scene2vmf.py ├── bck.py ├── bck_blender.py ├── jpa.py ├── sequence-com.py └── texture.py /tpl.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys, os 4 | from texture import * 5 | from struct import unpack 6 | 7 | if len(sys.argv) != 2: 8 | sys.stderr.write("Usage: %s \n"%sys.argv[0]) 9 | exit(1) 10 | 11 | fin = open(sys.argv[1], 'rb') 12 | i = 0 13 | fin.seek(0,2) 14 | endfile = fin.tell() 15 | fin.seek(0) 16 | while fin.tell() < endfile: 17 | fin.seek(0x14,1) 18 | height, width, format, offset = unpack('>HHII', fin.read(12)) 19 | print(format, width, height) 20 | fin.seek(0x20,1) 21 | data = readTextureData(fin, format, width, height) 22 | images = decodeTexturePIL(data, format, width, height, 0, None) 23 | images[0][0].save(os.path.splitext(sys.argv[1])[0]+str(i)+'.png') 24 | fout = open(os.path.splitext(sys.argv[1])[0]+str(i)+".dds", 'wb') 25 | decodeTextureDDS(fout, data, format, width, height, 0, None) 26 | fout.close() 27 | i += 1 28 | 29 | fin.close() 30 | -------------------------------------------------------------------------------- /ymap.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from struct import unpack 4 | import sys, os 5 | from texture import TexFmt, readTextureData, decodeTexturePIL 6 | 7 | if len(sys.argv) != 2: 8 | sys.stderr.write("Usage: %s ymap.ymp\n"%sys.argv[0]) 9 | exit(1) 10 | 11 | fin = open(sys.argv[1], 'rb') 12 | 13 | nRegions, dataOffset = unpack('>H2xI', fin.read(8)) 14 | 15 | assert dataOffset == 8 16 | 17 | fin.seek(dataOffset) 18 | 19 | print("%d regions"%nRegions) 20 | 21 | for i in range(nRegions): 22 | pollutionEffect, flags, pollutionLayerType = unpack('>HHH2x', fin.read(8)) 23 | 24 | yOffset, texToWorldSize, xMin, zMin, xMax, zMax = unpack('>ffffff', fin.read(24)) 25 | 26 | widthPow, heightPow, unk, dataOffset = unpack('>HHII', fin.read(12)) 27 | width = 1<I', s) 17 | if chunkid in (1, 4, 5, 6, 7): 18 | offset, size = struct.unpack('>II4x', fin.read(12)) 19 | if chunkid == 4: name = "BARC" 20 | elif chunkid == 5: name = "strm" 21 | else: name = str(chunkid) 22 | print(i, chunkid, hex(offset), hex(size)) 23 | dumpsection(name+"-"+str(i)+".bin", fin, offset, size) 24 | i += 1 25 | elif chunkid in (2, 3): 26 | while True: 27 | s = fin.read(4) 28 | if len(s) == 0: break 29 | offset, = struct.unpack('>I', s) 30 | if offset == 0: break 31 | size, id = struct.unpack('>II', fin.read(8)) 32 | if chunkid == 3: name = "WSYS" 33 | elif chunkid == 2: name = "IBNK" 34 | else: name = str(chunkid) 35 | print(i, chunkid, hex(offset), hex(size)) 36 | dumpsection(name+"-"+str(id)+"-"+str(i)+".bin", fin, offset, size) 37 | i += 1 38 | elif chunkid == 0: 39 | break 40 | 41 | fin.close() 42 | -------------------------------------------------------------------------------- /brres.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from struct 
import Struct 3 | from common import BFile, Section 4 | from texture import readTextureData, decodeTexturePIL 5 | 6 | class Tex0(Section): 7 | header = struct.Struct('>L16xHH32x') 8 | 9 | def read(self, fin, start, size): 10 | self.mipmapCount, self.width, self.height = self.header.unpack(fin.read(0x38)) 11 | self.data = readTextureData(fin, self.format, self.width, self.height, mipmapCount=self.mipmapCount) 12 | 13 | def export(self, name): 14 | images = decodeTexturePIL(self.data, self.format, self.width, self.height, mipmapCount=self.mipmapCount) 15 | for arrayIdx, mips in enumerate(images): 16 | mips[0].save(name+str(arrayIdx)+".png") 17 | 18 | fout = open(name+".dds", 'wb') 19 | decodeTextureDDS(fout, self.data, self.format, self.width, self.height, 0, None, self.mipmapCount) 20 | fout.close() 21 | 22 | class BRres(BFile): 23 | aligned = True 24 | header = Struct('>8sL2xH4xL') 25 | sectionHandlers = {b'TEX0': Tex0} 26 | def readHeader(self, fin): 27 | self.signature, self.fileLength, self.chunkCount, extraHeaderSize = self.header.unpack(fin.read(0x18)) 28 | fin.seek(extraHeaderSize, 1) 29 | 30 | if len(sys.argv) != 2: 31 | sys.stderr.write("Usage: %s \n"%sys.argv[0]) 32 | exit(1) 33 | 34 | fin = open(sys.argv[1], 'rb') 35 | brres = BRres() 36 | brres.read(fin) 37 | fin.close() 38 | brres.tex0.export(os.path.splitext(sys.argv[1])[0]) 39 | -------------------------------------------------------------------------------- /ghidra_scripts/JSUConvertOffsetToPtr.py: -------------------------------------------------------------------------------- 1 | def getType(name): 2 | name = name.split("::")[-1] 3 | if name.startswith("unsigned "): name = "u"+name[9:] 4 | if name.endswith(" *"): 5 | ptr = True 6 | name = name[:-2] 7 | else: 8 | ptr = False 9 | l = []; dtm.findDataTypes(name, l) 10 | if len(l) != 0: 11 | if ptr: 12 | return dtm.getPointer(l[0]) 13 | else: 14 | return l[0] 15 | else: 16 | return None 17 | 18 | dtm = currentProgram.dataTypeManager 19 | voidp = dtm.getPointer(dtm.getDataType("/void")) 20 | for sym in currentProgram.symbolTable.getSymbols("JSUConvertOffsetToPtr"): 21 | f = getFunctionAt(sym.address) 22 | tempParam = f.comment[f.comment.find("<")+1:f.comment.find(">")] 23 | sig = f.signature.copy(dtm) 24 | returnType = getType(tempParam) 25 | if returnType is not None: 26 | sig.returnType = dtm.getPointer(returnType) 27 | else: 28 | sig.returnType = voidp 29 | argsDef = f.comment[f.comment.find("(")+1:f.comment.find(")")] 30 | args = [] 31 | for i, a in enumerate(argsDef.split(',')): 32 | argType = getType(a.strip()) 33 | if argType is None: argType = voidp 34 | args.append(ghidra.program.model.data.ParameterDefinitionImpl("param_"+str(i+1), argType, "")) 35 | sig.setArguments(args) 36 | sig.genericCallingConvention = ghidra.program.model.data.GenericCallingConvention.stdcall 37 | runCommand(ghidra.app.cmd.function.ApplyFunctionSignatureCmd(f.entryPoint, sig, f.signatureSource)) 38 | 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.gitignore.io/api/python 3 | 4 | ### Python ### 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | env/ 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | 
*.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *,cover 51 | .hypothesis/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | 61 | # Flask stuff: 62 | instance/ 63 | .webassets-cache 64 | 65 | # Scrapy stuff: 66 | .scrapy 67 | 68 | # Sphinx documentation 69 | docs/_build/ 70 | 71 | # PyBuilder 72 | target/ 73 | 74 | # Jupyter Notebook 75 | .ipynb_checkpoints 76 | 77 | # pyenv 78 | .python-version 79 | 80 | # celery beat schedule file 81 | celerybeat-schedule 82 | 83 | # dotenv 84 | .env 85 | 86 | # virtualenv 87 | .venv 88 | venv/ 89 | ENV/ 90 | 91 | # Spyder project settings 92 | .spyderproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # End of https://www.gitignore.io/api/python 98 | -------------------------------------------------------------------------------- /unityassets.py: -------------------------------------------------------------------------------- 1 | import uuid, yaml, os.path, unityparser 2 | 3 | def writeMeta(name, importer, outputFolderLocation): 4 | metaPath = os.path.join(outputFolderLocation, name+".meta") 5 | if os.path.exists(metaPath): 6 | guid = unityparser.UnityDocument.load_yaml(metaPath).entry['guid'] 7 | else: 8 | guid = str(uuid.uuid4()).replace('-', '') 9 | meta = { 10 | "fileFormatVersion": 2, 11 | "guid": guid 12 | } 13 | meta.update(importer) 14 | yaml.dump(meta, open(metaPath, 'w')) 15 | return guid 16 | 17 | def writeNativeMeta(name, mainObjectFileID, outputFolderLocation): 18 | return writeMeta(name, { 19 | "NativeFormatImporter": { 20 | "mainObjectFileID": mainObjectFileID 21 | } 22 | }, outputFolderLocation) 23 | 24 | def fixUnityParserFloats(): 25 | import re 26 | # for whatever reason, unityparser adds explicit type markers to floats with no 27 | # fraction. this restores pyyaml's default behavior 28 | unityparser.resolver.Resolver.add_implicit_resolver( 29 | 'tag:yaml.org,2002:float', 30 | re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? 31 | |\.[0-9_]+(?:[eE][-+][0-9]+)? 
32 | |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* 33 | |[-+]?\.(?:inf|Inf|INF) 34 | |\.(?:nan|NaN|NAN))$''', re.X), 35 | list('-+0123456789.')) 36 | 37 | def getFileRef(guid, id, type=2): 38 | return {'fileID': id, 'guid': guid, 'type': type} 39 | 40 | def getObjRef(obj): 41 | return {'fileID': int(obj.anchor)} 42 | 43 | -------------------------------------------------------------------------------- /ghidra_scripts/vt.py: -------------------------------------------------------------------------------- 1 | # 2 | #@keybinding 3 | #@menupath Tools.vt 4 | #@toolbar 5 | 6 | from ghidra.program.model.data import * 7 | from ghidra.program.model.address import AddressSet 8 | 9 | symdb = currentProgram.symbolTable 10 | listing = currentProgram.getListing() 11 | namespaceSymbols = list(symdb.getChildren(currentProgram.getNamespaceManager().getGlobalNamespace().getSymbol())) 12 | done = 0 13 | while len(namespaceSymbols) > 0: 14 | namespaceSymbol = namespaceSymbols.pop(0) 15 | namespace = symdb.getNamespace(namespaceSymbol.name, namespaceSymbol.getParentNamespace()) 16 | if namespace is None: continue 17 | namespaceSymbols.extend(symdb.getChildren(namespaceSymbol)) 18 | for sym in symdb.getSymbols(namespace): 19 | if sym.name != u'__vt': continue 20 | 21 | addr = sym.address.next() 22 | if addr is None: continue 23 | while len(symdb.getSymbols(addr)) == 0: 24 | addr = addr.next() 25 | 26 | while addr.subtract(sym.address)%4 != 0: 27 | addr = addr.previous() 28 | 29 | length = addr.subtract(sym.address) 30 | startAddr = sym.address 31 | endAddr = addr.previous() 32 | print namespace, sym, startAddr, length 33 | 34 | adt = ArrayDataType(PointerDataType.dataType, length/4, 4, currentProgram.getDataTypeManager()) 35 | assert length == adt.getLength() 36 | adset = AddressSet(startAddr, endAddr) 37 | if listing.getInstructions(adset, True).hasNext(): 38 | print "Can't create data because the current selection contains instructions" 39 | continue 40 | listing.clearCodeUnits(startAddr, endAddr, False) 41 | listing.createData(startAddr, adt, length) 42 | 43 | -------------------------------------------------------------------------------- /scenebin.py: -------------------------------------------------------------------------------- 1 | import struct 2 | from struct import unpack 3 | import sys, io, pathlib 4 | from warnings import warn 5 | from common import calcKeyCode 6 | 7 | def readString(fin): 8 | length, = unpack('>H', fin.read(2)) 9 | return fin.read(length).decode('shift-jis') 10 | 11 | def stylecolor(c): 12 | r, g, b = c[:3] 13 | if (r*r + g*g + b*b) < 48768: 14 | stylecode = 48 15 | else: 16 | stylecode = 38 17 | mr = min(r, 255) 18 | mg = min(g, 255) 19 | mb = min(b, 255) 20 | s = "\x1b[%d;2;%d;%d;%dm#%02X%02X%02X\x1b[0m"%(stylecode, mr, mg, mb, r, g, b) 21 | for x in c[3:]: 22 | s += " %02X"%x 23 | return s 24 | 25 | def readsection(fin): 26 | sectionlength, namehash = unpack('>IH', fin.read(6)) 27 | #print("len", sectionlength, "hash", namehash) 28 | #print("at", fin.tell()) 29 | name = readString(fin) 30 | assert namehash == calcKeyCode(name), (hex(namehash), name) 31 | #print("Found a", name) 32 | if name in classes.registeredObjectClasses: 33 | #print("Constructing a", classes.registeredObjectClasses[name]) 34 | o = classes.registeredObjectClasses[name]() 35 | else: 36 | warn("Unknown class {}".format(name)) 37 | o = classes.TNameRef() 38 | 39 | o.namehash = namehash 40 | o.name = name 41 | 42 | assert name.isidentifier() 43 | x = io.BytesIO(fin.read(sectionlength-8-len(name))) 44 | 
#print("Reading") 45 | try: 46 | o.read(x) 47 | except struct.error as e: 48 | warn("Couldn't load a {}: {}".format(name, e)) 49 | o.extra = x.read() 50 | #print("Got", o) 51 | return o 52 | 53 | import classes 54 | 55 | if __name__ == "__main__": 56 | def printobj(o, i=0): 57 | try: print(' '*i, o) 58 | except Exception as e: print(' '*i, e) 59 | if o.extra: 60 | print(' '*(i+1), o.extra.hex()) 61 | if hasattr(o, "objects"): 62 | for o2 in o.objects: 63 | printobj(o2, i+1) 64 | o = readsection(open(sys.argv[1], 'rb')) 65 | printobj(o) 66 | 67 | -------------------------------------------------------------------------------- /dxt1to5.py: -------------------------------------------------------------------------------- 1 | import struct, sys 2 | 3 | COMPRESSED_RGB_S3TC_DXT1_EXT = 0x83F0 4 | COMPRESSED_RGBA_S3TC_DXT5_EXT = 0x83F3 5 | 6 | fin = open(sys.argv[1], 'rb') 7 | identifier, endianness = struct.unpack('12sI', fin.read(16)) 8 | assert identifier == bytes([0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A]), identifier.hex() 9 | assert endianness == 0x04030201, hex(endianness) 10 | glType, glTypeSize, glFormat, glInternalFormat, glBaseInternalFormat, pixelWidth, pixelHeight, pixelDepth, numberOfArrayElements, numberOfFaces, numberOfMipmapLevels, bytesOfKeyValueData = struct.unpack('IIIIIIIIIIII', fin.read(48)) 11 | assert glType == 0, glType 12 | assert glFormat == 0, glFormat 13 | assert glInternalFormat == COMPRESSED_RGB_S3TC_DXT1_EXT, glInternalFormat 14 | 15 | fout = open(sys.argv[1][:sys.argv[1].rfind('.')]+"-dxt5.ktx", 'wb') 16 | fout.write(struct.pack('12sI', identifier, endianness)) 17 | fout.write(struct.pack('IIIIIIIIIIII', glType, glTypeSize, glFormat, COMPRESSED_RGBA_S3TC_DXT5_EXT, glBaseInternalFormat, pixelWidth, pixelHeight, pixelDepth, numberOfArrayElements, numberOfFaces, numberOfMipmapLevels, bytesOfKeyValueData)) 18 | fout.write(fin.read(bytesOfKeyValueData)) 19 | 20 | hasTransparency = False 21 | for mipmap_level in range(max(1, numberOfMipmapLevels)): 22 | imageSize, = struct.unpack('I', fin.read(4)) 23 | fout.write(struct.pack('I', imageSize*2)) 24 | for i in range(imageSize//8): 25 | color0, color1, pixels = struct.unpack('HHI', fin.read(8)) 26 | alphas = 0 27 | if color0 > color1: 28 | for j in range(16): alphas |= 1<<(j*3) 29 | #print('o', bin(alphas)) 30 | else: 31 | for j in range(16): alphas |= ((pixels>>(j*2))&3 != 3)<<(j*3) 32 | #print('x', bin(alphas)) 33 | hasTransparency = True 34 | fout.write(struct.pack('BB3HHHI', 0, 255, alphas&0xFFFF, (alphas>>16)&0xFFFF, alphas>>32, color0, color1, pixels)) 35 | if not hasTransparency: 36 | print("Warning:", sys.argv[1], "doesn't have any transparent pixels. 
Converting is a waste", file=sys.stderr) 37 | fout.close() 38 | fin.close() 39 | -------------------------------------------------------------------------------- /ghidra_scripts/jisstrings.py: -------------------------------------------------------------------------------- 1 | # 2 | #@keybinding 3 | #@menupath Tools.jisstrings 4 | #@toolbar 5 | 6 | from jarray import * 7 | from ghidra.program.model.data import * 8 | from ghidra.program.model.listing import * 9 | 10 | symdb = currentProgram.symbolTable 11 | listing = currentProgram.getListing() 12 | namespaceSymbols = list(symdb.getChildren(currentProgram.getNamespaceManager().getGlobalNamespace().getSymbol())) 13 | while len(namespaceSymbols) > 0: 14 | namespaceSymbol = namespaceSymbols.pop(0) 15 | namespace = symdb.getNamespace(namespaceSymbol.name, namespaceSymbol.getParentNamespace()) 16 | if namespace is None: continue 17 | print namespace 18 | namespaceSymbols.extend(symdb.getChildren(namespaceSymbol)) 19 | for sym in symdb.getSymbols(namespace): 20 | if not sym.name.startswith(u'@'): continue 21 | 22 | addr = sym.address.next() 23 | if addr is None: continue 24 | 25 | while len(symdb.getSymbols(addr)) == 0: 26 | addr = addr.next() 27 | length = addr.subtract(sym.address) 28 | 29 | #if isinstance(sym.object.getDataType(), TerminatedStringDataType): 30 | # listing.clearCodeUnits(sym.address, addr.previous(), False) 31 | # 32 | #continue 33 | 34 | if not isinstance(sym.object, Data): continue 35 | if sym.object.isDefined(): continue 36 | arr = zeros(length, 'b') 37 | try: 38 | currentProgram.memory.getBytes(sym.address, arr) 39 | except: 40 | continue 41 | l = arr.tolist() 42 | if len(l) < 2 or all([x == 0 for x in l]) or any([x > 0 and x < 32 for x in l]): continue 43 | s = arr.tostring() 44 | try: 45 | u = s.decode('shift-jis') 46 | except UnicodeDecodeError: 47 | continue 48 | if not u.endswith(u'\0'): continue 49 | u = u.rstrip(u'\0') 50 | if len(u) < 2: continue 51 | print u 52 | newData = listing.createData(sym.address, StringDataType.dataType, l.index(0)+1) 53 | newData.setComment(newData.PLATE_COMMENT, "Found via jisstrings") 54 | -------------------------------------------------------------------------------- /bcsv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # real editor here http://kuribo64.net/board/thread.php?id=126&page=2 3 | # spktable.bct and metable.bmt look similar; this code supports reading strings from them 4 | 5 | import sys 6 | from struct import unpack, Struct 7 | from common import getString 8 | 9 | def printHex(s): 10 | for i in range(0, len(s), 4): 11 | print ''.join(['%02X'%ord(x) for x in s[i:min(i+4, len(s))]]), 12 | 13 | fin = open(sys.argv[1], 'rb') 14 | fin.seek(0, 2) 15 | fileSize = fin.tell() 16 | fin.seek(0) 17 | count, fieldCount, offset, itemLen = unpack('>IIII', fin.read(16)) 18 | isBMT = itemLen == 0 19 | 20 | if isBMT: 21 | # BMT 22 | fieldCount = count 23 | count = 1 24 | itemLen = 4*fieldCount 25 | 26 | print fieldCount, "fields" 27 | fields = [] 28 | rowStructFmt = '>' 29 | fieldTypeFormats = ['I', 'I', 'f', 'i', 'h', 'b', 'I', 'I'] 30 | fieldTypeSizes = [ 4, 4, 4, 4, 2, 1, 4, 4] 31 | for i in range(fieldCount): 32 | if isBMT: 33 | fieldId, = unpack('>Q', fin.read(8)) 34 | fieldOffset, fieldType = i*4, 6 35 | else: 36 | fieldId, fieldOffset, fieldType = unpack('>I4xHH', fin.read(12)) 37 | fields.append((fieldId, fieldOffset, fieldType)) 38 | fields.sort(key=lambda a: a[1]) 39 | currentFieldOffset = 0 40 | for (fieldId, 
fieldOffset, fieldType) in fields: 41 | assert currentFieldOffset == fieldOffset 42 | print hex(fieldId), fieldType 43 | rowStructFmt += fieldTypeFormats[fieldType] 44 | currentFieldOffset += fieldTypeSizes[fieldType] 45 | assert fin.tell() <= offset 46 | rowStruct = Struct(rowStructFmt) 47 | if rowStruct.size < itemLen: 48 | rowStructFmt += str(itemLen-rowStruct.size)+'x' 49 | rowStruct = Struct(rowStructFmt) 50 | 51 | if isBMT: 52 | strTableOffset = 0 53 | else: 54 | strTableOffset = offset+(count*itemLen) 55 | print "string table is at 0x%X"%strTableOffset 56 | 57 | print count, "items,", itemLen, "bytes each" 58 | fin.seek(offset) 59 | for i in range(count): 60 | row = rowStruct.unpack(fin.read(itemLen)) 61 | nextEntry = fin.tell() 62 | for val, (fieldId, fieldOffset, fieldType) in zip(row, fields): 63 | if fieldType == 6: # string 64 | if val == 0xFFFFFFFF: 65 | val = None 66 | else: 67 | val = getString(val+strTableOffset, fin) 68 | elif fieldType in (1, 7): 69 | val = None 70 | print repr(val)+",", 71 | print 72 | fin.seek(nextEntry) 73 | 74 | fin.close() -------------------------------------------------------------------------------- /scene2blender.py: -------------------------------------------------------------------------------- 1 | from scenebin import * 2 | import sys, pathlib 3 | from warnings import warn 4 | import bpy 5 | from math import radians 6 | 7 | def bmd2blendcoords(pos, rot): 8 | x, y, z = pos 9 | rx, ry, rz = rot 10 | return (z, x, y), (radians(rz+90), radians(rx), radians(ry+90)) 11 | 12 | def getmesh(name): 13 | try: 14 | return bpy.data.meshes[name] 15 | except KeyError: 16 | try: 17 | return bpy.data.meshes[name.lower()] 18 | except KeyError: 19 | return bpy.data.meshes.new(name) 20 | 21 | argpath = pathlib.Path("/media/spencer/ExtraData/Game extracts/sms/scene/pinnaBoss1") 22 | if argpath.is_dir(): 23 | if argpath.name == "map": 24 | scenedirpath = argpath.parent 25 | scenebinpath = argpath / "scene.bin" 26 | else: 27 | scenedirpath = argpath 28 | scenebinpath = scenedirpath / "map" / "scene.bin" 29 | else: 30 | scenedirpath = argpath.parents[1] 31 | scenebinpath = argpath 32 | 33 | scenename = scenedirpath.name 34 | 35 | scene = readsection(open(scenebinpath, 'rb')) 36 | 37 | for o in scene.objects: 38 | if o.namehash == 0x3c2e: # MarScene 39 | marScene = o 40 | break 41 | 42 | for o in marScene.objects: 43 | if o.namehash == 0x4746: # LightAry 44 | for o2 in o.objects: 45 | assert o2.namehash == 0x286a # Light 46 | lamp = bpy.data.lights.new(o2.description, "POINT") 47 | obj = bpy.data.objects.new(o2.description, lamp) 48 | bpy.context.scene.collection.objects.link(obj) 49 | obj.location, obj.rotation_euler = bmd2blendcoords(o2.pos, (0, 0, 0)) 50 | lamp.color = [c/255.0 for c in o2.color][:3] 51 | if o.namehash == 0xabc3: # Strategy 52 | strategy = o 53 | 54 | for group in strategy.objects: 55 | assert group.namehash == 0x2682 56 | for o in group.objects: 57 | if not hasattr(o, "pos"): continue 58 | if o.namehash == 0xa3d9: 59 | data = bpy.data.lights.new(o.description, "SUN") 60 | elif hasattr(o, "model"): 61 | data = getmesh(o.model.lower()) 62 | else: 63 | data = None 64 | obj = bpy.data.objects.new(o.description, data) 65 | bpy.context.scene.collection.objects.link(obj) 66 | if hasattr(o, "rot"): 67 | obj.location, obj.rotation_euler = bmd2blendcoords(o.pos, o.rot) 68 | else: 69 | obj.location, obj.rotation_euler = bmd2blendcoords(o.pos, (0,0,0)) 70 | if hasattr(o, "scale"): 71 | obj.scale = o.scale 72 | 
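# Quick sanity check of the axis conversion above (a minimal standalone sketch,
# defined but not called anywhere; it only needs the math import, not bpy):
# a point 100 units "up" in Sunshine's Y-up space should land on Blender's +Z
# axis, and a zero rotation becomes the fixed (90, 0, 90) degree offset that
# bmd2blendcoords applies.
def _checkBmd2BlendCoords():
    loc, euler = bmd2blendcoords((0.0, 100.0, 0.0), (0.0, 0.0, 0.0))
    assert loc == (0.0, 0.0, 100.0), loc
    assert euler == (radians(90.0), radians(0.0), radians(90.0)), euler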
-------------------------------------------------------------------------------- /dedupe.py: -------------------------------------------------------------------------------- 1 | from zlib import crc32 2 | import pathlib, os.path 3 | import unityparser, uuid, yaml 4 | 5 | def toTuple(d): 6 | if isinstance(d, dict): 7 | return tuple((k, toTuple(v)) for k, v in d.items()) 8 | elif isinstance(d, list): 9 | return tuple(toTuple(v) for v in d) 10 | else: 11 | return d 12 | 13 | def toDict(d): 14 | if isinstance(d, dict): 15 | return {k: toDict(v) for k, v in d.items()} 16 | else: 17 | return d 18 | 19 | toCheck = set(pathlib.Path.cwd().rglob("*.*")) 20 | while len(toCheck) > 0: 21 | files = {} 22 | for path in toCheck: 23 | if path.is_dir(): continue 24 | if path.suffix == ".meta": continue 25 | data = path.read_bytes() 26 | if path.suffix == ".shader": data = data[data.find(b'\n'):] # skip shader name 27 | elif path.suffix in (".mat", ".asset"): 28 | data = data.split(b'\n') 29 | data = [line for line in data if not line.startswith(b' m_Name: ')] 30 | data = b'\n'.join(data) 31 | h = crc32(data) 32 | if h in files: files[h].add(path) 33 | else: files[h] = {path} 34 | 35 | toCheck = set() 36 | 37 | for dupes in files.values(): 38 | if len(dupes) == 1: continue 39 | metas = [unityparser.UnityDocument.load_yaml(f.with_suffix(f.suffix+".meta")).entry for f in dupes] 40 | guids = {m['guid'] for m in metas} 41 | for m in metas: del m['guid'] 42 | origMeta = toDict(metas[0]) 43 | metas = {toTuple(m) for m in metas} 44 | if len(metas) != 1: continue 45 | 46 | name = (','.join({f.stem for f in dupes}))[:50]+(','.join({f.suffix for f in dupes})) 47 | dir = pathlib.Path(os.path.commonpath(dupes)) 48 | newPath = dir / name 49 | 50 | print("Moving", ", ".join(str(d.relative_to(dir)) for d in dupes), "to", newPath) 51 | for d in dupes: d.with_suffix(d.suffix+".meta").unlink() 52 | dupes.pop().rename(newPath) 53 | for d in dupes: d.unlink() 54 | 55 | newGuid = str(uuid.uuid4()).replace('-', '') 56 | origMeta['guid'] = newGuid 57 | yaml.dump(origMeta, open(newPath.with_suffix(newPath.suffix+".meta"), 'w')) 58 | 59 | newGuid = newGuid.encode() 60 | guids = {guid.encode() for guid in guids} 61 | for f in pathlib.Path.cwd().rglob("*.*"): 62 | if path.is_dir(): continue 63 | if path.suffix == ".meta": continue 64 | txt = f.read_bytes() 65 | repl = txt 66 | for guid in guids: 67 | repl = repl.replace(guid, newGuid) 68 | if repl != txt: 69 | print("Updating", f) 70 | f.write_bytes(repl) 71 | toCheck.add(f) 72 | 73 | -------------------------------------------------------------------------------- /col.py: -------------------------------------------------------------------------------- 1 | import struct, array, sys 2 | 3 | class ColGroup: 4 | def readHeader(self, fin): 5 | self.surfaceId, self.numTriIndices, self.flags, self.unknown3, self.indicesOffset, self.terrainTypesOffset, self.unknownOffset2, self.unknownOffset3 = struct.unpack(">HHHHIIII", fin.read(24)) 6 | self.indexBuffer = array.array('H') 7 | self.terrainTypes = array.array('B') 8 | self.tribuf2 = array.array('B') 9 | self.tribuf3 = array.array('h') 10 | 11 | def readBuffers(self, fin): 12 | fin.seek(self.indicesOffset) 13 | self.indexBuffer.fromfile(fin, self.numTriIndices*3) 14 | if sys.byteorder != 'big': self.indexBuffer.byteswap() 15 | 16 | fin.seek(self.terrainTypesOffset) 17 | self.terrainTypes.fromfile(fin, self.numTriIndices) 18 | 19 | fin.seek(self.unknownOffset2) 20 | self.tribuf2.fromfile(fin, self.numTriIndices) 21 | 22 | if 
self.unknownOffset3 != 0: 23 | fin.seek(self.unknownOffset3) 24 | self.tribuf3.fromfile(fin, self.numTriIndices) 25 | if sys.byteorder != 'big': self.tribuf3.byteswap() 26 | 27 | def __repr__(self): 28 | return "surfaceId=%x, ntri=%d, flags=%d"%(self.surfaceId, len(self.indexBuffer)//3, self.flags) 29 | 30 | class ColReader: 31 | def read(self, fin): 32 | numCoords, coordsOffset, numGroups, groupsOffset = struct.unpack('>IIII', fin.read(16)) 33 | 34 | assert fin.tell() == groupsOffset 35 | fin.seek(groupsOffset) 36 | self.groups = [ColGroup() for i in range(numGroups)] 37 | for group in self.groups: 38 | group.readHeader(fin) 39 | 40 | assert len({group.surfaceId for group in self.groups}) == len(self.groups) 41 | 42 | assert fin.tell() == coordsOffset 43 | fin.seek(coordsOffset) 44 | self.vertexBuffer = array.array('f') 45 | self.vertexBuffer.fromfile(fin, numCoords*3) 46 | if sys.byteorder != 'big': self.vertexBuffer.byteswap() 47 | 48 | for group in self.groups: 49 | group.readBuffers(fin) 50 | assert max(group.indexBuffer) < len(self.vertexBuffer)/3, (max(group.indexBuffer), len(self.vertexBuffer)) 51 | 52 | def __repr__(self): 53 | return hex(self.unknown0)+'|'+repr(self.groups) 54 | 55 | if 0: 56 | import os 57 | for dirpath, dirnames, filenames in os.walk("."): 58 | for name in filenames: 59 | if not name.lower().endswith(".col"): continue 60 | fin = open(os.path.join(dirpath, name), 'rb') 61 | c = ColReader() 62 | c.read(fin) 63 | fin.close() 64 | if __name__ == "__main__": 65 | fin = open(sys.argv[1], 'rb') 66 | c = ColReader() 67 | c.read(fin) 68 | fin.close() 69 | for g in c.groups: print(g) 70 | 71 | -------------------------------------------------------------------------------- /ghidra_scripts/params.py: -------------------------------------------------------------------------------- 1 | # 2 | #@author 3 | #@category 4 | #@keybinding 5 | #@menupath 6 | #@toolbar 7 | 8 | from ghidra.app.decompiler import DecompInterface, DecompileOptions 9 | from ghidra.util.task import ConsoleTaskMonitor 10 | 11 | currentFunction = getFunctionContaining(currentAddress) 12 | baseParamConstructor = currentProgram.getListing().getFunctions("TBaseParam", "TBaseParam")[0] 13 | 14 | thisType = currentFunction.parameters[0].dataType.dataType 15 | #thisType = currentProgram.getDataTypeManager().getDataType("boot.dol/EnemyMarioPadSettingParams") 16 | if thisType: 17 | inst = getInstructionAt(currentAddress) 18 | while inst.address < currentFunction.body.maxAddress: 19 | if inst.mnemonicString == u'stw' and inst.numOperands == 2: 20 | offset, reg = inst.getOpObjects(1) 21 | if reg.name == u'r31': 22 | refs = getReferencesFrom(inst.address) 23 | if len(refs) > 0: 24 | sym = getDataAt(refs[0].toAddress).symbols[0] 25 | if sym.name == u'__vt' and sym.parentNamespace.name.startswith(u'TParamT'): 26 | newTypeName = sym.parentNamespace.getName(True) 27 | newTypeName = newTypeName[:newTypeName.find('<')].replace(u'::', u'/')+newTypeName[newTypeName.find('<'):] 28 | newType = currentProgram.getDataTypeManager().getDataType("boot.dol/Demangler/"+newTypeName) 29 | print offset, newTypeName, newType 30 | thisType.replaceAtOffset(offset.value, newType, 0, None, None) 31 | inst = inst.next 32 | 33 | options = DecompileOptions() 34 | monitor = ConsoleTaskMonitor() 35 | ifc = DecompInterface() 36 | ifc.setOptions(options) 37 | ifc.openProgram(currentFunction.program) 38 | res = ifc.decompileFunction(currentFunction, 60, monitor) 39 | high_func = res.getHighFunction() 40 | 41 | inst = getInstructionAt(currentAddress) 
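# Second pass: for every call to the TBaseParam constructor inside this
# function, use the decompiler's pcode to recover the field offset within the
# enclosing params structure and the Shift-JIS name string passed to the
# constructor, then apply that name to the matching field of thisType.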
42 | while inst.address < currentFunction.body.maxAddress: 43 | callConstructor = None 44 | if inst.getFlowType().isCall() and \ 45 | inst.getFlows()[0] == baseParamConstructor.entryPoint: 46 | callConstructor = inst 47 | print "Constructor is called at", callConstructor.address 48 | 49 | fieldOffset = None 50 | if callConstructor is not None: 51 | call = list(high_func.getPcodeOps(callConstructor.address))[0] 52 | thisDef = call.inputs[1].getDef().inputs[0].getDef() 53 | if thisDef is None: 54 | thisDef = call.inputs[1].getDef() 55 | thisType = thisDef.inputs[0].getHigh().dataType.dataType 56 | fieldOffset = thisDef.inputs[1].getHigh().scalar.value 57 | try: 58 | paramName = bytearray(getDataAt(getAddressFactory().getDefaultAddressSpace().getAddress(call.inputs[4].getDef().inputs[0].getHigh().scalar.value)).bytes).decode('shift-jis').rstrip(u'\0') 59 | except AttributeError: 60 | fieldOffset = None 61 | 62 | if fieldOffset is not None: 63 | print fieldOffset, paramName 64 | thisType.getComponentAt(fieldOffset).fieldName = paramName 65 | 66 | inst = inst.next 67 | -------------------------------------------------------------------------------- /brfnt.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys, os 4 | from struct import Struct 5 | from common import Section, BFile 6 | from texture import readTextureData, decodeTexturePIL 7 | 8 | import string 9 | 10 | class Finf(Section): 11 | def read(self, fin, start, size): 12 | s = fin.read(size-8) 13 | print(' '.join(['%02X'%c for c in s])) 14 | s = map(chr, s) 15 | print(' '.join([c if c.isprintable() else '.' for c in s])) 16 | 17 | class Tglp(Section): 18 | header = Struct('>4x2x2xHH2x2xHHI') 19 | def read(self, fin, start, size): 20 | self.count, self.format, self.width, self.height, offset = self.header.unpack(fin.read(0x18)) 21 | print("Fmt:", self.format, "Sz:", self.width, self.height) 22 | fin.seek(offset) 23 | self.data = readTextureData(fin, self.format, self.width, self.height, arrayCount=self.count) 24 | 25 | def export(self, fname): 26 | images = decodeTexturePIL(self.data, self.format, self.width, self.height, arrayCount=self.count) 27 | for arrayIdx, mips in enumerate(images): 28 | mips[0].save(fname+"-"+str(arrayIdx)+".png") 29 | 30 | class Cwdh(Section): 31 | header = Struct('>I3xHH') 32 | def read(self, fin, start, size): 33 | count, unk1, unk2 = self.header.unpack(fin.read(11)) 34 | piece = Struct('3b') 35 | for i in range(count): 36 | print(*piece.unpack(fin.read(3))) 37 | 38 | class Cmap(Section): 39 | header = Struct('>HHHII') 40 | def read(self, fin, start, size): 41 | self.rangeStart, self.rangeEnd, unk1, unk2, glyphCount = self.header.unpack(fin.read(14)) 42 | self.codepointToGlyph = {} 43 | if unk1 == 0: 44 | # TODO 45 | for i in range(self.rangeStart, self.rangeEnd+1): 46 | self.codepointToGlyph[i] = i 47 | elif unk1 == 1: 48 | tableEntry = Struct('>H') 49 | for codepoint in range(self.rangeStart, self.rangeEnd): 50 | glyph, = tableEntry.unpack(fin.read(2)) 51 | if glyph != 0xFFFF: 52 | self.codepointToGlyph[codepoint] = glyph 53 | elif unk1 == 2: 54 | tableEntry = Struct('>HH') 55 | for i in range(glyphCount): 56 | codepoint, glyph = tableEntry.unpack(fin.read(4)) 57 | self.codepointToGlyph[codepoint] = glyph 58 | else: 59 | print("Unknown CMAP format:", hex(self.rangeStart), hex(self.rangeEnd), hex(unk1), hex(unk2)) 60 | return 61 | print("Map", len(self.codepointToGlyph), "glyphs from", hex(self.rangeStart), "to", hex(self.rangeEnd)) 62 
| 63 | class BRFont(BFile): 64 | header = Struct('>8sLHH') 65 | sectionHandlers = {b'TGLP': Tglp, b'CWDH': Cwdh, b'CMAP': Cmap, b'FINF': Finf} 66 | def readHeader(self, fin): 67 | self.signature, self.fileLength, unk, self.chunkCount = self.header.unpack(fin.read(0x10)) 68 | 69 | if len(sys.argv) != 2: 70 | sys.stderr.write("Usage: %s \n"%sys.argv[0]) 71 | exit(1) 72 | 73 | fin = open(sys.argv[1], 'rb') 74 | brfnt = BRFont() 75 | brfnt.read(fin) 76 | fin.close() 77 | brfnt.tglp.export(os.path.splitext(sys.argv[1])[0]) 78 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # smstools 2 | 3 | These are a bunch of scripts for looking at miscellaneous Nintendo game data files. They were mostly originally built for *Super Mario Sunshine*, but they also work well with *The Legend of Zelda: The Wind Waker*. There is some support for the later-generation 3D games like Mario Galaxy (2). Some are Blender plugins, some work in Python 2 and 3, and some only work in Python 2. Most of the Python scripts require PIL or [Pillow](//github.com/python-pillow/Pillow). Check the file headers for details. 4 | 5 | * **bmd_blender and bck_blender:** Blender importers for BMD/BDL model files, BMT materials, and BCK animations. They're basically just Python ports of [bmdview](http://www.amnoid.de/gc/) (complete with comments!). Animations are currently restricted to linear interpolation only, and materials are rough approximations, not full shaders. Make sure common.py and texture.py are in the Python path somewhere (like blender/2.xx/scripts/addons/modules/) 6 | * **bcsv.py:** Rudimentary Python2 command-line script for dumping strings from BMT and BCT table files. 7 | * **bfn.py:** Python command-line script for dumping BFN bitmap fonts. 8 | * **blo.py:** Script for BLO layout files. BLO are used for GUI things like the HUD and menus. This tool makes HTML from them, and references images dumped with bti.py. 9 | * **bmg.py:** BMG files are for localized strings. Some are used for subtitles in cutscenes; the script will make Subrip files out of those, and TXT dumps of anything else. 10 | * **brfnt.py:** Like BFN, for later games. 11 | * **brres.py:** Images/textures, but I'm not sure anymore where from. 12 | * **bti.py:** Texture files, without bitmaps, usually used for GUI elements. Dumps to both PNG (PIL) and DDS (non-standard formats, but also probably incorrect sometimes) 13 | * **col_blender:** Blender importer for COL file collision data. Also started from thakis' colview. 14 | * **ral.py:** Unknown. Just strings so far. 15 | * **scene.bin.py:** In Sunshine, map layouts are stored in hierarchical "scene.bin" files. There are a few other .bin files for other things. This script is very old and very messy, but can theoretically dump the scene info to console, import it into Blender (not an addon, run it directly from script view), or put it into a VMF Source engine map. 16 | * **sequence-com.py:** BMS files and COM files (extracted from aaf, see wwdumpsnd) are MIDI-like note sequences (.scom is for sfx indexing, I'm guessing). This script translates them into real MIDI. 17 | * **tpl.py:** Like BTI, for later games. These files can actually have multiple images collected inside, but I'm guessing that's either a mistake or a glitch in the decompressor I was using. 
18 | * **wwdumpsnd:** hcs's [wwdumpsnd](https://www.hcs64.com/vgm_ripping.html), significantly expanded through disassembly/debugger stepping, to support more than just dumping the WAVes. Still lots to expand on. 19 | * **ymap.py:** Sunshine maps also usually com with a ymap.ymp file which is a grayscale heightmap of the level. This dumps that to an image as well as the dimensions. 20 | 21 | LICENSE: 22 | 23 | bmd_blender, bck_blender, col_blender, wwdumpsnd, and possibly others are directly based on the work of other people. I don't know what the original licenses were, so I can't say if these can be freely distributed. But, you're probably not going to use these comercially anyway, right? 24 | -------------------------------------------------------------------------------- /scenario names.py: -------------------------------------------------------------------------------- 1 | from scenebin import * 2 | from bmg import * 3 | import os 4 | 5 | shineStageTable = [0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 1, 1, 5, 6, 1, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 3, 8, 4, 4, 5, 5, 5, 5, 5, 5, 6, 5, 7, 6 | 7, 8, 8, 2, 2, 3, 3, 5, 6, 9, 1, 1, 2, 6, 8, 5, 3, 9] 7 | shineConvTable = [[86], [], [0, 1, 2, 3, 4, 5, 6, 7], [10, 11, 12, 13, 14, 15, 16, 17], [20, 21, 22, 23, 24, 25, 26, 27], [30, 31, 32, 33, 34, 35, 36, 37], [40, 41, 42, 43, 44, 45, 46, 47], [60, 65, 62, 61, 64, 63, 66, 67], [50, 51, 52, 53, 54, 55, 56, 57], []] 8 | etcShineConvTable = [[], [107], [100, 8, 9], [101, 18, 19], [102, 28, 29], [103, 38, 39], [104, 48, 49], [106, 68, 69], [105, 58, 59], []] 9 | scenarioNameTableJp = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 32, 35, 34, 31, 36, 37, 38, 39] 10 | scenarioNameTableUs = [0, 1, 2, 3, 4, 5, 6, 7, 10, 11, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 35, 34, 37, 36, 33, 38, 39, 40, 41] 11 | scenarioNameTable = scenarioNameTableUs 12 | 13 | stageArc = readsection(open("stageArc.bin", 'rb')) 14 | stageNames = BMessages(open("common/2d/stagename.bmg", 'rb')).strings 15 | scenarioNames = BMessages(open("common/2d/scenarioname.bmg", 'rb')).strings 16 | namesInStage = stageArc.objects[0] 17 | for stageNumber, nameTable in enumerate(namesInStage.objects): 18 | shineStage = shineStageTable[stageNumber] 19 | stageName = stageNames[shineStage][0].decode('shift-jis') 20 | print(' "%s",'%stageName.title()) 21 | for scenarioNumber, archiveName in enumerate(nameTable.objects): 22 | archiveShort = archiveName.name1[:archiveName.name1.rfind('.')] 23 | if not os.path.exists("scene/"+archiveShort): 24 | continue 25 | scenarioName = archiveShort 26 | 27 | if stageNumber <= 10: 28 | shineConv = 99 29 | if scenarioNumber < len(shineConvTable[shineStage]): 30 | shineConv = shineConvTable[shineStage][scenarioNumber] 31 | 32 | if shineConv <= len(scenarioNameTable): 33 | scenarioName = scenarioNames[scenarioNameTable[shineConv]][0].decode('shift-jis') 34 | 35 | print(' new SunshineSceneDesc("%s", "%s"),'%(archiveShort, scenarioName)) 36 | 37 | from classes import TMapObjChangeStage 38 | print() 39 | 40 | for stageNumber, nameTable in enumerate(namesInStage.objects): 41 | shineStage = shineStageTable[stageNumber] 42 | stageName = 
stageNames[shineStage][0].decode('shift-jis') 43 | for scenarioNumber, archiveName in enumerate(nameTable.objects): 44 | archiveShort = archiveName.name1[:archiveName.name1.rfind('.')] 45 | if not os.path.exists("scene/"+archiveShort): 46 | continue 47 | print(archiveShort) 48 | sceneBin = readsection(open("scene/"+archiveShort+"/map/scene.bin", 'rb')) 49 | objectGroup = sceneBin.search("オブジェクトグループ") 50 | destinations = [obj.destination for obj in objectGroup.objects if isinstance(obj, TMapObjChangeStage)] 51 | destinations.sort() 52 | for dest in destinations: 53 | print(' ', namesInStage.objects[dest].objects[0].name1) 54 | 55 | -------------------------------------------------------------------------------- /bti.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys, os 4 | from struct import Struct, unpack 5 | from texture import * 6 | from common import * 7 | 8 | class Image(ReadableStruct): 9 | header = Struct('>BBHHBBBBHIBBBBBBBBBxhI') 10 | fields = [ 11 | ("format", TexFmt), 12 | "transparency", 13 | "width", 14 | "height", 15 | "wrapS", 16 | "wrapT", 17 | ("usePalette", bool), 18 | ("paletteFormat", TlutFmt), 19 | "paletteNumEntries", 20 | "paletteOffset", 21 | ("isMipmap", bool), 22 | ("edgeLod", bool), 23 | ("biasClamp", bool), 24 | "maxAniso", 25 | "minFilter", 26 | "magFilter", 27 | "minLod", 28 | "maxLod", 29 | "mipmapCount", 30 | "lodBias", 31 | "dataOffset" 32 | ] 33 | def read(self, fin, start=None, textureHeaderOffset=None, texIndex=None): 34 | super().read(fin) 35 | self.mipmapCount = max(self.mipmapCount, 1) 36 | assert (self.format in (TexFmt.C4, TexFmt.C8, TexFmt.C14X2)) == self.usePalette, (self.format, self.usePalette) 37 | if self.format in (TexFmt.C4, TexFmt.C8, TexFmt.C14X2): 38 | self.hasAlpha = self.paletteFormat in (TlutFmt.IA8, TlutFmt.RGB5A3) 39 | else: 40 | self.hasAlpha = self.format in (TexFmt.IA4, TexFmt.IA8, TexFmt.RGB5A3, TexFmt.RGBA8) 41 | if start is not None: 42 | nextHeader = fin.tell() 43 | 44 | self.fullDataOffset = start+textureHeaderOffset+self.dataOffset+0x20*texIndex 45 | self.dataOffset = None 46 | fin.seek(self.fullDataOffset) 47 | self.data = readTextureData(fin, self.format, self.width, self.height, self.mipmapCount) 48 | 49 | self.fullPaletteOffset = start+textureHeaderOffset+self.paletteOffset+0x20*texIndex 50 | self.paletteOffset = None 51 | fin.seek(self.fullPaletteOffset) 52 | self.palette = readPaletteData(fin, self.paletteFormat, self.paletteNumEntries) 53 | 54 | fin.seek(nextHeader) 55 | 56 | def write(self, fout, offset=None): 57 | if offset is None: offset=fout.tell() 58 | self.dataOffset = offset+self.header.size 59 | self.paletteOffset = self.dataOffset + len(self.data)*self.data.itemsize 60 | super().write(fout) 61 | swapArray(self.data).tofile(fout) 62 | if self.palette is not None: swapArray(self.palette).tofile(fout) 63 | 64 | def getDataName(self, bmd): 65 | s = bmd.name+"@"+hex(self.fullDataOffset) 66 | if self.format in (TexFmt.C4, TexFmt.C8, TexFmt.C14X2): 67 | s += "p"+hex(self.fullPaletteOffset) 68 | return s 69 | 70 | if __name__ == "__main__": 71 | if len(sys.argv) != 2: 72 | sys.stderr.write("Usage: %s \n"%sys.argv[0]) 73 | exit(1) 74 | 75 | img = Image() 76 | fin = open(sys.argv[1], 'rb') 77 | img.read(fin, 0, 0, 0) 78 | fin.close() 79 | print("%dx%d, fmt=%s, mips=%d, pfmt=%s" % (img.width, img.height, img.format, img.mipmapCount, img.paletteFormat)) 80 | 81 | images = decodeTexturePIL(img.data, img.format, img.width, img.height, 
img.paletteFormat, img.palette, img.mipmapCount) 82 | images[0][0].save(os.path.splitext(sys.argv[1])[0]+'.png') 83 | 84 | fout = open(os.path.splitext(sys.argv[1])[0]+".ktx", 'wb') 85 | decodeTextureKTX(fout, img.data, img.format, img.width, img.height, img.paletteFormat, img.palette, img.mipmapCount) 86 | fout.close() 87 | 88 | -------------------------------------------------------------------------------- /ral.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from struct import Struct, pack, unpack 4 | from common import getString, Readable, ReadableStruct, alignOffset, alignFile 5 | 6 | class Keyframe(ReadableStruct): 7 | header = Struct('>hhhh2xhhhhh') 8 | fields = ["x", "y", "z", "connectionCount", "unk3", "pitch", "yaw", "roll", "speed"] 9 | def read(self, fin): 10 | super().read(fin) 11 | self.connections = unpack('>8h', fin.read(8*2))[:self.connectionCount] 12 | self.periods = unpack('>8f', fin.read(8*4))[:self.connectionCount] 13 | self.connectionCount = None 14 | def write(self, fout): 15 | if len(self.connections) != len(self.periods): 16 | raise ValueError("Connection count %d does not match period count %d"%(len(self.connections), len(self.periods))) 17 | if len(self.connections) > 8: 18 | raise ValueError("%d is more than 8 connections"%len(self.connections)) 19 | self.connectionCount = len(self.connections) 20 | super().write(fout) 21 | fout.write(pack('>8h', *(self.connections+(0,)*(8-self.connectionCount)))) 22 | fout.write(pack('>8f', *(self.periods+(0,)*(8-self.connectionCount)))) 23 | def __repr__(self): 24 | return super().__repr__()+' connections='+repr(self.connections)+' periods='+repr(self.periods) 25 | 26 | class RalSection(ReadableStruct): 27 | header = Struct('>III') 28 | fields = ["keyframeCount", "strOffset", "sectionOffset"] 29 | def read(self, fin): 30 | super().read(fin) 31 | if self.keyframeCount == 0: 32 | return 33 | self.name = getString(self.strOffset, fin) 34 | self.strOffset = None 35 | def readData(self, fin): 36 | fin.seek(self.sectionOffset) 37 | self.keyframes = [] 38 | for i in range(self.keyframeCount): 39 | self.keyframes.append(Keyframe(fin)) 40 | self.sectionOffset = None 41 | self.keyframeCount = None 42 | def write(self, fout): 43 | self.keyframeCount = len(self.keyframes) 44 | super().write(fout) 45 | 46 | class RalFile(Readable): 47 | def read(self, fin): 48 | self.sections = [] 49 | while True: 50 | s = RalSection() 51 | s.read(fin) 52 | if s.keyframeCount == 0: 53 | break 54 | self.sections.append(s) 55 | for s in self.sections: 56 | s.readData(fin) 57 | def write(self, fout): 58 | offset = RalSection.header.size*(len(self.sections)+1) 59 | for s in self.sections: 60 | s.strOffset = offset 61 | offset += len(s.name.encode('shift-jis'))+1 62 | offset = alignOffset(offset) 63 | for s in self.sections: 64 | s.sectionOffset = offset 65 | offset += (Keyframe.header.size+8*2+8*4)*len(s.keyframes) 66 | 67 | for s in self.sections: 68 | s.write(fout) 69 | fout.write(b'\0'*RalSection.header.size) 70 | for s in self.sections: 71 | fout.write(s.name.encode('shift-jis')+b'\0') 72 | alignFile(fout) 73 | for s in self.sections: 74 | for k in s.keyframes: 75 | k.write(fout) 76 | 77 | if __name__ == "__main__": 78 | import sys 79 | 80 | if len(sys.argv) != 2: 81 | sys.stderr.write("Usage: %s scene.ral\n"%sys.argv[0]) 82 | exit(1) 83 | 84 | fin = open(sys.argv[1], 'rb') 85 | r = RalFile(fin) 86 | for s in r.sections: 87 | print(s.name) 88 | for k in s.keyframes: 89 | print(k) 90 | 
print() 91 | fin.close() 92 | 93 | -------------------------------------------------------------------------------- /col_blender.py: -------------------------------------------------------------------------------- 1 | import bpy, struct, os, bmesh 2 | from col import ColReader 3 | 4 | bl_info = { 5 | "name": "Import COL", 6 | "author": "Spencer Alves", 7 | "version": (1,0,0), 8 | "blender": (2, 80, 0), 9 | "location": "Import", 10 | "description": "Import J3D COL collision data", 11 | "warning": "", 12 | "wiki_url": "", 13 | "tracker_url": "", 14 | "category": "Import-Export"} 15 | 16 | # ImportHelper is a helper class, defines filename and 17 | # invoke() function which calls the file selector. 18 | from bpy_extras.io_utils import ImportHelper 19 | from bpy.props import StringProperty, BoolProperty, EnumProperty 20 | from bpy.types import Operator 21 | 22 | TerrainNames = ["stone", "stn_snd", "marble", "soil_sld", "soil", "sand", "gravel", "woodboard", "wood_thn", "wood", "wood_sq", "metalnet", "metal_vc", "metal_sl", "branch", "tallgrass", "lawn", "straw", "rooftile", "rooftotan", "roof_hood", "wire", "table", "bed", "carpet", "chair", None, "glass", None, None, "kinoko", "carpet2"] 23 | 24 | def importFile(fname): 25 | print("Reading", fname) 26 | fin = open(fname, 'rb') 27 | col = ColReader() 28 | col.read(fin) 29 | fin.close() 30 | 31 | nameBase = os.path.splitext(os.path.split(fname)[-1])[0] 32 | 33 | for groupidx, group in enumerate(col.groups): 34 | bm = bmesh.new() 35 | for x, y, z in zip(col.vertexBuffer[0::3], col.vertexBuffer[1::3], col.vertexBuffer[2::3]): 36 | bm.verts.new((z, x, y)) 37 | bm.verts.ensure_lookup_table() 38 | 39 | m = bpy.data.meshes.new('%s-%04x'%(nameBase, group.surfaceId)) 40 | terrainSlots = {} 41 | for i in sorted(set(group.terrainTypes)): 42 | if i < len(TerrainNames) and TerrainNames[i] is not None: 43 | terrainName = TerrainNames[i] 44 | else: 45 | terrainName = "terrain"+str(i) 46 | mat = bpy.data.materials.get(terrainName, None) or bpy.data.materials.new(terrainName) 47 | terrainSlots[i] = len(m.materials) 48 | m.materials.append(mat) 49 | 50 | for triIndices, terrainType in zip(zip(group.indexBuffer[0::3], group.indexBuffer[1::3], group.indexBuffer[2::3]), group.terrainTypes): 51 | try: face = bm.faces.new([bm.verts[vIdx] for vIdx in triIndices]) 52 | except ValueError: pass # duplicate faces, probably different terrainType 53 | face.material_index = terrainSlots[terrainType] 54 | 55 | o = bpy.data.objects.new(m.name, m) 56 | bm.to_mesh(m) 57 | bm.free() 58 | bpy.context.scene.collection.objects.link(o) 59 | 60 | class ImportCOL(Operator, ImportHelper): 61 | bl_idname = "import_scene.col" # important since its how bpy.ops.import_test.some_data is constructed 62 | bl_label = "Import COL" 63 | 64 | # ImportHelper mixin class uses this 65 | filename_ext = ".col" 66 | 67 | filter_glob: StringProperty( 68 | default="*.col", 69 | options={'HIDDEN'}, 70 | ) 71 | 72 | def execute(self, context): 73 | importFile(self.filepath) 74 | return {'FINISHED'} 75 | 76 | # Only needed if you want to add into a dynamic menu 77 | def menu_func_import(self, context): 78 | self.layout.operator(ImportCOL.bl_idname, text="Import J3D COL collision data (*.col)") 79 | 80 | 81 | def register(): 82 | bpy.utils.register_class(ImportCOL) 83 | bpy.types.TOPBAR_MT_file_import.append(menu_func_import) 84 | 85 | 86 | def unregister(): 87 | bpy.utils.unregister_class(ImportCOL) 88 | bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) 89 | 90 | 91 | if __name__ == "__main__": 92 | 
register() 93 | 94 | # test call 95 | #bpy.ops.import_scene.bmd('INVOKE_DEFAULT') 96 | -------------------------------------------------------------------------------- /lineblur.py: -------------------------------------------------------------------------------- 1 | from PIL import Image, ImageChops 2 | 3 | def clampBlur(blur, extend): 4 | for y in range(blur.size[1]): 5 | for x in range(blur.size[0]): 6 | px = blur.getpixel((x,y)) 7 | mnpx = extend.getpixel((x,y))&0xF0 8 | mxpx = mnpx+15 9 | px = max(mnpx, min(mxpx, px)) 10 | blur.putpixel((x,y),px) 11 | 12 | def doLineBlur(blur, extend, flip, mask): 13 | for y in range(blur.size[0] if flip else blur.size[1]): 14 | start = None 15 | last = None 16 | #print("row", y) 17 | for x in range(blur.size[1] if flip else blur.size[0]): 18 | px = extend.getpixel((y,x) if flip else (x,y)) 19 | if last is not None: 20 | if x == 1 and px == last and False: 21 | start = 0 22 | elif abs(px-last) <= 17 and px != last: 23 | #print("at", start, "is", last, "- at", x, "is", px) 24 | if start is not None and (x-start) > 1 and (x-start) < 16: 25 | if start == 0: 26 | startColor = extend.getpixel((y,start) if flip else (start,y)) 27 | if px > startColor: startColor -= 17 28 | else: startColor += 17 29 | else: 30 | startColor = extend.getpixel((y,start-1) if flip else (start-1,y)) 31 | #print("prior is", startColor) 32 | endColor = px 33 | if startColor != endColor: 34 | if startColor > endColor: 35 | startColor &= 0xF0 36 | endColor = (endColor&0xF0)+0x10 37 | #print("downward from", hex(startColor), "to", hex(endColor)) 38 | else: 39 | startColor = (startColor&0xF0)+0x10 40 | endColor &= 0xF0 41 | #print("upward from", hex(startColor), "to", hex(endColor)) 42 | for dx in range(start, x): 43 | blur.putpixel((y,dx) if flip else (dx,y), int(((endColor-startColor)*(dx+0.5-start)/(x-start))+startColor)) 44 | mask.putpixel((y,dx) if flip else (dx,y), 1) 45 | start = x 46 | elif abs(px-last) > 17: 47 | start = None 48 | last = px 49 | 50 | def bidirLineBlurOneChannel(extend): 51 | blurHoriz = Image.new('L', extend.size) 52 | horizMask = Image.new('1', extend.size, 0) 53 | doLineBlur(blurHoriz, extend, False, horizMask) 54 | blurVert = Image.new('L', extend.size) 55 | vertMask = Image.new('1', extend.size, 0) 56 | doLineBlur(blurVert, extend, True, vertMask) 57 | combinedBlur = Image.blend(blurHoriz, blurVert, 0.5) 58 | combinedMask = ImageChops.darker(horizMask, vertMask) 59 | maskedBlur = ImageChops.composite(combinedBlur, ImageChops.composite(blurVert, ImageChops.composite(blurHoriz, extend, horizMask), vertMask), combinedMask) 60 | clampBlur(maskedBlur, extend) # shouldn't be needed 61 | return maskedBlur 62 | 63 | def bidirLineBlur(extend): 64 | bandNames = extend.getbands() 65 | if len(bandNames) == 1: 66 | return bidirLineBlurOneChannel(extend) 67 | else: 68 | return Image.merge(extend.mode, [bidirLineBlurOneChannel(extend.getchannel(channelName)) for channelName in bandNames]) 69 | 70 | if __name__ == "__main__": 71 | from bmd import BModel 72 | from texture import TexFmt, decodeTexturePIL 73 | bmd = BModel(open("scene/dolpic10/map/map/map.bmd", 'rb')) 74 | for img in bmd.tex1.textures: 75 | if img.format in (TexFmt.I4, TexFmt.IA4): 76 | extend = decodeTexturePIL(img.data, img.format, img.width, img.height, img.paletteFormat, img.palette, img.mipmapCount)[0][0] 77 | extend.transpose(method=1).save(img.name+"-extend.png") 78 | bidirLineBlur(extend).transpose(method=1).save(img.name+"-lineBlur.png") 79 | 80 | 
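# Minimal usage sketch for bidirLineBlur (an assumed example, defined but not
# called; it needs no game data): the function expects a PIL image whose values
# are quantized to 16 levels, like the I4/IA4 textures handled in __main__, and
# interpolates across the staircase between adjacent bands. A small synthetic
# gradient is enough to exercise it, e.g. _demoLineBlur().save("lineblur-demo.png").
def _demoLineBlur(size=64):
    demo = Image.new('L', (size, size))
    for y in range(size):
        for x in range(size):
            # horizontal gradient quantized to 0x00, 0x11, 0x22, ... like a 4-bit texture
            demo.putpixel((x, y), (x * 16 // size) * 0x11)
    return bidirLineBlur(demo)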
-------------------------------------------------------------------------------- /ghidra_scripts/to_string.py: -------------------------------------------------------------------------------- 1 | from ghidra.program.model.address import AddressSet 2 | from ghidra.program.model.data import ArrayDataType 3 | 4 | listing = currentProgram.listing 5 | 6 | def getField(data, name): 7 | for i in range(data.numComponents): 8 | c = data.getComponent(i) 9 | if c.fieldName == name: 10 | return c 11 | 12 | defined = set() 13 | 14 | def toString(data, dataType=None, arrayCount=None): 15 | if dataType is not None and (not data.defined or (arrayCount is not None and (not data.pointer) and (not data.array or data.numComponents != arrayCount)) or (arrayCount is None and data.dataType != dataType)): 16 | if arrayCount is not None: 17 | dataType = ArrayDataType(dataType, arrayCount, dataType.length, currentProgram.getDataTypeManager()) 18 | startAddr = data.address 19 | endAddr = startAddr.add(dataType.length).previous() 20 | print "Defining data", dataType, "at", startAddr, endAddr 21 | adset = AddressSet(startAddr, endAddr) 22 | if listing.getInstructions(adset, True).hasNext(): 23 | raise RuntimeError("Can't create data because %s contains instructions"%adset) 24 | listing.clearCodeUnits(startAddr, endAddr, False) 25 | data = listing.createData(startAddr, dataType, dataType.length) 26 | 27 | if data.array: 28 | #print data.fieldName or data.label, "is an array, each type is", data.dataType.dataType 29 | return "["+(", ".join(toString(data.getComponent(i), data.dataType.dataType) for i in range(data.numComponents)))+"]" 30 | elif data.structure: 31 | #print data.fieldName or data.label, "is a structure", data.dataType.name 32 | if data.dataType.name == 'AnimInfo': 33 | countField = 'dataCount' 34 | arrayField = 'animData' 35 | elif data.dataType.name == 'ObjHitInfo': 36 | countField = 'hitDataCount' 37 | arrayField = 'hitDataTable' 38 | elif data.dataType.name == 'MapCollisionInfo': 39 | countField = 'collisionDataCount' 40 | assert getField(data, 'collisionDataCount').value.value == getField(data, 'colliderCount').value.value, data 41 | arrayField = 'collisionData' 42 | elif data.dataType.name == 'SoundInfo': 43 | countField = 'soundKeyCount' 44 | arrayField = 'soundKeys' 45 | elif data.dataType.name == 'PhysicalInfo': 46 | countField = 'physicalDataCount' 47 | arrayField = 'physicalData' 48 | else: 49 | countField = None 50 | arrayField = None 51 | s = "{" 52 | for i in range(data.numComponents): 53 | c = data.getComponent(i) 54 | if not c.defined and c.value.value == 0: 55 | continue 56 | if c.fieldName == countField: 57 | dataCount = c.value.value 58 | continue 59 | s += repr(c.fieldName) 60 | s += ': ' 61 | if c.fieldName == arrayField: 62 | #print data.dataType.name, 'has array', arrayField, 'length', dataCount 63 | arrayCount = dataCount 64 | else: 65 | arrayCount = None 66 | if c.defined: 67 | fieldType = data.dataType.getComponent(i).dataType 68 | else: 69 | fieldType = None 70 | #print 'Field', c.fieldName, 'has type', fieldType 71 | s += toString(c, fieldType, arrayCount) 72 | s += ', ' 73 | s += "}" 74 | return s 75 | elif data.pointer: 76 | #print data.fieldName or data.label, "is a pointer to", data.dataType.dataType 77 | deref = listing.getDataAt(data.value) 78 | if deref is None: 79 | return repr(deref) 80 | elif deref.hasStringValue(): 81 | return toString(deref) 82 | else: 83 | if deref.label not in defined: 84 | fout.write(deref.label+' = '+toString(deref, data.dataType.dataType, 
arrayCount)+'\n') 85 | defined.add(deref.label) 86 | return deref.label 87 | else: 88 | #print data.fieldName or data.label, "is a scalar or string" 89 | return repr(data.value) 90 | 91 | d = listing.getDataAt(currentAddress) 92 | fout = open(getProjectRootFolder().projectLocator.projectDir.toString()+'/'+d.label+".py", 'w') 93 | fout.write(d.label+' = '+toString(d)+'\n') 94 | fout.close() 95 | 96 | -------------------------------------------------------------------------------- /bfn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys, struct, array 4 | from common import BFile, Section, swapArray 5 | from texture import readTextureData, decodeTexturePIL, calcTextureSize, TexFmt 6 | import os.path 7 | 8 | class Gly1(Section): 9 | header = struct.Struct('>HHHHIhHHhh2x') 10 | fields = [ 11 | 'minimumFontCode', 'maximumFontCode', 12 | 'glyphWidth', 'glyphHeight', 13 | 'arraySize', ('format', TexFmt), 'columns', 'rows', 'w', 'h' 14 | ] 15 | 16 | def read(self, fin, start, size): 17 | super().read(fin, start, size) 18 | self.arrayCount = (size-self.header.size-8)//self.arraySize 19 | #self.h = (size-0x18)/w 20 | #if format == 0: self.h *= 2 21 | self.data = readTextureData(fin, self.format, self.w, self.h, arrayCount=self.arrayCount) 22 | 23 | def write(self, fout): 24 | super().write(fout) 25 | swapArray(self.data).tofile(fout) 26 | 27 | def export(self, name): 28 | images = decodeTexturePIL(self.data, self.format, self.w, self.h, arrayCount=self.arrayCount) 29 | for arrayIdx, mips in enumerate(images): 30 | for mipIdx, im in enumerate(mips): 31 | im.save(name+str(arrayIdx)+".png") 32 | 33 | 34 | class Map1(Section): 35 | header = struct.Struct('>hHHH') 36 | fields = ['mappingType', 'startingCharacter', 'endingCharacter', 'spanCount'] 37 | # mappingType 0: glyph = character - startingCharacter 38 | # mappingType 1: glyph = ((char&0xff)-0x40.5)+(((char>>8)-0x88)*0xbc)+0x2be 39 | # (see convertSjis. I assume useful for Kanji in Shift-JIS, which all have 40 | # high byte above 0x88 and low byte above 0x3f) 41 | # mappingType 2: glyph = spans[character - startingCharacter] 42 | # mappingType 3: glyph = spans[i*2 + 1] where spans[i*2] == character 43 | def read(self, fin, start, size): 44 | super().read(fin, start, size) 45 | self.spans = array.array('H') 46 | self.spans.fromfile(fin, self.spanCount*self.spans.itemsize) 47 | if sys.byteorder == 'little': self.spans.byteswap() 48 | self.spanCount = None 49 | 50 | def write(self, fout): 51 | self.spanCount = len(self.spans)//self.spans.itemsize 52 | super().write(fout) 53 | swapArray(self.spans).tofile(fout) 54 | 55 | 56 | class Inf1(Section): 57 | header = struct.Struct('>hhhhhH') 58 | fields = ['fontType', 'ascent', 'descent', 'width', 'leading', 'defaultCharacterCode'] 59 | # fontType 0: 1-byte (e.g. CP-1252) 60 | # fontType 1: 2-byte (e.g. 
UTF-16) 61 | # fontType 2: Shift-JIS 62 | 63 | 64 | class Wid1(Section): 65 | header = struct.Struct('>HH') 66 | fields = ['minimumFontCode', 'maximumFontCode'] 67 | def read(self, fin, start, size): 68 | super().read(fin, start, size) 69 | self.widths = array.array('B') 70 | self.widths.fromfile(fin, (size-self.header.size-8)//self.widths.itemsize) 71 | if sys.byteorder == 'little': self.widths.byteswap() 72 | 73 | def write(self, fout): 74 | super().write(fout) 75 | swapArray(self.widths).tofile(fout) 76 | 77 | 78 | class BFont(BFile): 79 | sectionHandlers = {b'GLY1': Gly1, b'MAP1': Map1, b'INF1': Inf1, b'WID1': Wid1} 80 | 81 | def read(self, fin): 82 | super().read(fin) 83 | self.startingCharacter = min(chunk.startingCharacter for chunk in self.chunks if isinstance(chunk, Map1)) 84 | 85 | 86 | if __name__ == "__main__": 87 | if len(sys.argv) != 2: 88 | sys.stderr.write("Usage: %s \n"%sys.argv[0]) 89 | exit(1) 90 | 91 | fin = open(sys.argv[1], 'rb') 92 | bfn = BFont() 93 | bfn.read(fin) 94 | fin.close() 95 | bfn.gly1.export(os.path.splitext(sys.argv[1])[0]) 96 | 97 | print("INF", bfn.inf1.fontType, bfn.inf1.ascent, bfn.inf1.descent, bfn.inf1.width, bfn.inf1.leading, bfn.inf1.defaultCharacterCode) 98 | for chunk in bfn.chunks: 99 | if isinstance(chunk, Gly1): 100 | print("GLY", chunk.minimumFontCode, chunk.maximumFontCode, chunk.glyphWidth, chunk.glyphHeight, chunk.arraySize, chunk.format, chunk.columns, chunk.rows, chunk.w, chunk.h) 101 | if isinstance(chunk, Map1): 102 | print("MAP", chunk.mappingType, chunk.startingCharacter, chunk.endingCharacter) 103 | print(chunk.spans) 104 | if isinstance(chunk, Wid1): 105 | print("WID", chunk.minimumFontCode, chunk.maximumFontCode) 106 | print(chunk.widths) 107 | 108 | -------------------------------------------------------------------------------- /thp2avi.py: -------------------------------------------------------------------------------- 1 | import struct, sys, os.path 2 | 3 | fin = open(sys.argv[1], 'rb') 4 | tag, version, maxBufferSize, maxAudioSamples, fps, numFrames, firstFrameSize, dataSize, componentDataOffset, offsetsDataOffset, firstFrameOffset, lastFrameOffset = struct.unpack(">4sIIIfIIIIIII", fin.read(12*4)) 5 | assert tag == b"THP\0", tag 6 | assert offsetsDataOffset == 0, offsetsDataOffset 7 | 8 | fin.seek(componentDataOffset) 9 | numComponents, = struct.unpack(">I", fin.read(4)) 10 | componentTypes = struct.unpack(">16B", fin.read(16)) 11 | componentTypes = componentTypes[:numComponents] 12 | 13 | streamInfos = [None]*numComponents 14 | maxWidth = maxHeight = 0 15 | for i, streamType in enumerate(componentTypes): 16 | if streamType == 0: 17 | width, height = struct.unpack(">II", fin.read(8)) 18 | if version >= 0x00011000: 19 | struct.unpack(">I", fin.read(4)) 20 | streamInfos[i] = (width, height) 21 | maxWidth = max(maxWidth, width) 22 | maxHeight = max(maxHeight, height) 23 | 24 | class Chunk: 25 | def __init__(self, f, fourcc): 26 | self.f = f 27 | self.fourcc = fourcc 28 | self.pos = f.tell() 29 | self.len = 0 30 | self.write(fourcc) 31 | self.write(b'\0\0\0\0') 32 | assert self.len == 8 33 | def write(self, data): 34 | self.f.write(data) 35 | self.len = max(self.len, self.tell()) 36 | def fix(self): 37 | t = self.tell() 38 | self.seek(4) 39 | self.write(struct.pack("III", fin.read(12)) 119 | if 1 in componentTypes: 120 | audioSize, = struct.unpack(">I", fin.read(4)) 121 | totalSize = nextTotalSize 122 | frameData = fin.read(imageSize) 123 | startImage = frameData.find(b"\xff\xda")+2 124 | endImage = frameData.rfind(b"\xff\xd9") 
125 | jpegData = frameData[:startImage]+(frameData[startImage:endImage].replace(b"\xff", b"\xff\x00"))+frameData[endImage:] 126 | maxBufferSize = max(maxBufferSize, len(jpegData)) 127 | fin.seek(nextOffset) 128 | 129 | chunkPositions[i] = (movi.tell()-8, len(jpegData)) 130 | dc = Chunk(movi, b'00dc') 131 | dc.write(jpegData) 132 | dc.fix() 133 | if movi.tell()%2 != 0: movi.write(b'\0') 134 | 135 | movi.fix() 136 | 137 | idx1 = Chunk(avi, b'idx1') 138 | for pos, sz in chunkPositions: 139 | idx1.write(struct.pack('<4sIII', b'00dc', 0x10, pos, sz)) 140 | idx1.fix() 141 | 142 | avi.fix() 143 | 144 | fin.close() 145 | fout.close() 146 | 147 | -------------------------------------------------------------------------------- /common.py: -------------------------------------------------------------------------------- 1 | # Common functions and templates for (chunked) Mario/Zelda data files 2 | 3 | import io 4 | import sys 5 | import struct 6 | import warnings 7 | from array import array 8 | from enum import Enum 9 | 10 | class Readable(object): 11 | def __init__(self, fin=None, pos=None): 12 | super().__init__() 13 | if fin is not None: 14 | if pos is not None: 15 | fin.seek(pos) 16 | self.read(fin) 17 | 18 | class ReadableStruct(Readable): # name??? 19 | @classmethod 20 | def try_make(cls, fin, pos=None): 21 | return cls(fin=fin, pos=pos) 22 | def read(self, fin): 23 | for field, value in zip(self.fields, self.header.unpack(fin.read(self.header.size))): 24 | if isinstance(field, str): 25 | setattr(self, field, value) 26 | else: 27 | fieldName, fieldType = field 28 | setattr(self, fieldName, fieldType(value)) 29 | def as_tuple(self): 30 | return tuple(getattr(self, field) if isinstance(field, str) else getattr(self, field[0]).value if isinstance(getattr(self, field[0]), Enum) else int(getattr(self, field[0])) for field in self.fields) 31 | def write(self, fout): 32 | fout.write(self.header.pack(*self.as_tuple())) 33 | def __repr__(self): 34 | return self.__class__.__name__ + " " + " ".join([(field if isinstance(field, str) else field[0])+"="+repr(getattr(self, (field if isinstance(field, str) else field[0]))) for field in self.fields]) 35 | def __hash__(self): 36 | return hash(self.as_tuple()) 37 | def __eq__(self, other): 38 | return isinstance(other, __class__) and self.as_tuple() == other.as_tuple() 39 | 40 | class Section(ReadableStruct): 41 | def read(self, fin, start, size): 42 | super().read(fin) 43 | 44 | def swapArray(a): 45 | if sys.byteorder == 'little': 46 | b = array(a.typecode, a) 47 | b.byteswap() 48 | return b 49 | else: 50 | return a 51 | 52 | def getString(pos, f): 53 | t = f.tell() 54 | f.seek(pos) 55 | if sys.version_info[0] >= 3: ret = bytes() 56 | else: ret = str() 57 | 58 | c = f.read(1) 59 | while ord(c) != 0 and len(c) != 0: 60 | ret += c 61 | c = f.read(1) 62 | 63 | f.seek(t) 64 | 65 | return ret.decode('shift-jis') 66 | 67 | class BFile(Readable): 68 | header = struct.Struct('>8sLL4s12x') 69 | 70 | def __init__(self, *args, **kwargs): 71 | self.aligned = False 72 | super().__init__(*args, **kwargs) 73 | self.alignment = 32 74 | 75 | def readHeader(self, fin): 76 | self.signature, self.fileLength, self.chunkCount, self.svr = self.header.unpack(fin.read(0x20)) 77 | 78 | def readChunks(self, fin): 79 | self.chunks = [] 80 | for chunkno in range(self.chunkCount): 81 | start = fin.tell() 82 | try: chunkId, size = struct.unpack('>4sL', fin.read(8)) 83 | except struct.error: 84 | warnings.warn("File too small for chunk count of "+str(self.chunkCount)) 85 | break 86 | if chunkId 
in self.sectionHandlers: 87 | chunk = self.sectionHandlers[chunkId]() 88 | chunk.chunkId = chunkId 89 | chunk.read(fin, start, size) 90 | className = self.sectionHandlers[chunkId].__name__ 91 | setattr(self, className[0].lower()+className[1:], chunk) 92 | setattr(self, chunkId.decode().lower(), chunk) 93 | self.chunks.append(chunk) 94 | else: 95 | warnings.warn("Unsupported section %r" % chunkId) 96 | if self.aligned: fin.seek(((start+size+3)/4)*4) 97 | else: fin.seek(start+size) 98 | 99 | def read(self, fin): 100 | self.readHeader(fin) 101 | self.readChunks(fin) 102 | 103 | def writeHeader(self, fout): 104 | fout.write(self.header.pack(self.signature, self.fileLength, self.chunkCount, self.svr)) 105 | 106 | def writeChunks(self, fout): 107 | for chunk in self.chunks: 108 | buffer = io.BytesIO() 109 | chunk.write(buffer) 110 | alignFile(buffer, self.alignment, 8) 111 | data = buffer.getvalue() 112 | fout.write(struct.pack('>4sL', chunk.chunkId, len(data)+8)) 113 | fout.write(data) 114 | 115 | def write(self, fout): 116 | buffer = io.BytesIO() 117 | self.writeChunks(buffer) 118 | data = buffer.getvalue() 119 | self.fileLength = len(data)+self.header.size 120 | self.chunkCount = len(self.chunks) 121 | self.writeHeader(fout) 122 | fout.write(data) 123 | 124 | Padding = b"This is padding data to alignme" 125 | 126 | def alignAmt(pos, alignment): 127 | return (alignment-pos)%alignment 128 | 129 | def alignOffset(offset, alignment=4): 130 | return offset+alignAmt(offset, alignment) 131 | 132 | def alignFile(fout, alignment=4, offset=0): 133 | fout.write(Padding[:alignAmt(fout.tell()+offset, alignment)]) 134 | 135 | def calcKeyCode(name): 136 | if isinstance(name, str): 137 | name = name.encode('shift-jis') 138 | x = 0 139 | for c in name: 140 | x = (c + x*3)&0xFFFF 141 | return x 142 | 143 | def arrayStringSearch(haystack, needle): 144 | # could use something like Boyer-Moore, or could hack into Python's built-in 145 | # string search, but whatever 146 | if len(needle) <= 1: 147 | jump = 1 148 | else: 149 | try: 150 | jump = needle.index(needle[0], 1) 151 | except ValueError: 152 | jump = 1 153 | i = 0 154 | while i < len(haystack)-len(needle)+1: 155 | try: 156 | i = haystack.index(needle[0], i) 157 | except ValueError: 158 | return None 159 | if tuple(haystack[i:i+len(needle)]) == tuple(needle): 160 | return i 161 | i += jump 162 | return None 163 | 164 | -------------------------------------------------------------------------------- /bmg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from struct import unpack, Struct 4 | from common import Section, BFile 5 | 6 | fpms = 0.03/1.001 7 | mspf = 1.001/0.03 8 | def frameToHMSMS(frame): 9 | totalms = round(frame*mspf) 10 | ms = totalms%1000 11 | seconds = (totalms//1000)%60 12 | minutes = (totalms//60000)%60 13 | hours = totalms//3600000 14 | return (hours,minutes,seconds,ms) 15 | 16 | def HMSMStoFrame(hours, minutes, seconds, ms): 17 | totalms = hours*3600000+minutes*60000+seconds*1000+ms 18 | return round(totalms*fpms) 19 | 20 | class Inf1(Section): 21 | header = Struct('>HHH2x') 22 | fields = ["count_", "size", "someMessageIndex"] 23 | entryStructs = [Struct('>LHHLLLL'), Struct('>LHHB3x'), Struct('>L'), Struct('>LL')] 24 | sizeToStruct = {s.size: s for s in entryStructs} 25 | countToSize = {7: 24, 4: 12, 1: 4, 2: 8} 26 | def read(self, fin, start, chunksize): 27 | super().read(fin, start, chunksize) 28 | assert chunksize-16 >= self.size*self.count_, (chunksize, self.size, 
self.count_) 29 | if self.size not in self.sizeToStruct: 30 | raise Exception("Unknown size %d" % self.size) 31 | entryStruct = self.sizeToStruct[self.size] 32 | self.inf = [entryStruct.unpack(fin.read(self.size)) for j in range(self.count_)] 33 | 34 | def write(self, fout): 35 | self.count_ = len(self.inf) 36 | super().write(fout) 37 | entryStruct = self.sizeToStruct[self.size] 38 | for entry in self.inf: 39 | fout.write(entryStruct.pack(*entry)) 40 | 41 | class Dat1(Section): 42 | def read(self, fin, start, size): 43 | self.data = fin.read(size-8) 44 | def write(self, fout): 45 | fout.write(self.data) 46 | 47 | class BMessages(BFile): 48 | sectionHandlers = {b'INF1': Inf1, b'DAT1': Dat1} 49 | def readHeader(self, fin): 50 | super().readHeader(fin) 51 | assert self.signature == b'MESGbmg1', self.signature 52 | def writeHeader(self, fout): 53 | self.signature = b'MESGbmg1' 54 | self.svr = b'\0\0\0\0' 55 | super().writeHeader(fout) 56 | def read(self, fin): 57 | super().read(fin) 58 | self.strings = [None]*len(self.inf1.inf) 59 | for i in range(len(self.inf1.inf)): 60 | offset = self.inf1.inf[i][0] 61 | end = self.inf1.inf[i+1][0]-1 if i < len(self.inf1.inf)-1 else len(self.dat1.data)-1 62 | data = self.dat1.data[offset:end] 63 | self.strings[i] = (data,)+tuple(self.inf1.inf[i][1:]) 64 | def write(self, fout): 65 | self.inf1.inf = [] 66 | self.dat1 = Dat1() 67 | self.dat1.chunkId = b'DAT1' 68 | self.dat1.data = b'\0' 69 | self.chunks = [self.inf1, self.dat1] 70 | for entry in self.strings: 71 | data = entry[0] 72 | offset = len(self.dat1.data) 73 | self.inf1.inf.append((offset,)+(entry[1:])) 74 | self.dat1.data += data+b'\0' 75 | super().write(fout) 76 | 77 | if __name__ == "__main__": 78 | import sys 79 | import os.path 80 | if len(sys.argv) != 2: 81 | sys.stderr.write("Usage: %s \n"%sys.argv[0]) 82 | exit(1) 83 | 84 | basename, ext = os.path.splitext(sys.argv[1]) 85 | 86 | if ext.casefold() == '.srt': 87 | import re 88 | timecodeFormat = re.compile("(\d{2}):(\d{2}):(\d{2}),(\d{3})") 89 | fin = open(sys.argv[1]) 90 | bmg = BMessages() 91 | bmg.strings = [] 92 | bmg.inf1 = Inf1() 93 | bmg.inf1.chunkId = b'INF1' 94 | bmg.inf1.size = 12 95 | bmg.inf1.someMessageIndex = 0 96 | counter = fin.readline() 97 | while True: 98 | if counter == '': break 99 | assert counter.rstrip().isdigit() 100 | times = fin.readline().rstrip() 101 | startTime, endTime = times.split(" --> ") 102 | startFrame = HMSMStoFrame(*map(int, timecodeFormat.match(startTime).groups())) 103 | endFrame = HMSMStoFrame(*map(int, timecodeFormat.match(endTime).groups())) 104 | data = '' 105 | while True: 106 | line = fin.readline() 107 | if line.rstrip().isdigit() or line == '': 108 | counter = line 109 | break 110 | data += line 111 | data = data[:-2] 112 | bmg.strings.append((data.encode('shift-jis'), startFrame, endFrame, 69)) 113 | fin.close() 114 | bmg.write(open(basename+".bmg", 'wb')) 115 | elif ext.casefold() == '.bmg': 116 | fin = open(sys.argv[1], 'rb') 117 | bmg = BMessages() 118 | bmg.read(fin) 119 | fin.close() 120 | 121 | if bmg.inf1.size == 12 and (len(bmg.strings) == 0 or bmg.strings[0][2] > bmg.strings[0][1]): 122 | # subtitle format 123 | srtout = open(basename+".srt", 'w', encoding='utf_8_sig') 124 | for j, (data, start, end, soundIndex) in enumerate(bmg.strings): 125 | assert soundIndex == 69, soundIndex 126 | srtout.write(u"%d\n"%(j+1)) 127 | srtout.write(u"%02d:%02d:%02d,%03d --> "%frameToHMSMS(start)) 128 | srtout.write(u"%02d:%02d:%02d,%03d\n"%frameToHMSMS(end)) 129 | srtout.write(data.decode('shift-jis')) 
130 | srtout.write(u"\n\n") 131 | srtout.close() 132 | else: 133 | import csv 134 | csvout = open(basename+".csv", 'w', encoding='utf_8_sig') 135 | writer = csv.writer(csvout) 136 | for data in bmg.strings: 137 | writer.writerow([data[0].decode('shift-jis', 'backslashreplace').replace('\0', r'\x00')]+list(map(str, data[1:]))) 138 | csvout.close() 139 | elif ext.casefold() == '.csv': 140 | import re, csv 141 | hexescapes = re.compile(rb"\\x([0-9a-fA-F][0-9a-fA-F])") 142 | fin = open(sys.argv[1], encoding='utf_8_sig') 143 | bmg = BMessages() 144 | bmg.strings = [] 145 | bmg.inf1 = Inf1() 146 | bmg.inf1.chunkId = b'INF1' 147 | bmg.inf1.size = 0 148 | bmg.inf1.someMessageIndex = 0 149 | for line in csv.reader(fin): 150 | bmg.inf1.size = max(bmg.inf1.size, Inf1.countToSize[len(line)]) 151 | enc = line[0].encode('shift-jis') 152 | matchh = hexescapes.search(enc) 153 | while matchh is not None: 154 | enc = enc[:matchh.start()] + bytes([int(matchh[1], 16)]) + enc[matchh.end():] 155 | matchh = hexescapes.search(enc) 156 | bmg.strings.append((enc,)+tuple(map(int, line[1:]))) 157 | fin.close() 158 | bmg.write(open(basename+".bmg", 'wb')) 159 | 160 | -------------------------------------------------------------------------------- /ghidra_scripts/annotate_virtual_call.py: -------------------------------------------------------------------------------- 1 | #Deduce the virtual function being called at the current selected location 2 | #@keybinding 3 | #@menupath Tools.annotate_virtual_call 4 | #@toolbar 5 | 6 | from ghidra.app.decompiler import ClangOpToken, DecompileOptions, DecompInterface 7 | from ghidra.program.model.address import Address 8 | from ghidra.program.model.data import FunctionDefinitionDataType 9 | from ghidra.program.model.listing import CodeUnit 10 | from ghidra.program.model.pcode import HighFunctionDBUtil 11 | from ghidra.program.model.symbol import SymbolType, SourceType 12 | from ghidra.util.exception import CancelledException 13 | from ghidra.util.task import ConsoleTaskMonitor 14 | 15 | VTABLE_LABEL = "__vt" 16 | 17 | options = DecompileOptions() 18 | monitor = ConsoleTaskMonitor() 19 | ifc = DecompInterface() 20 | ifc.setOptions(options) 21 | ifc.openProgram(currentProgram) 22 | 23 | startingFunc = getFunctionContaining(currentAddress) 24 | res = ifc.decompileFunction(startingFunc, 60, monitor) 25 | 26 | # Get the indirect-call instruction nearest to the user's selection 27 | inst = getInstructionAt(currentAddress) 28 | while not any([pcode.opcode == pcode.CALLIND for pcode in inst.pcode]): inst = inst.getNext() 29 | callAddr = inst.address 30 | 31 | # Need to use decompiled Pcode to get AST access 32 | callOp = [pcode for pcode in res.highFunction.getPcodeOps(callAddr) if pcode.opcode == pcode.CALLIND][0] 33 | 34 | # Travel AST to get the object that's being called 35 | castToCode = callOp.getInput(0).getDef() 36 | if castToCode.opcode == castToCode.CAST: 37 | loadFnPtr = castToCode.getInput(0).getDef() 38 | else: 39 | loadFnPtr = castToCode 40 | assert loadFnPtr.opcode == loadFnPtr.LOAD, "expected input 0 to cast to be load, not %s"%loadFnPtr.opcode 41 | indexVt = loadFnPtr.getInput(1).getDef() 42 | assert indexVt.opcode == indexVt.PTRADD, "expected input 1 to load to be ptradd, not %s"%indexVt.opcode 43 | vtableIndex = indexVt.getInput(1).getAddress().offset 44 | pointerSize = indexVt.getInput(1).size 45 | loadVt = indexVt.getInput(0).getDef() 46 | assert loadVt.opcode == loadVt.LOAD, "expected input 0 to ptradd to be load, not %s"%loadVt.opcode 47 | getVt = 
loadVt.getInput(1).getDef() 48 | assert getVt.opcode == getVt.PTRSUB, "expected input 1 to load to be ptrsub, not %s"%getVt.opcode 49 | assert getVt.getInput(1).isConstant() 50 | assert getVt.getInput(1).getAddress().offset == 0, "expected vtable to be at offset 0, not %s"%(getVt.getInput(1).getAddress().offset) 51 | theVariable = getVt.getInput(0) 52 | 53 | # Follow parent-class reference chains and memorize the ancestry 54 | superClassNames = [theVariable.high.dataType.dataType.name] 55 | overrideClass = theVariable.high.dataType 56 | defTheVariable = theVariable.getDef() 57 | while defTheVariable is not None and defTheVariable.opcode == defTheVariable.PTRSUB: 58 | if defTheVariable.getInput(1).getAddress().offset != 0: raise ValueError("multiple inheritance not currently supported") 59 | theVariable = defTheVariable.getInput(0) 60 | superClassNames.insert(0, theVariable.high.dataType.dataType.name) 61 | defTheVariable = theVariable.getDef() 62 | 63 | 64 | def getVtableSymbolsForClassName(className): 65 | symdb = currentProgram.symbolTable 66 | vtableSymbols = [] 67 | for sym in symdb.getSymbols(className): 68 | if sym.symbolType == SymbolType.NAMESPACE: 69 | vtableSymbols.extend(symdb.getSymbols(VTABLE_LABEL, sym.getObject())) 70 | return vtableSymbols 71 | 72 | def getVFunc(vtableSymbols, vtableIndex, pointerSize): 73 | listing = currentProgram.listing 74 | for vtableSymbol in vtableSymbols: 75 | vtableAddr = vtableSymbol.address 76 | vtableData = listing.getDataAt(vtableAddr) 77 | if vtableData is None: 78 | print vtableSymbol.getName(True), "has no data defined" 79 | continue 80 | funcPointer = vtableData.getComponent(vtableIndex) 81 | if funcPointer is None: 82 | funcPointer = listing.getDataAt(vtableAddr.add(vtableIndex*pointerSize)) 83 | if funcPointer is None: 84 | print vtableSymbol.getName(True), "has no data defined at index", vtableIndex 85 | continue 86 | funcAddr = funcPointer.value 87 | if not isinstance(funcAddr, Address): 88 | print "The function pointer at", vtableIndex, "in", vtableSymbol.getName(True), "is not an address" 89 | continue 90 | if funcAddr.offset == 0: 91 | print "The function pointer at", vtableIndex, "in", vtableSymbol.getName(True), "is NULL" 92 | continue 93 | calledFunc = getFunctionAt(funcAddr) 94 | if calledFunc is None: 95 | print "No function defined at", funcAddr 96 | continue 97 | if vtableSymbol.parentNamespace.name == calledFunc.parentNamespace.name: 98 | return calledFunc 99 | 100 | def annotateVirtualCall(calledFunc, startingFunc, callAddr, thisOverride=None): 101 | print calledFunc 102 | funcDef = FunctionDefinitionDataType(calledFunc.signature) 103 | if thisOverride is not None: 104 | originalThis = funcDef.arguments[0] 105 | funcDef.replaceArgument(0, originalThis.name, thisOverride, originalThis.comment, SourceType.DEFAULT) 106 | try: HighFunctionDBUtil.writeOverride(startingFunc, callAddr, funcDef) 107 | except: print startingFunc, callAddr, funcDef 108 | currentProgram.listing.setComment(callAddr, CodeUnit.PRE_COMMENT, "{@symbol %s}"%calledFunc.symbol.getName(True)) 109 | 110 | # Look through the vtables of the primary class and any superclasses for a function pointer at the called index 111 | calledFunc = None 112 | for className in superClassNames: 113 | calledFunc = getVFunc(getVtableSymbolsForClassName(className), vtableIndex, pointerSize) 114 | if calledFunc is not None: 115 | annotateVirtualCall(calledFunc, startingFunc, callAddr) 116 | break 117 | 118 | # If we didn't find any, offer to the user a subclass implementation 
instead 119 | if calledFunc is None: 120 | dataDb = currentProgram.getDataTypeManager() 121 | calledFuncs = [] 122 | subClassNames = [] 123 | while len(calledFuncs) == 0 and len(superClassNames) > 0: 124 | superClass = superClassNames.pop(0) 125 | for struct in dataDb.getAllStructures(): 126 | if struct.numComponents > 0 and struct.getComponent(0).dataType.name == superClass: 127 | subClassNames.append(struct.name) 128 | calledFunc = getVFunc(getVtableSymbolsForClassName(struct.name), vtableIndex, pointerSize) 129 | if calledFunc is not None: 130 | calledFuncs.append(calledFunc) 131 | if len(superClassNames) == 0: 132 | superClassNames = subClassNames 133 | subClassNames = [] 134 | 135 | if len(calledFuncs) > 0: 136 | try: 137 | choice = askChoice("Pure-virtual call", # title 138 | "I can't find an implementation for this fuction table for this type. Should I use one from a concrete subclass?", # message 139 | calledFuncs, # choices 140 | None) # defaultValue 141 | except CancelledException: 142 | choice = None 143 | 144 | if choice is not None: 145 | annotateVirtualCall(choice, startingFunc, callAddr, overrideClass) 146 | 147 | -------------------------------------------------------------------------------- /blo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys, os 4 | from struct import unpack, Struct 5 | from common import Section, BFile 6 | from enum import Enum 7 | 8 | class Bgn1(Section): 9 | header = Struct('') 10 | fields = [] 11 | 12 | class End1(Section): 13 | header = Struct('') 14 | fields = [] 15 | 16 | class Ext1(Section): 17 | header = Struct('') 18 | fields = [] 19 | 20 | class Inf1(Section): 21 | header = Struct('>HHI') 22 | fields = ['width', 'height', 'tintColor'] 23 | 24 | class J2DBasePosition(Enum): 25 | TopLeft = 0 26 | TopMiddle = 1 27 | TopRight = 2 28 | 29 | CenterLeft = 3 30 | CenterMiddle = 4 31 | CenterRight = 5 32 | 33 | BottomLeft = 6 34 | BottomMiddle = 7 35 | BottomRight = 8 36 | 37 | class Pan1(Section): 38 | def __init__(self, fin=None, pos=None): 39 | self.visible = False 40 | self.paneId = b'\0\0\0\0' 41 | self.x = 0 42 | self.y = 0 43 | self.width = 0 44 | self.height = 0 45 | self.angle = 0 46 | self.anchor = J2DBasePosition.TopLeft 47 | self.alpha = 0xFF 48 | self.inheritAlpha = 0 49 | super().__init__(fin, pos) 50 | 51 | def read(self, fin, start, size): 52 | numParams, = unpack('B', fin.read(1)) 53 | self.fields = ['visible', 'paneId', 'x', 'y', 'width', 'height', 'angle', ('anchor', J2DBasePosition), 'alpha', 'inheritAlpha'][:numParams] 54 | self.header = Struct('>'+''.join(['?2x','4s','h','h','h','h','h','B','B','?'][:numParams])) 55 | super().read(fin, start, size) 56 | 57 | def getResource(fin): 58 | resourceType, namelen = unpack('>BB', fin.read(2)) 59 | return resourceType, fin.read(namelen).decode('shift-jis') 60 | 61 | class Pic1(Pan1): 62 | def __init__(self, fin=None, pos=None): 63 | self.image = (0, '') 64 | self.lut = (0, '') 65 | self.binding = 0 66 | self.mirrorFlags = 0 67 | self.wrapFlags = 0 68 | self.fromColor = 0xFFFFFFFF 69 | self.toColor = 0xFFFFFFFF 70 | self.colors = [0xFFFFFFFF]*4 71 | super().__init__(fin, pos) 72 | 73 | def read(self, fin, start, size): 74 | super().read(fin, start, size) 75 | numParams, = unpack('B', fin.read(1)) 76 | if numParams > 0: 77 | self.image = getResource(fin) 78 | numParams -= 1 79 | if numParams > 0: 80 | self.lut = getResource(fin) 81 | numParams -= 1 82 | parentFields = self.fields 83 | self.fields = ['binding', 
'mirrorFlags', 'wrapFlags', 'fromColor', 'toColor'][:numParams] 84 | parentHeader = self.header 85 | self.header = Struct('>'+'BBII'[:numParams]) 86 | Section.read(self, fin, start, size) 87 | numParams -= 4 88 | self.fields = parentFields + self.fields 89 | self.header = Struct(parentHeader.format + self.header.format[1:]) 90 | for i in range(4): 91 | if numParams > 0: 92 | self.colors[i], = unpack('>I', fin.read(4)) 93 | numParams -= 1 94 | 95 | def __repr__(self): 96 | return "{} image={} lut={} binding={} mirrorFlags={} wrapFlags={} fromColor={} toColor={} colors={}".format(super().__repr__(), self.image, self.lut, self.binding, self.mirrorFlags, self.wrapFlags, self.fromColor, self.toColor, self.colors) 97 | 98 | class Tbx1(Pan1): 99 | def __init__(self, fin=None, pos=None): 100 | self.topColor = 0 101 | self.bottomColor = 0 102 | self.binding = 0 103 | self.fontSpacing = 0 104 | self.fontLeading = 0 105 | self.fontWidth = 0 106 | self.fontHeight = 0 107 | self.strlen = 0 108 | self.connectParent = False 109 | self.fromColor = 0xFFFFFFFF 110 | self.toColor = 0xFFFFFFFF 111 | super().__init__(fin, pos) 112 | 113 | def read(self, fin, start, size): 114 | super().read(fin, start, size) 115 | numParams, = unpack('B', fin.read(1)) 116 | if numParams > 0: 117 | self.font = getResource(fin) 118 | numParams -= 1 119 | parentFields = self.fields 120 | self.fields = ['topColor', 'bottomColor', 'binding', 'fontSpacing', 'fontLeading', 'fontWidth', 'fontHeight', 'strlen'] 121 | parentHeader = self.header 122 | self.header = Struct('>'+'IIBhhHHH'[:numParams]) 123 | Section.read(self, fin, start, size) 124 | numParams -= 8 125 | self.fields = parentFields + self.fields 126 | self.header = Struct(parentHeader.format + self.header.format[1:]) 127 | self.string = fin.read(self.strlen).decode('shift-jis') 128 | parentFields = self.fields 129 | self.fields = ['connectParent', 'fromColor', 'toColor'][:numParams] 130 | parentHeader = self.header 131 | self.header = Struct('>'+'?II'[:numParams]) 132 | Section.read(self, fin, start, size) 133 | numParams -= 3 134 | self.fields = parentFields + self.fields 135 | self.header = Struct(parentHeader.format + self.header.format[1:]) 136 | 137 | class BLayout(BFile): 138 | sectionHandlers = { 139 | b'BGN1': Bgn1, 140 | b'END1': End1, 141 | b'PIC1': Pic1, 142 | b'PAN1': Pan1, 143 | b'INF1': Inf1, 144 | b'TBX1': Tbx1, 145 | b'EXT1': Ext1 146 | } 147 | 148 | def parsechunks(chunklist, i=0, indent=0): 149 | toWrite = "
" 150 | while i < len(chunklist): 151 | chunk = chunklist[i] 152 | print(' '*indent+str(chunk)) 153 | if isinstance(chunk, Bgn1): 154 | htmlout.write(toWrite) 155 | i = parsechunks(chunklist, i+1, indent+1) 156 | htmlout.write("
") 157 | elif isinstance(chunk, End1): 158 | return i 159 | elif isinstance(chunk, Pan1): 160 | style = 'style="position: absolute; left: %dpx; top: %dpx; width: %dpx; height: %dpx; visibility: %s'%(chunk.x, chunk.y, chunk.width, chunk.height, ["hidden", "inherit"][chunk.visible]) 161 | if chunk.angle != 0: 162 | style += '; transform: rotate(%fdeg)'%(chunk.angle*180/0x7FFF) 163 | if chunk.alpha != 255: 164 | style += '; opacity: %f'%(chunk.alpha/255) 165 | style += '"' 166 | cId = chunk.paneId.replace(b'\0', b'').decode() 167 | if len(cId) > 0: 168 | style += ' id="%s"'%(cId) 169 | if isinstance(chunk, Pic1): 170 | htmlout.write(''%(style, chunk.image[1].replace('.bti', '.png'))) 171 | elif isinstance(chunk, Tbx1): 172 | htmlout.write('
%s
'%(style, chunk.string)) 173 | else: 174 | toWrite = '
'%(style) 175 | i += 1 176 | return i 177 | 178 | if __name__ == "__main__": 179 | if len(sys.argv) != 2: 180 | sys.stderr.write("Usage: %s \n"%sys.argv[0]) 181 | exit(1) 182 | 183 | fin = open(sys.argv[1], 'rb') 184 | blo = BLayout() 185 | blo.read(fin) 186 | fin.close() 187 | 188 | htmlout = open(os.path.splitext(sys.argv[1])[0]+".html", 'w') 189 | htmlout.write(''%(blo.inf1.width, blo.inf1.height)) 190 | parsechunks(blo.chunks) 191 | htmlout.write("") 192 | htmlout.close() 193 | 194 | -------------------------------------------------------------------------------- /get-obj-info.py: -------------------------------------------------------------------------------- 1 | from struct import * 2 | from ..common import * 3 | from dolreader.dol import * 4 | import glob 5 | 6 | def getString2(f, pos): 7 | return getString(pos, f) 8 | 9 | dolToMemoryOffset = 0x80003000 10 | 11 | readCache = {} 12 | 13 | class ReadableStruct2(Readable): 14 | def read(self, fin): 15 | for i, (field, value) in enumerate(zip(self.fields, self.header.unpack(fin.read(self.header.size)))): 16 | if isinstance(field, str): 17 | setattr(self, field, value) 18 | elif len(field) == 2: 19 | fieldName, fieldType = field 20 | if value == 0: 21 | setattr(self, fieldName, None) 22 | else: 23 | offset = value 24 | if offset in readCache: 25 | value = readCache[offset] 26 | else: 27 | value = fieldType(fin, offset) 28 | readCache[offset] = value 29 | setattr(self, fieldName, value) 30 | else: 31 | fieldName, fieldType, fieldCount = field 32 | if value == 0: 33 | setattr(self, fieldName, None) 34 | else: 35 | offset = value 36 | if offset in readCache: 37 | value = readCache[offset] 38 | else: 39 | value = [fieldType(fin, offset+i*fieldType.header.size) for i in range(getattr(self, fieldCount))] 40 | readCache[offset] = value 41 | setattr(self, fieldName, value) 42 | 43 | def print(self, indent=1): 44 | print(self.__class__.__name__) 45 | for field in self.fields: 46 | print(' '*indent, end='') 47 | if isinstance(field, str): 48 | fieldName = field 49 | value = getattr(self, field) 50 | else: 51 | fieldName = field[0] 52 | value = getattr(self, fieldName) 53 | print(fieldName, "=", end=" ") 54 | if isinstance(value, ReadableStruct2): 55 | value.print(indent+1) 56 | elif isinstance(value, int): 57 | print(hex(value)) 58 | elif isinstance(value, list): 59 | for x in value: 60 | x.print(indent+1) 61 | else: 62 | print(value) 63 | 64 | class PhysicalData(ReadableStruct2): 65 | header = Struct('>fffffffffffff') 66 | fields = ["field_0x0", "field_0x4", "field_0x8", "field_0xc", "field_0x10", 67 | "field_0x14", "field_0x18", "field_0x1c", "field_0x20", "field_0x24", 68 | "field_0x28", "field_0x2c", "field_0x30"] 69 | 70 | class PhysicalInfo(ReadableStruct2): 71 | header = Struct('>iIi') 72 | fields = ["field_0x0", ("physicalData", PhysicalData), "field_0x8"] 73 | 74 | class CollisionData(ReadableStruct2): 75 | header = Struct('>II') 76 | fields = [("name", getString2), "field_0x4"] 77 | 78 | class AnimData(ReadableStruct2): 79 | header = Struct('>IIB3xII') 80 | fields = [("modelName", getString2), ("animBaseName", getString2), "animType", ("material", getString2), 81 | ("basName", getString2)] 82 | def read(self, fin): 83 | super().read(fin) 84 | 85 | class AnimInfo(ReadableStruct2): 86 | header = Struct('>HHI') 87 | fields = ["dataCount", "field_0x2", ("animData", AnimData, "dataCount")] 88 | 89 | class HitDataTable(ReadableStruct2): 90 | header = Struct('>ffff') 91 | fields = ["zScale1", "yScale1", "zScale2", "yScale2"] 92 | 93 | class 
HitInfo(ReadableStruct2): 94 | header = Struct('>IIfI') 95 | fields = ["field_0x0", "flags", "yScale", ("hitDataTable", HitDataTable)] 96 | 97 | class CollisionInfo(ReadableStruct2): 98 | header = Struct('>II') 99 | fields = ["field_0x0", ("collosionData", CollisionData)] 100 | 101 | class ObjSoundInfo(ReadableStruct2): 102 | header = Struct('>II') 103 | fields = ["always10", "soundDataOffset"] 104 | 105 | class SinkData(ReadableStruct2): 106 | header = Struct('>ff') 107 | fields = ["field_0x0", "field_0x4"] 108 | 109 | class ModelInfo(ReadableStruct2): 110 | header = Struct('>IIIII') 111 | fields = [("filename", getString2), ("jointName", getString2), "modelDataOffset", "modelOffset", "jointOffset"] 112 | 113 | class MoveData(ReadableStruct2): 114 | header = Struct('>III') 115 | fields = [("bckName", getString2), "animOffset", "frameCtrlOffset"] 116 | 117 | class ObjData(ReadableStruct2): 118 | header = Struct('>IIIIIIIIIIIIfII') 119 | fields = [("baseName", getString2), "hitFlags", ("managerName", getString2), ("groupName", getString2), 120 | ("animInfo", AnimInfo), ("hitInfo", HitInfo), ("collisionInfo", CollisionInfo), ("soundInfo", ObjSoundInfo), 121 | ("physicalInfo", PhysicalInfo), ("sinkData", SinkData), ("modelInfo", ModelInfo), ("moveData", MoveData), 122 | "xScale", "objFlags", "keyCode"] 123 | 124 | fin = DolFile(open("dol/boot.dol", 'rb')) 125 | fin.seek(0x803c8580) 126 | objInfoTable = [ObjData(fin, objPtr) for objPtr in unpack('>360I', fin.read(1440))] 127 | materialOverrides = { 128 | 0x4000009c: "LeafBoat", 129 | 0x20000068: "nozzleBox", 130 | 0x20000026: "nozzleItem", 131 | 0x40000048: "flower", 132 | 0x4000001c: "kibako", 133 | 0x4000001b: "ArrowBoard", 134 | 0x4000005a: "barrel", 135 | 0x400002c2: "BrickBlock", 136 | 0x400002c3: "WaterMelon", 137 | 0x400000d3: "SandBombBase", 138 | 0x400000ce: "mirror", 139 | 0x400000cd: "SandBombBase", 140 | 0x400000ce: "SandBombBase", 141 | 0x400000a5: "LeafBoat", 142 | 0x400000a0: "bianco", 143 | 0x400000ba: "riccoShip", 144 | 0x40000096: "bianco" 145 | } 146 | for o in []:#objInfoTable: 147 | if o.animInfo is not None: 148 | print("{ k: '%s'"%o.baseName, end='') 149 | if o.hitFlags in materialOverrides: 150 | print(", t: '%s'"%materialOverrides[o.hitFlags], end='') 151 | if o.animInfo.animData is not None: 152 | animData = o.animInfo.animData[0] 153 | print(", m: '%s'"%(animData.modelName), end='') 154 | if o.hitFlags not in materialOverrides and animData.material is not None: 155 | print(", t: '%s'"%animData.material, end='') 156 | if animData.animBaseName is not None: 157 | print(", n: '%s', u: %d"%(animData.animBaseName, animData.animType), end='') 158 | if animData.animType != 0: 159 | ext = [0, 0, ".bpk", ".btp", ".btk", ".brk"][animData.animType] 160 | #assert len(glob.glob("scene/*/mapobj/"+animData.animBaseName+ext)) > 0 161 | print(" },") 162 | elif o.hitFlags in materialOverrides: 163 | print("{ k: '%s', t: '%s'},"%(o.baseName,materialOverrides[o.hitFlags])) 164 | 165 | class ActorData(ReadableStruct2): 166 | header = Struct('>IIIffffIIIIIiIIB3xI') 167 | fields = [("baseName", getString2), "field_0x4", "field_0x8", "field_0xc", 168 | "field_0x10", "field_0x14", "field_0x18", ("groupName", getString2), 169 | ("modelName", getString2), "field_0x24", ("collisionManagerName", getString2), 170 | "field_0x2c", "field_0x30", ("particle", getString2), "particleId", 171 | "field_0x3c", "field_0x40"] 172 | 173 | actorDataTable = [ActorData(fin, 0x80389654+i*68) for i in range(29)] 174 | for a in actorDataTable: 175 | a.print() 176 | 
continue 177 | if not a.modelName and not a.particle: continue 178 | print("{ k: '%s'"%a.baseName, end='') 179 | if a.modelName: 180 | print(", m: '%s'"%(a.modelName), end='') 181 | if a.particle: 182 | assert a.particle.startswith("/scene/") 183 | print(", p: '%s'"%(a.particle[7:]), end='') 184 | print(" },") 185 | 186 | -------------------------------------------------------------------------------- /col2unity.py: -------------------------------------------------------------------------------- 1 | import unityparser, sys, os.path, array 2 | from col import ColReader 3 | from unityassets import * 4 | 5 | Mesh = unityparser.constants.UnityClassIdMap.get_or_create_class_id(43, 'Mesh') 6 | 7 | def exportCol(col, outputFolderLocation, physNameBase, shouldSplit=True): 8 | for groupIdx, group in enumerate(col.groups): 9 | zippedTriIndices = zip(group.indexBuffer[0::3], group.indexBuffer[1::3], group.indexBuffer[2::3]) 10 | if len(group.tribuf3): 11 | triangles = list(zip(zippedTriIndices, group.terrainTypes, group.tribuf2, group.tribuf3)) 12 | else: 13 | triangles = list(zip(zippedTriIndices, group.terrainTypes, group.tribuf2, [None]*group.numTriIndices)) 14 | if shouldSplit: 15 | # split large COLs into disconnected pieces, hopefully as an optimization 16 | connectedPieces = [] 17 | 18 | while len(triangles) > 0: 19 | connectedTriIndices, connectedTerrainType, connectedUnk2, connectedUnk3 = triangles.pop(0) 20 | connectedTris = [connectedTriIndices] 21 | connectedIndices = set(connectedTriIndices) 22 | i = 0 23 | while i < len(triangles): 24 | triIndices, terrainType, unk2, unk3 = triangles[i] 25 | 26 | if len(connectedIndices.intersection(triIndices)) > 0 and \ 27 | terrainType == connectedTerrainType and \ 28 | unk2 == connectedUnk2 and \ 29 | unk3 == connectedUnk3: 30 | 31 | del triangles[i] 32 | connectedTris.append(triIndices) 33 | connectedIndices.update(triIndices) 34 | i = 0 35 | 36 | else: 37 | i += 1 38 | 39 | connectedPieces.append((connectedTris, connectedTerrainType, connectedUnk2, connectedUnk3)) 40 | else: 41 | # only split different terrain types, so that they can be acted on separately 42 | connectedPieces = {} 43 | for connectedTriIndices, connectedTerrainType, connectedUnk2, connectedUnk3 in triangles: 44 | key = (connectedTerrainType, connectedUnk2, connectedUnk3) 45 | if key in connectedPieces: 46 | connectedPieces[key].append(connectedTriIndices) 47 | else: 48 | connectedPieces[key] = [connectedTriIndices] 49 | connectedPieces = [(connectedTriIndices, connectedTerrainType, connectedUnk2, connectedUnk3) for (connectedTerrainType, connectedUnk2, connectedUnk3), connectedTriIndices in connectedPieces.items()] 50 | 51 | for connectedIdx, (oldTriangles, terrainType, unk2, unk3) in enumerate(connectedPieces): 52 | usedIndices = list({i for tri in oldTriangles for i in tri}) 53 | usedIndices.sort() # keep original order - optimal? 
54 | newVertexBuffer = array.array('f', [col.vertexBuffer[i*3+j] for i in usedIndices for j in range(3)]) 55 | newIndexBuffer = array.array('H', [usedIndices.index(i) for tri in oldTriangles for i in tri]) 56 | del usedIndices 57 | 58 | #newVertexBuffer = col.vertexBuffer 59 | #newIndexBuffer = group.indexBuffer 60 | 61 | physName = physName = '%s-%04x-%d-%d'%(physNameBase, group.surfaceId, terrainType, unk2) 62 | if unk3 is not None: 63 | physName += '-%d'%unk3 64 | if shouldSplit: 65 | physName += '.%d'%connectedIdx 66 | 67 | mesh = Mesh(str(4300000), '') 68 | asset = unityparser.UnityDocument([mesh]) 69 | 70 | mesh.m_Name = physName 71 | mesh.serializedVersion = 9 72 | mesh.m_IsReadable = 1 73 | mesh.m_KeepVertices = 1 74 | mesh.m_KeepIndices = 1 75 | mesh.m_IndexFormat = 0 76 | mesh.m_SubMeshes = [] 77 | 78 | minX = min([newVertexBuffer[i*3+0] for i in newIndexBuffer]) 79 | minY = min([newVertexBuffer[i*3+1] for i in newIndexBuffer]) 80 | minZ = min([newVertexBuffer[i*3+2] for i in newIndexBuffer]) 81 | maxX = max([newVertexBuffer[i*3+0] for i in newIndexBuffer]) 82 | maxY = max([newVertexBuffer[i*3+1] for i in newIndexBuffer]) 83 | maxZ = max([newVertexBuffer[i*3+2] for i in newIndexBuffer]) 84 | mesh.m_SubMeshes.append({ 85 | "firstByte": 0, 86 | "indexCount": len(newIndexBuffer), 87 | "topology": 0, 88 | "baseVertex": 0, 89 | "firstVertex": 0, 90 | "vertexCount": len(newVertexBuffer)//3, 91 | "serializedVersion": 2, 92 | "localAABB": { 93 | "m_Center": {'x': (minX+maxX)/2, 'y': (minY+maxY)/2, 'z': (minZ+maxZ)/2}, 94 | "m_Extent": {'x': (maxX-minX)/2, 'y': (maxY-minY)/2, 'z': (maxZ-minZ)/2} 95 | } 96 | }) 97 | 98 | channels = [ 99 | { # position 100 | "stream": 0, 101 | "offset": 0, 102 | "format": 0, # kVertexFormatFloat 103 | "dimension": 3 104 | }, 105 | { # normal 106 | "stream": 0, 107 | "offset": 0, 108 | "format": 0, 109 | "dimension": 0 110 | }, 111 | { # tangent 112 | "stream": 0, 113 | "offset": 0, 114 | "format": 0, 115 | "dimension": 0 116 | }, 117 | { # color 118 | "stream": 0, 119 | "offset": 0, 120 | "format": 0, 121 | "dimension": 0 122 | } 123 | ] 124 | for i in range(8): 125 | channels.append({ # uv 126 | "stream": 0, 127 | "offset": 0, 128 | "format": 0, 129 | "dimension": 0 130 | }) 131 | channels.append({ # blend weight 132 | "stream": 0, 133 | "offset": 0, 134 | "format": 0, 135 | "dimension": 0 136 | }) 137 | channels.append({ # blend indices 138 | "stream": 0, 139 | "offset": 0, 140 | "format": 0, 141 | "dimension": 0 142 | }) 143 | if sys.byteorder != 'little': newVertexBuffer.byteswap() 144 | mesh.m_VertexData = { 145 | "serializedVersion": 2, 146 | "m_VertexCount": len(newVertexBuffer)//3, 147 | "m_Channels": channels, 148 | "m_DataSize": len(newVertexBuffer)*4, 149 | "_typelessdata": newVertexBuffer.tobytes().hex() 150 | } 151 | if sys.byteorder != 'little': newIndexBuffer.byteswap() 152 | mesh.m_IndexBuffer = newIndexBuffer.tobytes().hex() 153 | mesh.m_LocalAABB = { 154 | "m_Center": {'x': (minX+maxX)/2, 'y': (minY+maxY)/2, 'z': (minZ+maxZ)/2}, 155 | "m_Extent": {'x': (maxX-minX)/2, 'y': (maxY-minY)/2, 'z': (maxZ-minZ)/2} 156 | } 157 | assetName = physName+".asset" 158 | asset.dump_yaml(os.path.join(outputFolderLocation, assetName)) 159 | yield physName, writeNativeMeta(assetName, 4300000, outputFolderLocation), mesh.m_LocalAABB["m_Center"] 160 | 161 | if __name__ == '__main__': 162 | fixUnityParserFloats() 163 | 164 | outputFolderLocation, physNameBase = os.path.split(sys.argv[1]) 165 | fin = open(sys.argv[1], 'rb') 166 | col = ColReader() 167 | 
col.read(fin) 168 | fin.close() 169 | for physName, uid in exportCol(col, outputFolderLocation, physNameBase): pass 170 | 171 | -------------------------------------------------------------------------------- /ghidra_scripts/classes.py: -------------------------------------------------------------------------------- 1 | # 2 | #@author 3 | #@category 4 | #@keybinding 5 | #@menupath 6 | #@toolbar 7 | from ghidra.program.model.symbol import FlowType 8 | 9 | glb = currentProgram.getNamespaceManager().getGlobalNamespace() 10 | symdb = currentProgram.symbolTable 11 | 12 | nameRef = getNamespace(getNamespace(glb, "JDrama"), "TNameRef") 13 | 14 | namespacesToExplore = [nameRef] 15 | exploredNamespaces = set() 16 | classesPotentialParents = {} 17 | 18 | while len(namespacesToExplore) > 0: 19 | classNamespace = namespacesToExplore.pop(0) 20 | exploredNamespaces.add(classNamespace) 21 | destructors = symdb.getSymbols(u"~"+classNamespace.name, classNamespace) 22 | vts = symdb.getSymbols(u"__vt", classNamespace) 23 | for sym in destructors+vts: 24 | for ref in getReferencesTo(sym.address): 25 | func = getFunctionContaining(ref.fromAddress) 26 | if func is None: continue 27 | derivedClassNamespace = func.parentNamespace 28 | if derivedClassNamespace == classNamespace: continue 29 | if func.name != u"~"+derivedClassNamespace.name: continue 30 | 31 | fullDerivedName = derivedClassNamespace.getName(True) 32 | fullParentName = classNamespace.getName(True) 33 | 34 | if fullDerivedName in classesPotentialParents: 35 | classesPotentialParents[fullDerivedName].add(fullParentName) 36 | else: 37 | classesPotentialParents[fullDerivedName] = set([fullParentName]) 38 | 39 | if derivedClassNamespace not in exploredNamespaces: 40 | namespacesToExplore.append(derivedClassNamespace) 41 | 42 | classParents = {} 43 | action = True 44 | while action: 45 | action = False 46 | for k in list(classesPotentialParents.keys()): 47 | if len(classesPotentialParents[k]) == 1: 48 | classParents[k] = list(classesPotentialParents[k])[0] 49 | del classesPotentialParents[k] 50 | action = True 51 | 52 | else: 53 | potentialParents = classesPotentialParents[k] 54 | for potentialParent in list(potentialParents): 55 | if potentialParent not in classParents: 56 | break 57 | if classParents[potentialParent] in potentialParents: 58 | potentialParents.remove(classParents[potentialParent]) 59 | action = True 60 | 61 | from pprint import pprint 62 | pprint(classesPotentialParents) 63 | 64 | jsystem = getNamespace(glb, "JSystem") 65 | operatorNew = symdb.getSymbol("operator_new", jsystem) 66 | libc = getNamespace(glb, "MSL_C.PPCEABI.bare.H") 67 | strcmp = symdb.getSymbol("strcmp", libc) 68 | 69 | sizes = {} 70 | names = {} 71 | for classNamespace in exploredNamespaces: 72 | constructors = symdb.getSymbols(classNamespace.name, classNamespace) 73 | vts = symdb.getSymbols(u"__vt", classNamespace) 74 | for sym in constructors+vts: 75 | for ref in getReferencesTo(sym.address): 76 | #if ref.referenceType != FlowType.UNCONDITIONAL_CALL: continue 77 | callConstructorInst = getInstructionAt(ref.fromAddress) 78 | 79 | inst = callConstructorInst.getPrevious() 80 | for i in range(10): 81 | if inst.getFlowType().isCall() and inst.getNumOperands() == 1 and inst.getOpObjects(0)[0] == operatorNew.address: 82 | break 83 | inst = inst.getPrevious() 84 | if not inst.getFlowType().isCall() or inst.getNumOperands() != 1 or inst.getOpObjects(0)[0] != operatorNew.address: 85 | continue 86 | callNewInst = inst 87 | 88 | inst = callNewInst.getPrevious() 89 | for i in 
range(2): 90 | if inst.mnemonicString == u'li': break 91 | inst = inst.getPrevious() 92 | if inst.mnemonicString != u'li': continue 93 | mallocSizeInst = inst 94 | 95 | name = classNamespace.getName(True) 96 | size = int(mallocSizeInst.getScalar(1).value) 97 | assert size is not None 98 | if name in sizes: 99 | sizes[name] = min(size, sizes[name]) 100 | else: 101 | sizes[name] = size 102 | 103 | 104 | inst = mallocSizeInst.getPrevious() 105 | for i in range(6): 106 | if inst.getFlowType().isCall(): 107 | break 108 | inst = inst.getPrevious() 109 | if not inst.getFlowType().isCall() or inst.getNumOperands() != 1 or inst.getOpObjects(0)[0] != strcmp.address: 110 | continue 111 | callStrcmpInst = inst 112 | 113 | inst = callStrcmpInst.getPrevious() 114 | for i in range(4): 115 | refs = getReferencesFrom(inst.address) 116 | if len(refs) == 1: 117 | break 118 | inst = inst.getPrevious() 119 | if len(refs) != 1: 120 | print "Couldn't find refs starting from", callStrcmpInst.address 121 | continue 122 | ref = refs[0] 123 | #if ref.referenceType != FlowType.DATA: 124 | # print "Ref not data at", ref 125 | # continue 126 | data = getDataAt(ref.getToAddress()) 127 | if data is None or data.dataType.name != u'string': 128 | print "data not string at", data 129 | continue 130 | instName = data.getValue() 131 | if name in names: 132 | names[name].add(instName) 133 | else: 134 | names[name] = set([instName]) 135 | 136 | import weakref 137 | 138 | class ClassObj: 139 | def __init__(self, name): 140 | self.name = name 141 | self.children = set() 142 | self.size = sizes.get(name, None) 143 | self.names = names.get(name, set()) 144 | def get(self, name): 145 | if self.name == name: 146 | return self 147 | else: 148 | for child in self.children: 149 | o = child.get(name) 150 | if o: return o 151 | def p(self, indent=0, parent=None, doPrint=False): 152 | if self.name == u'TMapObjBase': doPrint = True 153 | #print ' '*indent+self.name, '' if self.size is None else hex(self.size), u', '.join(self.names) 154 | if doPrint: 155 | for n in self.names: 156 | print " case "+repr(str(n))+":" 157 | #if len(self.names): 158 | # print "@register(%s)"%(", ".join(map(repr, map(str, self.names)))) 159 | #print "class", self.name, 160 | #if parent: 161 | # print "("+parent.name+")", 162 | # if self.size != parent.size: 163 | # print ":" 164 | # else: 165 | # print ": pass" 166 | #print 167 | for child in sorted(self.children, cmp=lambda x,y: cmp(x.name, y.name) if len(x.children) == len(y.children) else cmp(len(x.children), len(y.children))): 168 | child.p(indent+1, self, doPrint) 169 | #print 170 | def __repr__(self): 171 | return '%s(%d)'%(self.name, self.size) 172 | def cleanNames(self): 173 | for child in self.children: 174 | self.names.difference_update(child.names) 175 | child.cleanNames() 176 | 177 | root = ClassObj(nameRef.getName(True)) 178 | 179 | action = True 180 | while action:#len(classParents) > 0: 181 | action = False 182 | for derivedName, parentName in classParents.items(): 183 | parent = root.get(parentName) 184 | if parent is not None: 185 | o = ClassObj(derivedName) 186 | if o.size is not None and parent.size is not None: parent.size = min(o.size, parent.size) 187 | parent.children.add(o) 188 | del classParents[derivedName] 189 | action = True 190 | 191 | root.cleanNames() 192 | pprint(classParents) 193 | root.p() 194 | 195 | def setupStructs(k, parentDt=None): 196 | dt = currentProgram.getDataTypeManager().getDataType("boot.dol/Demangler/"+k.name.replace("::", "/")) 197 | if dt is not None and 
dt.isNotYetDefined() and k.size is not None and (parentDt is None or parentDt.getLength() <= k.size): 198 | dt.setDescription("generated from classes.py") 199 | if parentDt is not None: 200 | dt.add(parentDt, 0, "_base", "") 201 | assert dt.getLength() == parentDt.getLength() 202 | assert dt.getLength() > 0 203 | dt.growStructure(k.size-dt.getLength()) 204 | assert dt.getLength() == k.size 205 | print "Set up", k.name 206 | for child in k.children: 207 | setupStructs(child, dt) 208 | 209 | #setupStructs(root) 210 | -------------------------------------------------------------------------------- /scene2vmf.py: -------------------------------------------------------------------------------- 1 | from scenebin import * 2 | import sys, pathlib 3 | 4 | def bmd2vmfcoords(x, y, z, rx, ry, rz): 5 | return -x, z, y, -rx, rz, ry 6 | 7 | argpath = pathlib.Path(sys.argv[1]) 8 | if argpath.is_dir(): 9 | if argpath.name == "map": 10 | scenedirpath = argpath.parent 11 | scenebinpath = argpath / "scene.bin" 12 | else: 13 | scenedirpath = argpath 14 | scenebinpath = scenedirpath / "map" / "scene.bin" 15 | else: 16 | scenedirpath = argpath.parents[1] 17 | scenebinpath = argpath 18 | 19 | scenename = scenedirpath.name 20 | 21 | scene = readsection(open(scenebinpath, 'rb')) 22 | 23 | for o in scene.objects: 24 | if o.namehash == 0x3c2e: # MarScene 25 | marScene = o 26 | break 27 | 28 | vmfout = open(sys.argv[1][:sys.argv[1].rfind('.')]+".vmf", 'w') 29 | vmfout.write("""versioninfo 30 | { 31 | "editorversion" "400" 32 | "editorbuild" "5439" 33 | "mapversion" "1" 34 | "formatversion" "100" 35 | "prefab" "0" 36 | } 37 | visgroups 38 | { 39 | } 40 | viewsettings 41 | { 42 | "bSnapToGrid" "1" 43 | "bShowGrid" "1" 44 | "bShowLogicalGrid" "0" 45 | "nGridSpacing" "64" 46 | "bShow3DGrid" "0" 47 | } 48 | world 49 | { 50 | "id" "1" 51 | "mapversion" "1" 52 | "classname" "worldspawn" 53 | "skyname" "sky_day01_01" 54 | "maxpropscreenwidth" "-1" 55 | "detailvbsp" "detail.vbsp" 56 | "detailmaterial" "detail/detailsprites" 57 | } 58 | """) 59 | 60 | for o in marScene.objects: 61 | if o.namehash == 0x4746: # LightAry 62 | for o2 in o.objects: 63 | assert o2.namehash == 0x286a # Light 64 | x, y, z, rx, ry, rz = bmd2vmfcoords(o2.x, o2.y, o2.z, 0, 0, 0) 65 | vmfout.write("""entity 66 | { 67 | "id" "%d" 68 | "classname" "light" 69 | "_light" "%d %d %d 200" 70 | "_lightHDR" "-1 -1 -1 1" 71 | "_lightscaleHDR" "1" 72 | "_constant_attn" "1" 73 | "origin" "%r %r %r" 74 | } 75 | """%(o2.deschash, o2.r,o2.g,o2.b, x,y,z)) 76 | if o.namehash == 0xabc3: # Strategy 77 | strategy = o 78 | 79 | for group in strategy.objects: 80 | assert group.namehash == 0x2682 81 | for o in group.objects: 82 | if o.namehash in {0xc5d,0xfebc,0xf3d9,0x2639,0x372c}: 83 | # MapStaticObj, MapObjBase, MapObjGeneral, Fence, WindmillRoof 84 | x, y, z, rx, ry, rz = bmd2vmfcoords(o.x, o.y, o.z, o.rx, o.ry, o.rz) 85 | vmfout.write("""entity 86 | { 87 | "id" "%d" 88 | "classname" "prop_static" // %s 89 | "origin" "%r %r %r" 90 | "angles" "%r %r %r" 91 | "model" "models/bianco2/%s.mdl" 92 | } 93 | """%(o.deschash, o.name, x,y,z, rx,ry,rz, o.model)) 94 | elif o.namehash in {0xd8a,0x74e8,0xfba0,0x4a81,0xf591,0x31ae,0xd73c,0xf58f,0xc6c0,0x3887}: 95 | # Palm, BananaTree, RiccoLog, BigWindmill, BiaBell, BiaWatermill, LeafBoat, BiaTurnBridge, MiniWindmill, BellWatermill, BiaWatermillVertical 96 | x, y, z, rx, ry, rz = bmd2vmfcoords(o.x, o.y, o.z, o.rx, o.ry, o.rz) 97 | vmfout.write("""entity 98 | { 99 | "id" "%d" 100 | "classname" "prop_physics_multiplayer" // %s 101 | "origin" 
"%r %r %r" 102 | "angles" "%r %r %r" 103 | "model" "models/bianco2/%s.mdl" 104 | } 105 | """%(o.deschash, o.name, x,y,z, rx,ry,rz, o.model)) 106 | elif o.namehash == 0x574e: 107 | # MarioPositionObj 108 | for spawn in o.spawns: 109 | x, y, z, rx, ry, rz = bmd2vmfcoords(spawn.x, spawn.y, spawn.z, spawn.rx, spawn.ry, spawn.rz) 110 | vmfout.write("""entity 111 | { 112 | "id" "%d" 113 | "classname" "info_player_start" 114 | "angles" "%r %r %r" 115 | "origin" "%r %r %r" 116 | } 117 | """%(o.deschash, rx,ry,rz, x,y,z)) 118 | elif o.namehash == 0x6db4: 119 | # FlowerCoin 120 | x, y, z, rx, ry, rz = bmd2vmfcoords(o.x, o.y, o.z, o.rx, o.ry, o.rz) 121 | vmfout.write("""entity 122 | { 123 | "id" "%d" 124 | "classname" "prop_detail" // %s 125 | "origin" "%r %r %r" 126 | "angles" "%r %r %r" 127 | "model" "models/bianco2/%s.mdl" 128 | } 129 | """%(o.deschash, o.name, x,y,z, rx,ry,rz, o.model)) 130 | elif o.namehash == 0xa3d9: 131 | # SunModel --> env_sun 132 | x, y, z, rx, ry, rz = bmd2vmfcoords(o.x, o.y, o.z, o.rx, o.ry, o.rz) 133 | vmfout.write("""entity 134 | { 135 | "id" "%d" 136 | "classname" "env_sun" 137 | "angles" "%r %r %r" 138 | "HDRColorScale" "1.0" 139 | "material" "sprites/light_glow02_add_noz" 140 | "overlaycolor" "0 0 0" 141 | "overlaymaterial" "sprites/light_glow02_add_noz" 142 | "overlaysize" "-1" 143 | "rendercolor" "100 80 80" 144 | "size" "16" 145 | "origin" "%r %r %r" 146 | } 147 | """%(o.deschash, rx,ry,rz, x,y,z)) 148 | elif o.namehash == 0x2844: 149 | # Mario -> info_player_start 150 | x, y, z, rx, ry, rz = bmd2vmfcoords(o.x, o.y, o.z, o.rx, o.ry, o.rz) 151 | vmfout.write("""entity 152 | { 153 | "id" "%d" 154 | "classname" "info_player_start" 155 | "angles" "%r %r %r" 156 | "origin" "%r %r %r" 157 | "spawnflags" "1" // Master 158 | } 159 | """%(o.deschash, rx,ry,rz, x,y,z)) 160 | elif o.namehash in ():#(0xcad9, 0xee83): 161 | # AnimalBird, AnimalMew -> npc_seagull 162 | x, y, z, rx, ry, rz = bmd2vmfcoords(o.x, o.y, o.z, o.rx, o.ry, o.rz) 163 | vmfout.write("""entity 164 | { 165 | "id" "%d" 166 | "classname" "npc_seagull" 167 | "angles" "%r %r %r" 168 | "physdamagescale" "1.0" 169 | "renderamt" "255" 170 | "rendercolor" "255 255 255" 171 | "spawnflags" "516" 172 | "origin" "%r %r %r" 173 | } 174 | """%(o.deschash, rx,ry,rz, x,y,z)) 175 | elif o.namehash in ():#== 0x5cf: 176 | # EffectFire -> env_fire 177 | x, y, z, rx, ry, rz = bmd2vmfcoords(o.x, o.y, o.z, o.rx, o.ry, o.rz) 178 | vmfout.write("""entity 179 | { 180 | "id" "%d" 181 | "classname" "env_fire" 182 | "angles" "%r %r %r" 183 | "origin" "%r %r %r" 184 | } 185 | """%(o.deschash, rx,ry,rz, x,y,z)) 186 | elif o.namehash == 0x6133: 187 | x, y, z, rx, ry, rz = bmd2vmfcoords(o.x, o.y, o.z, o.rx, o.ry, o.rz) 188 | # MapObjChangeStage -> trigger_changelevel 189 | # assume a unit cube 190 | ax, ay, az = o.sx/2, o.sy/2, o.sz/2 191 | bx, by, bz = -o.sx/2, -o.sy/2, -o.sz/2 192 | #import transformations, numpy 193 | #Ma = transformations.compose_matrix(angles=(rx, ry, rz), translate=(ax, ay, az)) 194 | #Ma = numpy.dot(Ma, transformations.compose_matrix(translate=(x,y,z))) 195 | #Mb = transformations.compose_matrix(angles=(rx, ry, rz), translate=(bx, by, bz)) 196 | #Mb = numpy.dot(Mb, transformations.compose_matrix(translate=(x,y,z))) 197 | #ax, ay, az = transformations.translation_from_matrix(Ma) 198 | #bx, by, bz = transformations.translation_from_matrix(Mb) 199 | vmfout.write("""entity 200 | { 201 | "id" "%d" 202 | "classname" "trigger_changelevel" 203 | "spawnflags" "0" 204 | "StartDisabled" "0" 205 | "angles" "%r %r %r" 206 | 
"origin" "%r %r %r" 207 | solid 208 | { 209 | "id" "%d" 210 | side 211 | { 212 | "id" "%d" 213 | "plane" "(%r %r %r) (%r %r %r) (%r %r %r)" 214 | "material" "TOOLS/TOOLSTRIGGER" 215 | "uaxis" "[1 0 0 0] 0.25" 216 | "vaxis" "[0 -1 0 0] 0.25" 217 | "rotation" "0" 218 | "lightmapscale" "16" 219 | "smoothing_groups" "0" 220 | } 221 | side 222 | { 223 | "id" "%d" 224 | "plane" "(%r %r %r) (%r %r %r) (%r %r %r)" 225 | "material" "TOOLS/TOOLSTRIGGER" 226 | "uaxis" "[1 0 0 0] 0.25" 227 | "vaxis" "[0 -1 0 0] 0.25" 228 | "rotation" "0" 229 | "lightmapscale" "16" 230 | "smoothing_groups" "0" 231 | } 232 | side 233 | { 234 | "id" "%d" 235 | "plane" "(%r %r %r) (%r %r %r) (%r %r %r)" 236 | "material" "TOOLS/TOOLSTRIGGER" 237 | "uaxis" "[0 1 0 0] 0.25" 238 | "vaxis" "[0 0 -1 0] 0.25" 239 | "rotation" "0" 240 | "lightmapscale" "16" 241 | "smoothing_groups" "0" 242 | } 243 | side 244 | { 245 | "id" "%d" 246 | "plane" "(%r %r %r) (%r %r %r) (%r %r %r)" 247 | "material" "TOOLS/TOOLSTRIGGER" 248 | "uaxis" "[0 1 0 0] 0.25" 249 | "vaxis" "[0 0 -1 0] 0.25" 250 | "rotation" "0" 251 | "lightmapscale" "16" 252 | "smoothing_groups" "0" 253 | } 254 | side 255 | { 256 | "id" "%d" 257 | "plane" "(%r %r %r) (%r %r %r) (%r %r %r)" 258 | "material" "TOOLS/TOOLSTRIGGER" 259 | "uaxis" "[1 0 0 0] 0.25" 260 | "vaxis" "[0 0 -1 0] 0.25" 261 | "rotation" "0" 262 | "lightmapscale" "16" 263 | "smoothing_groups" "0" 264 | } 265 | side 266 | { 267 | "id" "%d" 268 | "plane" "(%r %r %r) (%r %r %r) (%r %r %r)" 269 | "material" "TOOLS/TOOLSTRIGGER" 270 | "uaxis" "[1 0 0 0] 0.25" 271 | "vaxis" "[0 0 -1 0] 0.25" 272 | "rotation" "0" 273 | "lightmapscale" "16" 274 | "smoothing_groups" "0" 275 | } 276 | } 277 | } 278 | """%(o.deschash, rx,ry,rz, x,y,z, o.deschash+1, o.deschash+2,bx,ay,az,ax,ay,az,ax,by,az, o.deschash+3,bx,by,bz,ax,by,bz,ax,ay,bz, o.deschash+4,bx,ay,az,bx,by,az,bx,by,bz, o.deschash+5,ax,ay,bz,ax,by,bz,ax,by,az, o.deschash+6,ax,ay,az,bx,ay,az,bx,ay,bz, o.deschash+7,ax,by,bz,bx,by,bz,bx,by,az)) 279 | elif hasattr(o, "rx"): 280 | x, y, z, rx, ry, rz = bmd2vmfcoords(o.x, o.y, o.z, o.rx, o.ry, o.rz) 281 | vmfout.write("""entity 282 | { 283 | "id" "%d" 284 | "classname" "info_null" 285 | "angles" "%r %r %r" 286 | "origin" "%r %r %r" 287 | //"scale" "%r %r %r" 288 | "comments" "%s" 289 | } 290 | """%(o.deschash, rx,ry,rz, x,y,z, o.sx,o.sy,o.sz, o.name)) 291 | 292 | vmfout.write("""cameras 293 | { 294 | "activecamera" "0" 295 | camera 296 | { 297 | "position" "[126.401 356.961 598.126]" 298 | "look" "[-88.097 -1817.03 -831.39]" 299 | } 300 | } 301 | """) 302 | vmfout.close() 303 | 304 | -------------------------------------------------------------------------------- /bck.py: -------------------------------------------------------------------------------- 1 | from warnings import warn 2 | import sys 3 | from common import * 4 | from struct import unpack, pack, Struct, error as StructError 5 | from warnings import warn 6 | from array import array 7 | from enum import Enum 8 | import math 9 | from bisect import bisect 10 | 11 | def convRotation(rots, scale): 12 | for r in rots: 13 | r.value *= scale 14 | r.tangentIn *= scale 15 | r.tangentOut *= scale 16 | 17 | class LoopMode(Enum): 18 | ONCE = 0 19 | ONCE_AND_RESET = 1 20 | REPEAT = 2 21 | MIRRORED_ONCE = 3 22 | MIRRORED_REPEAT = 4 23 | 24 | class J3DAnmTransformKeyData(Section): 25 | header = Struct('>BBHHHHHIIII') 26 | fields = [ 27 | ('loopMode', LoopMode), 'angleMultiplier', 'animationLength', 28 | 'numJoints', 'scaleCount', 'rotCount', 'transCount', 29 | 'offsetToJoints', 
'offsetToScales', 'offsetToRots', 'offsetToTrans' 30 | ] 31 | def read(self, fin, start, size): 32 | super().read(fin, start, size) 33 | scales = array('f') 34 | fin.seek(start+self.offsetToScales) 35 | scales.fromfile(fin, self.scaleCount) 36 | if sys.byteorder == 'little': scales.byteswap() 37 | 38 | rotations = array('h') 39 | fin.seek(start+self.offsetToRots) 40 | rotations.fromfile(fin, self.rotCount) 41 | if sys.byteorder == 'little': rotations.byteswap() 42 | 43 | translations = array('f') 44 | fin.seek(start+self.offsetToTrans) 45 | translations.fromfile(fin, self.transCount) 46 | if sys.byteorder == 'little': translations.byteswap() 47 | 48 | rotationScale = (1<HHH') 172 | fields = ["count", "index", ("tangent", TangentType)] 173 | 174 | class Key(object): 175 | time: float 176 | value: float 177 | tangentIn: float 178 | tangentOut: float 179 | 180 | class Animation(object): 181 | scalesX: list[Key] 182 | scalesY: list[Key] 183 | scalesZ: list[Key] 184 | rotationsX: list[Key] 185 | rotationsY: list[Key] 186 | rotationsZ: list[Key] 187 | translationsX: list[Key] 188 | translationsY: list[Key] 189 | translationsZ: list[Key] 190 | 191 | def readComp(src, index): 192 | dst = [None]*index.count 193 | 194 | if index.count <= 0: 195 | warn("bck1: readComp(): count is <= 0") 196 | return 197 | elif index.count == 1: 198 | k = Key() 199 | k.time = 0 200 | k.value = src[index.index] 201 | k.tangentIn = 0 202 | k.tangentOut = 0 203 | dst[0] = k 204 | else: 205 | sz = {TangentType.In: 3, TangentType.InOut: 4}[index.tangent] 206 | for j in range(index.count): 207 | k = Key() 208 | k.time = src[index.index + sz*j] 209 | k.value = src[index.index + sz*j + 1] 210 | k.tangentIn = src[index.index + sz*j + 2] 211 | if index.tangent == TangentType.InOut: 212 | k.tangentOut = src[index.index + sz*j + 3] 213 | else: 214 | k.tangentOut = k.tangentIn 215 | dst[j] = k 216 | dst.sort(key=lambda a: a.time) 217 | 218 | return dst 219 | 220 | def addComp(idx: AnimIndex, keys: list[Key], out, scale=1.0, cnv=float): 221 | idx.count = len(keys) 222 | tangentSimple = all([key.tangentIn == key.tangentOut for key in keys]) 223 | idx.tangent = TangentType.In if tangentSimple else TangentType.InOut 224 | if idx.count == 1: 225 | values = [cnv(key.value/scale) for key in keys] 226 | elif tangentSimple: 227 | values = [cnv(v) for key in keys for v in (key.time, key.value/scale, key.tangentIn/scale)] 228 | else: 229 | values = [cnv(v) for key in keys for v in (key.time, key.value/scale, key.tangentIn/scale, key.tangentOut/scale)] 230 | idx.index = arrayStringSearch(out, values) 231 | if idx.index is None: 232 | idx.index = len(out) 233 | out.extend(values) 234 | 235 | def getPointCubic(cf, t): 236 | return ((cf[0] * t + cf[1]) * t + cf[2]) * t + cf[3] 237 | 238 | def getDerivativeCubic(cf, t): 239 | return (3 * cf[0] * t + 2 * cf[1]) * t + cf[2] 240 | 241 | def getCoeffHermite(p0, p1, s0, s1): 242 | return ( 243 | (p0 * 2) + (p1 * -2) + (s0 * 1) + (s1 * 1), 244 | (p0 * -3) + (p1 * 3) + (s0 * -2) + (s1 * -1), 245 | (p0 * 0) + (p1 * 0) + (s0 * 1) + (s1 * 0), 246 | (p0 * 1) + (p1 * 0) + (s0 * 0) + (s1 * 0) 247 | ) 248 | 249 | def getPointHermite(p0, p1, s0, s1, t): 250 | coeff = getCoeffHermite(p0, p1, s0, s1) 251 | return getPointCubic(coeff, t) 252 | 253 | def getDerivativeHermite(p0, p1, s0, s1, t): 254 | coeff = getCoeffHermite(p0, p1, s0, s1) 255 | return getDerivativeCubic(coeff, t) 256 | 257 | def hermiteInterpolate(k0, k1, frame, tangents=False): 258 | length = k1.time - k0.time 259 | t = (frame - k0.time) / length 
260 | p0 = k0.value 261 | p1 = k1.value 262 | s0 = k0.tangentOut * length 263 | s1 = k1.tangentIn * length 264 | if tangents: 265 | return getPointHermite(p0, p1, s0, s1, t), getDerivativeHermite(p0, p1, s0, s1, t) 266 | else: 267 | return getPointHermite(p0, p1, s0, s1, t) 268 | 269 | def animateSingle(time, keyList, tangents=False): 270 | timeList = [key.time for key in keyList] 271 | i = bisect(timeList, time) 272 | if i <= 0: 273 | # the time is before any keys 274 | if tangents: 275 | return keyList[0].value, keyList[0].tangentIn 276 | else: 277 | return keyList[0].value 278 | elif i >= len(keyList): 279 | # the time is after all keys 280 | if tangents: 281 | return keyList[-1].value, keyList[-1].tangentOut 282 | else: 283 | return keyList[-1].value 284 | else: 285 | keyBefore = keyList[i-1] 286 | keyAfter = keyList[i] 287 | return hermiteInterpolate(keyBefore, keyAfter, time, tangents) 288 | 289 | def animate(time, keyListSet, tangents=False): 290 | return (animateSingle(time, keyList, tangents) for keyList in keyListSet) 291 | 292 | -------------------------------------------------------------------------------- /bck_blender.py: -------------------------------------------------------------------------------- 1 | bl_info = { 2 | "name": "Import BCK", 3 | "author": "Spencer Alves", 4 | "version": (1,0,0), 5 | "blender": (2, 80, 0), 6 | "location": "Import", 7 | "description": "Import J3D BCK animation", 8 | "warning": "", 9 | "wiki_url": "", 10 | "tracker_url": "", 11 | "category": "Import-Export"} 12 | 13 | # ImportHelper is a helper class, defines filename and 14 | # invoke() function which calls the file selector. 15 | from bpy_extras.io_utils import ImportHelper 16 | from bpy.props import StringProperty, BoolProperty, EnumProperty 17 | from bpy.types import Operator 18 | import bpy 19 | from mathutils import * 20 | import mathutils.geometry 21 | import os 22 | from bck import * 23 | 24 | def doCurve(action, data_path, loopMode, animationLength, data): 25 | for i, subData in enumerate(data): 26 | curve = action.fcurves.new(data_path=data_path, index=i) 27 | 28 | if loopMode == LoopMode.ONCE: 29 | pass 30 | elif loopMode == LoopMode.ONCE_AND_RESET: 31 | repeat = curve.modifiers.new('CYCLES') 32 | repeat.mode_before = 'NONE' 33 | repeat.mode_after = 'REPEAT' 34 | repeat.cycles_after = 1 35 | limit = curve.modifiers.new('LIMITS') 36 | limit.use_max_x = True 37 | limit.max_x = animationLength 38 | elif loopMode == LoopMode.REPEAT: 39 | repeat = curve.modifiers.new('CYCLES') 40 | repeat.mode_before = 'NONE' 41 | repeat.mode_after = 'REPEAT' 42 | repeat.cycles_after = 0 43 | elif loopMode == LoopMode.MIRRORED_ONCE: 44 | repeat = curve.modifiers.new('CYCLES') 45 | repeat.mode_before = 'NONE' 46 | repeat.mode_after = 'MIRROR' 47 | repeat.cycles_after = 2 48 | limit = curve.modifiers.new('LIMITS') 49 | limit.use_max_x = True 50 | limit.max_x = animationLength*2 51 | elif loopMode == LoopMode.MIRRORED_REPEAT: 52 | repeat = curve.modifiers.new('CYCLES') 53 | repeat.mode_before = 'NONE' 54 | repeat.mode_after = 'MIRROR' 55 | repeat.cycles_after = 0 56 | 57 | curve.keyframe_points.add(len(subData)) 58 | lastKey = lastKeyPoint = None 59 | for key_point, key in zip(curve.keyframe_points, subData): 60 | key_point.co = Vector((key.time, key.value)) 61 | key_point.interpolation = 'LINEAR'#"BEZIER" # TODO add back after I figure out how to transform the handle 62 | 63 | deltaTime = 0.0 if lastKey is None else key.time-lastKey.time 64 | #key_point.handle_left = Vector((-1.0, 
-key.tangent))*deltaTime+key_point.co 65 | #key_point.handle_left_type = 'ALIGNED' 66 | 67 | #if lastKeyPoint is not None: 68 | # lastKeyPoint.handle_right = Vector((1.0, lastKey.tangent))*deltaTime+lastKeyPoint.co 69 | # lastKeyPoint.handle_right_type = 'ALIGNED' 70 | 71 | lastKeyPoint = key_point 72 | lastKey = key 73 | 74 | #lastKeyPoint.handle_right = lastKeyPoint.co 75 | #lastKeyPoint.handle_right_type = 'ALIGNED' 76 | 77 | def importFile(filepath, context): 78 | fin = open(filepath, 'rb') 79 | print("Reading", filepath) 80 | bck = Bck() 81 | bck.name = os.path.splitext(os.path.split(filepath)[-1])[0] 82 | bck.read(fin) 83 | fin.close() 84 | 85 | armObj = context.active_object 86 | assert armObj is not None 87 | assert armObj.type == 'ARMATURE' 88 | if len(armObj.data.bones) != len(bck.ank1.anims): 89 | context.window_manager.popup_menu(lambda self, context: self.layout.label(text="%d bones required (given %d)"%(len(bck.ank1.anims), len(armObj.data.bones))), 90 | title="Incompatible armature", icon='ERROR') 91 | return 92 | 93 | print("Importing", filepath) 94 | for b in armObj.pose.bones: 95 | b.rotation_mode = "XYZ" 96 | arm = armObj.data 97 | 98 | armObj.animation_data_create() 99 | action = bpy.data.actions.new(name=bck.name) 100 | armObj.animation_data.action = action 101 | 102 | for i, anim in enumerate(bck.ank1.anims): 103 | bone = arm.bones[i] 104 | 105 | # OKAY SO 106 | # Here's a problem. 107 | # BMD stores bone transformation relative to parent. 108 | # BCK stores pose transformation relative to posed parent. 109 | # Blender pose bones store transformation relative to *their own rest pose* (i.e., the edit bone) 110 | # So, we can just divide it out. Simple, right? 111 | # Not quite. 112 | # Pos/rot/scale are keyed separately, so we can't just compose matrix -> re-transform -> add key. 113 | # Pos/rot are inter-dependent, so we can't just compose a matrix out of one or the other. # XXX scratch that seems to work fine 114 | # Individual components of pos/rot/scale are inter-dependent, too 115 | # Pos/rot/scale keys can all be on separate frames, so we can't just grab the nearest one and compose a matrix from that. 116 | # Even then, Blender can't key by full matrix (that'd be silly anyway), so the full matrix has to be decomposed. 117 | # So the strategy is: 118 | # for each bone: 119 | # for each component keyframe: 120 | # figure out what data it's driving 121 | # evaluate the animation for that data at the keyframe time 122 | # make a matrix out of it 123 | # divide out the matrix 124 | # decompose the matrix (and pray that it's somewhat sane) 125 | # index into the decomposed to find the component this keyframe is for 126 | # make a key with the decomposed component at the current time 127 | # then, take that NEW list of re-transformed keyframes, and add them to the animation. 
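# Rough sketch of the plan above (names here are illustrative only; `bckMat` stands for a matrix
# composed from the BCK scale/rotation/translation sampled at one key's time, and `rest` is the
# parent-relative rest matrix built just below -- the loop that follows does the same thing
# component by component):
#
#     bckMat = Matrix.Translation(trans) @ Euler(rot).to_matrix().to_4x4() @ scaleMat  # pose relative to the posed parent
#     localMat = rest.inverted() @ bckMat                                              # re-base onto this bone's own rest pose
#     newLoc, newRot, newScale = localMat.decompose()                                  # back to the channels Blender keys
#
# (The real code also swizzles axes from X-pointing to Y-pointing bones before decomposing,
# and keeps only the one component that the current keyframe actually drives.)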
128 | 129 | # get the bone rest pose from the edit bone 130 | rest = bone.matrix_local 131 | # edit bone doesn't have a scale, so it grab it from the imported BMD, if there was one 132 | if '_bmd_rest_scale' in bone: 133 | s = Matrix() 134 | scale = tuple(map(float, bone['_bmd_rest_scale'].split(','))) 135 | s[0][0] = scale[0] 136 | s[1][1] = scale[1] 137 | s[2][2] = scale[2] 138 | rest = rest@s 139 | # adjust for bone placement 140 | rest = rest@Matrix(((0,0,1,0),(1,0,0,0),(0,1,0,0),(0,0,0,1))) 141 | # from armature-relative to parent-relative 142 | if bone.parent: 143 | parent = bone.parent.matrix_local 144 | parent = parent@Matrix(((0,0,1,0),(1,0,0,0),(0,1,0,0),(0,0,0,1))) 145 | rest = parent.inverted()@rest 146 | 147 | # big table of transformation components so that we can index them easily 148 | animList = (anim.scalesX, anim.scalesY, anim.scalesZ, 149 | anim.rotationsX, anim.rotationsY, anim.rotationsZ, 150 | anim.translationsX, anim.translationsY, anim.translationsZ) 151 | newAnim = tuple([None]*len(animData) for animData in animList) 152 | 153 | for animDataIndex, (animData, newAnimData) in enumerate(zip(animList, newAnim)): 154 | lastRot = None 155 | axisIndex = animDataIndex%3 156 | for animDataSubIndex, key in enumerate(animData): 157 | if 0: 158 | # animate the whole stack - not needed? 159 | scale = animate(key.time, animList[0:3]) 160 | rotation = animate(key.time, animList[3:6]) 161 | translation = animate(key.time, animList[6:9]) 162 | 163 | t = Matrix.Translation(translation).to_4x4() 164 | r = Euler(rotation).to_matrix().to_4x4() 165 | s = Matrix() 166 | scale = tuple(scale) 167 | s[0][0] = scale[0] 168 | s[1][1] = scale[1] 169 | s[2][2] = scale[2] 170 | mat = t@r@s 171 | else: 172 | if animDataIndex < 3: 173 | # can't just use this component 174 | #mat = Matrix() 175 | #mat[axisIndex][axisIndex] = key.value 176 | #print("XYZ"[axisIndex], "scale =", key.value) 177 | #print("Animated scale", tuple(animate(key.time, animList[0:3]))) 178 | scale = animate(key.time, animList[0:3]) 179 | mat = Matrix() 180 | scale = tuple(scale) 181 | mat[0][0] = scale[0] 182 | mat[1][1] = scale[1] 183 | mat[2][2] = scale[2] 184 | elif animDataIndex < 6: 185 | #e = Euler() 186 | #e[axisIndex] = key.value 187 | #mat = e.to_matrix().to_4x4() 188 | #print("XYZ"[axisIndex], "rotation =", key.value) 189 | #print("Animated rotation", tuple(animate(key.time, animList[3:6]))) 190 | rotation = animate(key.time, animList[3:6]) 191 | mat = Euler(rotation).to_matrix().to_4x4() 192 | else: 193 | #v = Vector() 194 | #v[axisIndex] = key.value 195 | #mat = Matrix.Translation(v).to_4x4() 196 | #print("XYZ"[axisIndex], "translation =", key.value) 197 | #print("Animated translation", tuple(animate(key.time, animList[6:9]))) 198 | translation = animate(key.time, animList[6:9]) 199 | mat = Matrix.Translation(translation).to_4x4() 200 | 201 | # here's where the magic happens 202 | mat = rest.inverted()@mat 203 | 204 | # from X-pointing to Y-pointing 205 | mat = Matrix((mat[2].zxyw, mat[0].zxyw, mat[1].zxyw, mat[3].zxyw)) 206 | 207 | # decompose the new matrix 208 | newLoc, newRot, newScale = mat.decompose() 209 | # euler-ize the rotation - that's what we were given in the first place, anyway 210 | newRot = newRot.to_euler('XYZ') if lastRot is None else newRot.to_euler('XYZ', lastRot) 211 | lastRot = newRot 212 | # put it into a big table 213 | newData = newScale[:]+newRot[:]+newLoc[:] 214 | 215 | newKey = Key() 216 | newAnimData[animDataSubIndex] = newKey 217 | newKey.time = key.time 218 | # now get the component 
that this key was originally for 219 | newKey.value = newData[animDataIndex] 220 | 221 | # Downside of this whole process is that there's no direct analog to transform the bezier handles. 222 | # TODO: Could probably get a good estimate by adding the tangent to the data, re-do the matrix undo, and subtract the undid data 223 | 224 | bone_path = 'pose.bones["%s"]' % bone.name 225 | 226 | doCurve(action, bone_path+'.scale', bck.ank1.loopMode, bck.ank1.animationLength, newAnim[0:3]) 227 | doCurve(action, bone_path+'.rotation_euler', bck.ank1.loopMode, bck.ank1.animationLength, newAnim[3:6]) 228 | doCurve(action, bone_path+'.location', bck.ank1.loopMode, bck.ank1.animationLength, newAnim[6:9]) 229 | 230 | # TODO: Shouldn't affect the scene state 231 | context.scene.frame_start = 0 232 | context.scene.frame_end = bck.ank1.animationLength 233 | context.scene.render.fps = 60 234 | context.scene.render.fps_base = 1.0 235 | 236 | class ImportBCK(Operator, ImportHelper): 237 | bl_idname = "import_anim.bck" # important since it's how bpy.ops.import_test.some_data is constructed 238 | bl_label = "Import BCK" 239 | 240 | # ImportHelper mixin class uses this 241 | filename_ext = ".bck" 242 | 243 | filter_glob: StringProperty( 244 | default="*.bck", 245 | options={'HIDDEN'}, 246 | ) 247 | 248 | def execute(self, context): 249 | if context.active_object is None or context.active_object.type != "ARMATURE": 250 | context.window_manager.popup_menu(lambda self, context: None, 251 | title="Select an armature to animate!", icon='ERROR') 252 | return {'CANCELLED'} 253 | importFile(self.filepath, context) 254 | return {'FINISHED'} 255 | 256 | # Only needed if you want to add into a dynamic menu 257 | def menu_func_import(self, context): 258 | self.layout.operator(ImportBCK.bl_idname, text="Import J3D BCK animation (*.bck)") 259 | 260 | 261 | def register(): 262 | bpy.utils.register_class(ImportBCK) 263 | bpy.types.TOPBAR_MT_file_import.append(menu_func_import) 264 | 265 | 266 | def unregister(): 267 | bpy.utils.unregister_class(ImportBCK) 268 | bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) 269 | 270 | 271 | if __name__ == "__main__": 272 | register() 273 | 274 | # test call 275 | #bpy.ops.import_anim.bck('INVOKE_DEFAULT') 276 | -------------------------------------------------------------------------------- /jpa.py: -------------------------------------------------------------------------------- 1 | # copied from https://github.com/magcius/noclip.website/tree/master/src/Common/JSYSTEM/JPA.ts 2 | 3 | from common import * 4 | from bti import Image 5 | from struct import Struct 6 | from math import pi, log2 7 | from enum import Enum, IntEnum 8 | 9 | class VolumeType(Enum): 10 | Cube = 0x00 11 | Sphere = 0x01 12 | Cylinder = 0x02 13 | Torus = 0x03 14 | Point = 0x04 15 | Circle = 0x05 16 | Line = 0x06 17 | 18 | class JPADynamicsBlock(Section): 19 | header = Struct('>4xffffffhhhBB2xHfhhHHhhHhhhhhhhfffffhhhhII') 20 | fields = [ 21 | 'emitterSclX', 'emitterSclY', 'emitterSclZ', 22 | 'emitterTrsX', 'emitterTrsY', 'emitterTrsZ', 23 | 'emitterRotX', 'emitterRotY', 'emitterRotZ', 24 | ('volumeType', VolumeType), 25 | 'rateStep', 'divNumber', 'rate', '_rateRndm', 26 | 'maxFrame', 'startFrame', 27 | 'volumeSize', '_volumeSweep', '_volumeMinRad', 28 | 'lifeTime', '_lifeTimeRndm', 29 | '_moment', '_momentRndm', 30 | '_initialVelRatio', '_accelRndm', 31 | '_airResist', '_airResistRndm', 32 | 'initialVelOmni', 'initialVelAxis', 33 | 'initialVelRndm', 'initialVelDir', 34 | 'accel', 35 | 'emitterDirX', 'emitterDirY', 'emitterDirZ', 36 | '_spread', 
'emitFlags', 'kfa1KeyTypeMask' 37 | ] 38 | def read(self, fin, start, length): 39 | super().read(fin, start, length) 40 | self.emitterScl = (self.emitterSclX, self.emitterSclY, self.emitterSclZ) 41 | self.emitterTrs = (self.emitterTrsX, self.emitterTrsY, self.emitterTrsZ) 42 | self.emitterDir = (self.emitterDirX, self.emitterDirY, self.emitterDirZ) 43 | self.emitterRot = ((self.emitterRotX/0x7FFF)*pi, (self.emitterRotY/0x7FFF)*pi, (self.emitterRotZ/0x7FFF)*pi) 44 | self.volumeSweep = self._volumeSweep / 0x8000 45 | self.volumeMinRad = self._volumeMinRad / 0x8000 46 | self.spread = self._spread / 0x8000 47 | self.rateRndm = self._rateRndm / 0x8000 48 | self.initialVelRatio = self._initialVelRatio / 0x8000 49 | self.lifeTimeRndm = self._lifeTimeRndm / 0x8000 50 | self.airResist = self._airResist / 0x8000 51 | self.airResistRndm = self._airResistRndm / 0x8000 52 | self.moment = self._moment / 0x8000 53 | self.momentRndm = self._momentRndm / 0x8000 54 | self.accelRndm = self._accelRndm / 0x8000 55 | 56 | def write(self, fout): 57 | self.emitterSclX, self.emitterSclY, self.emitterSclZ = self.emitterScl 58 | self.emitterTrsX, self.emitterTrsY, self.emitterTrsZ = self.emitterTrs 59 | self.emitterDirX, self.emitterDirY, self.emitterDirZ = self.emitterDir 60 | self.emitterRotX = int(self.emitterRot[0]*0x7FFF/pi) 61 | self.emitterRotY = int(self.emitterRot[1]*0x7FFF/pi) 62 | self.emitterRotZ = int(self.emitterRot[2]*0x7FFF/pi) 63 | self._volumeSweep = int(self.volumeSweep*0x8000) 64 | self._volumeMinRad = int(self.volumeMinRad*0x8000) 65 | self._spread = int(self.spread*0x8000) 66 | self._rateRndm = int(self.rateRndm*0x8000) 67 | self._initialVelRatio = int(self.initialVelRatio*0x8000) 68 | self._lifeTimeRndm = int(self.lifeTimeRndm*0x8000) 69 | self._airResist = int(self.airResist*0x8000) 70 | self._airResistRndm = int(self.airResistRndm*0x8000) 71 | self._moment = int(self.moment*0x8000) 72 | self._momentRndm = int(self.momentRndm*0x8000) 73 | self._accelRndm = int(self.accelRndm*0x8000) 74 | super().write(fout) 75 | 76 | class ShapeType(Enum): 77 | Point = 0x00 78 | Line = 0x01 79 | Billboard = 0x02 80 | Direction = 0x03 81 | DirectionCross = 0x04 82 | Stripe = 0x05 83 | StripeCross = 0x06 84 | Rotation = 0x07 85 | RotationCross = 0x08 86 | DirBillboard = 0x09 87 | YBillboard = 0x0A 88 | 89 | class DirType(Enum): 90 | Vel = 0 91 | Pos = 1 92 | PosInv = 2 93 | EmtrDir = 3 94 | PrevPctl = 4 95 | 96 | class RotType(Enum): 97 | Y = 0x00 98 | X = 0x01 99 | Z = 0x02 100 | XYZ = 0x03 101 | YJiggle = 0x04 102 | 103 | class CalcIdxType(Enum): 104 | Normal = 0x00 105 | Repeat = 0x01 106 | Reverse = 0x02 107 | Merge = 0x03 108 | Random = 0x04 109 | 110 | class JPABaseShape(Section): 111 | header = Struct('>4xHHHHHHffHBBBBB9xB4xBBBBBBBBBBBBBBBB7xBBBB12xHBxBBBBII20xhhhhhhhhhhhBx') 112 | fields = [ 113 | 'unk1', 'unk2', 'unk3', 114 | 'texIdxAnimDataOffs', 'colorPrmAnimDataOffs', 'colorEnvAnimDataOffs', 115 | 'baseSizeX', 'baseSizeY', 116 | 'anmRndm', 'texAnmCalcFlags', 'colorAnmCalcFlags', 117 | ('shapeType', ShapeType), ('dirType', DirType), ('rotType', RotType), 118 | 'colorInSelect', 'blendMode', 'blendSrcFactor', 'blendDstFactor', 'logicOp', 119 | 'alphaCmp0', 'alphaRef0', 'alphaOp', 'alphaCmp1', 'alphaRef1', 120 | ('zCompLoc', bool), ('zTest', bool), 'zCompare', ('zWrite', bool), ('zPrepass', bool), 121 | ('isEnableProjection', bool), 'flags', 122 | 'texAnimFlags', ('texCalcIdxType', CalcIdxType), 'texIdxAnimDataCount', 'texIdx', 123 | 'colorAnimMaxFrm', ('colorCalcIdxType', CalcIdxType), 124 | 
'colorPrmAnimFlags', 'colorEnvAnimFlags', 125 | 'colorPrmAnimDataCount', 'colorEnvAnimDataCount', 126 | 'colorPrm', 'colorEnv', 127 | 'texInitTransX', 'texInitTransY', 128 | 'texInitScaleX', 'texInitScaleY', 129 | 'tilingS', 'tilingT', 130 | 'texIncTransX', 'texIncTransY', 131 | 'texIncScaleX', 'texIncScaleY', 132 | '_texIncRot', ('isEnableTexScrollAnm', bool) 133 | ] 134 | def read(self, fin, start, length): 135 | super().read(fin, start, length) 136 | 137 | self.texIdxAnimData = array('B') 138 | if self.texIdxAnimDataOffs != 0: 139 | fin.seek(start+self.texIdxAnimDataOffs) 140 | self.texIdxAnimData.fromfile(fin, self.texIdxAnimDataCount) 141 | 142 | self.colorPrmAnimData = [] 143 | if self.colorPrmAnimDataOffs != 0: 144 | fin.seek(start+self.colorPrmAnimDataOffs) 145 | self.colorPrmAnimData = self.readColorTable(fin, self.colorPrmAnimDataCount) 146 | 147 | self.colorEnvAnimData = [] 148 | if self.colorEnvAnimDataOffs != 0: 149 | fin.seek(start+self.colorEnvAnimDataOffs) 150 | self.colorEnvAnimData = self.readColorTable(fin, self.colorEnvAnimDataCount) 151 | 152 | self.baseSize = self.baseSizeX, self.baseSizeY 153 | self.texInitTrans = (self.texInitTransX/0x8000, self.texInitTransY/0x8000) 154 | self.texInitScale = (self.texInitScaleX/0x8000, self.texInitScaleY/0x8000) 155 | self.tiling = (self.tilingS/0x8000, self.tilingT/0x8000) 156 | self.texIncTrans = (self.texIncTransX/0x8000, self.texIncTransY/0x8000) 157 | self.texIncScale = (self.texIncScaleX/0x8000, self.texIncScaleY/0x8000) 158 | self.texIncRot = self._texIncRot/0x8000 159 | 160 | def readColorTable(self, fin, count): 161 | h = Struct('>HI') 162 | return [h.unpack(fin.read(h.size)) for i in range(count)] 163 | 164 | def writeColorTable(self, fout, table): 165 | h = Struct('>HI') 166 | for e in table: fout.write(h.pack(*e)) 167 | 168 | def write(self, fout): 169 | self.texIdxAnimDataOffs = self.header.size+8+alignAmt(self.header.size+8, 16) 170 | self.texIdxAnimDataCount = len(self.texIdxAnimData) 171 | self.colorPrmAnimDataOffs = self.texIdxAnimDataOffs+self.texIdxAnimDataCount 172 | self.colorPrmAnimDataCount = len(self.colorPrmAnimData) 173 | self.colorEnvAnimDataOffs = self.colorPrmAnimDataOffs+6*self.colorPrmAnimDataCount 174 | self.colorEnvAnimDataCount = len(self.colorEnvAnimData) 175 | self.baseSizeX, self.baseSizeY = self.baseSize 176 | self.texInitTransX = int(self.texInitTrans[0]*0x8000) 177 | self.texInitTransY = int(self.texInitTrans[1]*0x8000) 178 | self.texInitScaleX = int(self.texInitScale[0]*0x8000) 179 | self.texInitScaleY = int(self.texInitScale[1]*0x8000) 180 | self.tilingS = int(self.tiling[0]*0x8000) 181 | self.tilingT = int(self.tiling[1]*0x8000) 182 | self.texIncTransX = int(self.texIncTrans[0]*0x8000) 183 | self.texIncTransY = int(self.texIncTrans[1]*0x8000) 184 | self.texIncScaleX = int(self.texIncScale[0]*0x8000) 185 | self.texIncScaleY = int(self.texIncScale[1]*0x8000) 186 | self._texIncRot = int(self.texIncRot*0x8000) 187 | super().write(fout) 188 | alignFile(fout, 16, 8) 189 | self.texIdxAnimData.tofile(fout) 190 | self.writeColorTable(fout, self.colorPrmAnimData) 191 | self.writeColorTable(fout, self.colorEnvAnimData) 192 | 193 | class JPAExtraShape(Section): 194 | header = Struct('>4xI4xhhhhhBBhhhh12xhhhhhhBBHhhhBBHB3xh6xhhhh2xB') 195 | fields = [ 196 | 'unk1', '_alphaInTiming', '_alphaOutTiming', 197 | '_alphaInValue', '_alphaBaseValue', '_alphaOutValue', 198 | 'alphaAnmFlags', 'alphaWaveTypeFlag', 199 | '_alphaWaveParam1', '_alphaWaveParam2', '_alphaWaveParam3', '_alphaWaveRandom', 200 | 
'_scaleOutRandom', '_scaleInTiming', '_scaleOutTiming', 201 | '_scaleInValueY', 'unk2', '_scaleOutValueY', 'pivotY', 'anmTypeY', 'scaleAnmMaxFrameY', 202 | '_scaleInValueX', 'unk3', '_scaleOutValueX', 'pivotX', 'anmTypeX', 'scaleAnmMaxFrameX', 203 | 'scaleAnmFlags', 204 | '_rotateDirection', '_rotateAngle', '_rotateSpeed', 205 | '_rotateAngleRandom', '_rotateSpeedRandom', ('isEnableRotate', bool) 206 | ] 207 | def read(self, fin, start, length): 208 | super().read(fin, start, length) 209 | self.alphaInTiming = self._alphaInTiming/0x8000 210 | self.alphaOutTiming = self._alphaOutTiming/0x8000 211 | self.alphaInValue = self._alphaInValue/0x8000 212 | self.alphaBaseValue = self._alphaBaseValue/0x8000 213 | self.alphaOutValue = self._alphaOutValue/0x8000 214 | self.alphaWaveParam1 = self._alphaWaveParam1/0x8000 215 | self.alphaWaveParam2 = self._alphaWaveParam2/0x8000 216 | self.alphaWaveParam3 = self._alphaWaveParam3/0x8000 217 | self.alphaWaveRandom = self._alphaWaveRandom/0x8000 218 | self.scaleOutRandom = self._scaleOutRandom/0x8000 219 | self.scaleInTiming = self._scaleInTiming/0x8000 220 | self.scaleOutTiming = self._scaleOutTiming/0x8000 221 | self.scaleInValueY = self._scaleInValueY/0x8000 222 | self.scaleOutValueY = self._scaleOutValueY/0x8000 223 | self.scaleInValueX = self._scaleInValueX/0x8000 224 | self.scaleOutValueX = self._scaleOutValueX/0x8000 225 | self.rotateDirection = self._rotateDirection/0x8000 226 | self.rotateAngle = self._rotateAngle/0x8000 227 | self.rotateSpeed = self._rotateSpeed/0x8000 228 | self.rotateAngleRandom = self._rotateAngleRandom/0x8000 229 | self.rotateSpeedRandom = self._rotateSpeedRandom/0x8000 230 | def write(self, fout): 231 | self._alphaInTiming = int(self.alphaInTiming*0x8000) 232 | self._alphaOutTiming = int(self.alphaOutTiming*0x8000) 233 | self._alphaInValue = int(self.alphaInValue*0x8000) 234 | self._alphaBaseValue = int(self.alphaBaseValue*0x8000) 235 | self._alphaOutValue = int(self.alphaOutValue*0x8000) 236 | self._alphaWaveParam1 = int(self.alphaWaveParam1*0x8000) 237 | self._alphaWaveParam2 = int(self.alphaWaveParam2*0x8000) 238 | self._alphaWaveParam3 = int(self.alphaWaveParam3*0x8000) 239 | self._alphaWaveRandom = int(self.alphaWaveRandom*0x8000) 240 | self._scaleOutRandom = int(self.scaleOutRandom*0x8000) 241 | self._scaleInTiming = int(self.scaleInTiming*0x8000) 242 | self._scaleOutTiming = int(self.scaleOutTiming*0x8000) 243 | self._scaleInValueY = int(self.scaleInValueY*0x8000) 244 | self._scaleOutValueY = int(self.scaleOutValueY*0x8000) 245 | self._scaleInValueX = int(self.scaleInValueX*0x8000) 246 | self._scaleOutValueX = int(self.scaleOutValueX*0x8000) 247 | self._rotateDirection = int(self.rotateDirection*0x8000) 248 | self._rotateAngle = int(self.rotateAngle*0x8000) 249 | self._rotateSpeed = int(self.rotateSpeed*0x8000) 250 | self._rotateAngleRandom = int(self.rotateAngleRandom*0x8000) 251 | self._rotateSpeedRandom = int(self.rotateSpeedRandom*0x8000) 252 | super().write(fout) 253 | 254 | class JPASweepShape(Section): 255 | header = Struct('>8xBBBxHHHB13xffHH11xBBBB4xff2xBBII') 256 | fields = [ 257 | ('shapeType', ShapeType), ('dirType', DirType), ('rotType', RotType), 258 | 'life', 'rate', '_timing', 'step', 259 | 'posRndm', 'baseVel', '_velInfRate', '_rotateSpeed', 260 | '_inheritRGB', '_inheritAlpha', '_inheritScale', 261 | '_baseVelRndm', '_gravity', 262 | ('isEnableField', bool), ('isEnableDrawParent', bool), ('isEnableScaleOut', bool), ('isEnableAlphaOut', bool), 263 | 'texIdx', 'globalScale2DX', 'globalScale2DY', 264 | 
('isEnableRotate', bool), 'flags', 'colorPrm', 'colorEnv' 265 | ] 266 | def read(self, fin, start, length): 267 | super().read(fin, start, length) 268 | self.timing = self._timing/0x8000 269 | self.velInfRate = self._velInfRate/0x8000 270 | self.rotateSpeed = self._rotateSpeed/0x8000 271 | self.inheritRGB = self._inheritRGB/0x8000 272 | self.inheritAlpha = self._inheritAlpha/0x8000 273 | self.inheritScale = self._inheritScale/0x8000 274 | self.baseVelRndm = self._baseVelRndm/0x8000 275 | self.gravity = self._gravity/0x8000 276 | self.globalScale2D = (self.globalScale2DX, self.globalScale2DY) 277 | def write(self, fout): 278 | self._timing = int(self.timing*0x8000) 279 | self._velInfRate = int(self.velInfRate*0x8000) 280 | self._rotateSpeed = int(self.rotateSpeed*0x8000) 281 | self._inheritRGB = int(self.inheritRGB*0x8000) 282 | self._inheritAlpha = int(self.inheritAlpha*0x8000) 283 | self._inheritScale = int(self.inheritScale*0x8000) 284 | self._baseVelRndm = int(self.baseVelRndm*0x8000) 285 | self._gravity = int(self.gravity*0x8000) 286 | super().write(fout) 287 | 288 | class IndTextureMode(Enum): 289 | Off = 0x00 290 | Normal = 0x01 291 | Sub = 0x02 292 | 293 | class JPAExTexShape(Section): 294 | header = Struct('>8xBBhhhhhhbBB15xB2xB') 295 | fields = [ 296 | ('indTextureMode', IndTextureMode), 'indTextureMtxID', 297 | 'p00', 'p01', 'p02', 'p10', 'p11', 'p12', 298 | 'power', 'indTextureID', 'subTextureID', 299 | 'secondTextureFlags', 'secondTextureIndex' 300 | ] 301 | def read(self, fin, start, length): 302 | super().read(fin, start, length) 303 | scale = 2**self.power 304 | self.indTextureMtx = [ 305 | self.p00*scale, self.p01*scale, self.p02*scale, scale, 306 | self.p10*scale, self.p11*scale, self.p12*scale, 0.0 307 | ] 308 | def write(self, fout): 309 | self.p00, self.p01, self.p02, scale, self.p10, self.p11, self.p12, z = self.indTextureMtx 310 | self.power = int(log2(scale)) 311 | scale = 2**self.power 312 | self.p00 /= scale 313 | self.p01 /= scale 314 | self.p02 /= scale 315 | self.p10 /= scale 316 | self.p11 /= scale 317 | self.p12 /= scale 318 | super().write(fout) 319 | 320 | class JPAKeyBlock(Section): 321 | header = Struct('>8xBxBx12x') 322 | fields = ['keyCount', ('isLoopEnable', bool)] 323 | def read(self, fin, start, length): 324 | super().read(fin, start, length) 325 | self.keyValues = array('f') 326 | self.keyValues.fromfile(fin, self.keyCount*4) 327 | if sys.byteorder == 'little': self.keyValues.byteswap() 328 | def write(self, fout): 329 | self.keyCount = len(self.keyValues)//4 330 | super().write(fout) 331 | swapArray(self.keyValues).tofile(fout) 332 | 333 | class FieldType(Enum): 334 | Gravity = 0x00 335 | Air = 0x01 336 | Magnet = 0x02 337 | Newton = 0x03 338 | Vortex = 0x04 339 | Random = 0x05 340 | Drag = 0x06 341 | Convection = 0x07 342 | Spin = 0x08 343 | 344 | class FieldAddType(Enum): 345 | FieldAccel = 0x00 346 | BaseVelocity = 0x01 347 | FieldVelocity = 0x02 348 | 349 | class JPAFieldBlock(Section): 350 | header = Struct('>4xBxBBBB2xffffffffffffhhhh') 351 | fields = [ 352 | ('type', FieldType), ('velType', FieldAddType), 'cycle', 'sttFlag', 'unk1', 353 | 'mag', 'magRndm', 'maxDist', 354 | 'posX', 'posY', 'posZ', 355 | 'dirX', 'dirY', 'dirZ', 356 | 'param1', 'param2', 'param3', 357 | '_fadeIn', '_fadeOut', 358 | '_enTime', '_disTime' 359 | ] 360 | def read(self, fin, start, length): 361 | super().read(fin, start, length) 362 | self.pos = (self.posX, self.posY, self.posZ) 363 | self.dir = (self.dirX, self.dirY, self.dirZ) 364 | self.fadeIn = self._fadeIn/0x8000 365 
| self.fadeOut = self._fadeOut/0x8000 366 | self.enTime = self._enTime/0x8000 367 | self.disTime = self._disTime/0x8000 368 | def write(self, fout): 369 | self.posX, self.posY, self.posZ = self.pos 370 | self.dirX, self.dirY, self.dirZ = self.dir 371 | self._fadeIn = int(self.fadeIn*0x8000) 372 | self._fadeOut = int(self.fadeOut*0x8000) 373 | self._enTime = int(self.enTime*0x8000) 374 | self._disTime = int(self.disTime*0x8000) 375 | super().write(fout) 376 | 377 | class JPATexture(Section): 378 | header = Struct('4x') 379 | fields = [] 380 | def read(self, fin, start, length): 381 | super().read(fin, start, length) 382 | self.texture = Image() 383 | name = fin.read(20) 384 | if name[0] == 0: 385 | self.texture.name = None 386 | textureHeaderOffset = 0 387 | else: 388 | self.texture.name = name.decode('shift-jis').rstrip("\0") 389 | textureHeaderOffset = 32 390 | fin.seek(start+textureHeaderOffset) 391 | self.texture.read(fin, start, textureHeaderOffset, 0) 392 | 393 | def write(self, fout): 394 | super().write(fout) 395 | if self.texture.name: 396 | fout.write(self.texture.name.encode('shift-jis').ljust(20, b'\0')) 397 | else: 398 | fout.write(b'\0'*20) 399 | self.texture.write(fout, 0) 400 | 401 | class JPA(BFile): 402 | sectionHandlers = { 403 | b'BEM1': JPADynamicsBlock, 404 | b'BSP1': JPABaseShape, 405 | b'ESP1': JPAExtraShape, 406 | b'SSP1': JPASweepShape, 407 | b'ETX1': JPAExTexShape, 408 | b'KFA1': JPAKeyBlock, 409 | b'FLD1': JPAFieldBlock, 410 | b'TEX1': JPATexture 411 | } 412 | 413 | if __name__ == "__main__": 414 | if len(sys.argv) != 2: 415 | sys.stderr.write("Usage: %s \n"%sys.argv[0]) 416 | exit(1) 417 | 418 | jpa = JPA() 419 | jpa.read(open(sys.argv[1], 'rb')) 420 | jpa.write(open(sys.argv[1][:sys.argv[1].rfind('.')]+"-out.jpa", 'wb')) 421 | 422 | -------------------------------------------------------------------------------- /sequence-com.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # Converts Zelda/Mario sequence files (*.bms, *.com, yaz0 *.szs) to MIDI 3 | # Requires python-midi (https://github.com/avsaj/python-midi) 4 | # It's quite a mess, but fairly capable. 
I need to decide if I should rewrite/refactor it or port/contribute to the other one 5 | 6 | # broken in Windows Media Player: k_manma, t_casino_fanf, t_chuboss, t_delfino, t_event, t_mechakuppa_, t_pinnapaco, t_select, t_shilena 7 | 8 | import struct, sys, os 9 | from struct import unpack 10 | from warnings import warn 11 | import midi 12 | 13 | def handlePerf(type, value, duration, maxValue, track, tick): 14 | event2 = None 15 | types = {2: 16, 4: 17, 9: 18, 10: 19} 16 | if type == 0: 17 | # volume 18 | event1 = midi.ControlChangeEvent(tick=tick) 19 | event1.control = 7 # Main Volume 20 | #if maxValue > 0x7F: 21 | # event1.value = (value>>8)&0xFF 22 | # event2 = midi.ControlChangeEvent(tick=0) 23 | # event2.control = 39 # Volume LSB 24 | # event2.value = value&0xFF 25 | #else: 26 | # event1.value = value 27 | event1.value = (value*0x7F)/maxValue 28 | elif type == 1: 29 | # pitch 30 | event1 = midi.PitchWheelEvent(tick=tick) 31 | event1.pitch = (value*0x7FFF)/maxValue 32 | elif type == 3: 33 | # pan 34 | event1 = midi.ControlChangeEvent(tick=tick) 35 | event1.control = 10 # Pan 36 | #if maxValue > 0x7F: 37 | # event1.value = (value>>8)&0xFF 38 | # event2 = midi.ControlChangeEvent(tick=0) 39 | # event2.control = 42 # Pan LSB 40 | # event2.value = value&0xFF 41 | #else: 42 | # event1.value = value 43 | event1.value = (value*0x7F)/maxValue 44 | else: 45 | warn("Unknown perf type %d"%type) 46 | #event1 = midi.TextMetaEvent(tick=tick, data=map(ord, "Perf %d (%d)"%(type, value))) 47 | event1 = midi.ControlChangeEvent(tick=tick) 48 | event1.control = types[type] 49 | event1.value = (value*0x7F)/maxValue 50 | track.append(event1) 51 | if event2 is not None: track.append(event2) 52 | if duration: 53 | if type == 0: 54 | # volume 55 | event = midi.ControlChangeEvent(tick=duration) 56 | event.control = 7 # Main Volume 57 | event.value = 0x7F 58 | elif type == 1: 59 | # pitch 60 | event = midi.PitchWheelEvent(tick=duration) 61 | event.pitch = 0x2000 62 | elif type == 3: 63 | # pan 64 | event = midi.ControlChangeEvent(tick=duration) 65 | event.control = 10 # Pan 66 | event.value = 0x40 67 | else: 68 | #event = midi.TextMetaEvent(tick=duration, data=map(ord, "Perf %d (%d)"%(type, value))) 69 | event1 = midi.ControlChangeEvent(tick=duration) 70 | event1.control = types[type] 71 | event1.value = 0 72 | return event 73 | 74 | def handleBankProgram(which, selection, track, tick): 75 | if which == 7: 76 | # pitch 77 | print "Pitch", selection 78 | event = midi.ControlChangeEvent(tick=tick) 79 | event.control = 101 # RPN MSB 80 | event.value = 0 81 | track.append(event) 82 | event = midi.ControlChangeEvent(tick=0) 83 | event.control = 100 # RPN LSB 84 | event.value = 0 85 | track.append(event) 86 | event = midi.ControlChangeEvent(tick=0) 87 | event.control = 6 # Data entry MSB 88 | event.value = selection 89 | track.append(event) 90 | event = midi.ControlChangeEvent(tick=0) 91 | event.control = 38 # Data entry LSB 92 | event.value = 0 93 | track.append(event) 94 | elif which == 0x20: 95 | # bank 96 | print "Bank", selection 97 | event = midi.ControlChangeEvent(tick=tick) 98 | event.control = 32 # Bank Select 99 | event.value = selection 100 | track.append(event) 101 | elif which == 0x21: 102 | # program 103 | print "Program", selection 104 | if selection >= 128: 105 | warn("Program %d >= 128"%selection) 106 | #event = midi.ControlChangeEvent(tick=tick) 107 | #event.control = 32 # Bank Select 108 | #event.value = 128 109 | #track.append(event) 110 | #selection -= 228 111 | event = 
midi.ProgramChangeEvent(tick=tick, value=selection%128) 112 | track.append(event) 113 | else: 114 | warn("Unknown bank/program %x (%d)"%(which, selection)) 115 | event = midi.TextMetaEvent(tick=tick, data=map(ord, "Bank/Program %d (%d)"%(which, selection))) 116 | track.append(event) 117 | 118 | def handleSeek(type, mode, point, track, tick, voices): 119 | #print ("Call", "Ret", "Jump")[(type-0xC3)/2], mode, point 120 | if mode == 0: pass # always 121 | #elif mode == 1: pass # zero 122 | #elif mode == 2: pass # nonzero 123 | #elif mode == 3: pass # one 124 | #elif mode == 4: pass # greater than 125 | #elif mode == 5: pass # less than 126 | else: warn("Unknown seek mode %d"%mode) 127 | # stop all notes before looping 128 | voicepairs = voices.items() 129 | if 0:#type in (0xC5, 0xC6, 0xC7, 0xC8): # Jump 130 | for voiceId, note in voicepairs: 131 | noteOff = midi.NoteOffEvent(tick=tick, pitch=note) 132 | tick = 0 133 | track.append(noteOff) 134 | del voices[voiceId] 135 | return tick 136 | 137 | def doNoteOffBurp(voiceId, track, tick, voices): 138 | voiceNoteOns = voices[voiceId] 139 | voiceNoteOns.sort(key=lambda a: a.tick) 140 | noteOn = None 141 | for i in range(len(voiceNoteOns)): 142 | if voiceNoteOns[i].tick >= tick: 143 | if i > 0: 144 | noteOn = voiceNoteOns[i-1] 145 | del voiceNoteOns[i-1] 146 | break 147 | if noteOn is not None: 148 | noteOff = midi.NoteOffEvent(tick=tick, pitch=noteOn.pitch) 149 | track.append(noteOff) 150 | 151 | def doNoteOff(voiceId, track, tick, voices): 152 | if voiceId in voices: 153 | noteOff = midi.NoteOffEvent(tick=tick, pitch=voices[voiceId]) 154 | track.append(noteOff) 155 | del voices[voiceId] 156 | else: 157 | warn("No voiceId %d to turn off"%voiceId) 158 | 159 | def readTrack(fin, pattern=None, trackId=-1, delay=0, endTime=-1, maxpos=-1): 160 | trackWasInit = False 161 | stack = [] 162 | totalTime = delay 163 | if pattern is not None: 164 | #voices = [[] for i in range(8)] 165 | voices = {} 166 | track = midi.Track() 167 | pattern.append(track) 168 | queuedEvents = [] 169 | tracksToDo = [] 170 | channel = (trackId)%16 171 | if channel >= 9: channel = (channel+1)%16 172 | if trackId >= 16: warn("Track ID %d >= 16"%trackId) 173 | #if trackId == 15: channel = 9 174 | bytePositionInTrack = {} 175 | delayAtBytePosition = {} 176 | while True: 177 | #if fin.tell() >= maxpos and maxpos != -1: 178 | #warn("Passed track bounds") 179 | #break 180 | #print hex(fin.tell()), 181 | #print totalTime, 182 | if pattern: 183 | bytePositionInTrack[fin.tell()] = len(track) 184 | delayAtBytePosition[fin.tell()] = delay 185 | c = fin.read(1) 186 | if c == '': break 187 | cmd = ord(c) 188 | if cmd in (0x80, 0x88, 0xF0, 0xB8): 189 | # delay 190 | nextDelay, = unpack('>B', fin.read(1)) if cmd in (0xF0, 0x80) else unpack('>H', fin.read(2)) 191 | delay += nextDelay 192 | totalTime += nextDelay 193 | print "Delay", hex(cmd), nextDelay 194 | if pattern is not None: 195 | queuedEvents.sort(key=lambda e: 0 if e is None else e.tick) 196 | i = 0 197 | while i < len(queuedEvents): 198 | event = queuedEvents[i] 199 | if event is None: 200 | del queuedEvents[i] 201 | continue 202 | #if i > 0 and queuedEvents[i-1].tick == event.tick: 203 | # del queuedEvents[i] 204 | # continue 205 | i += 1 206 | while i < len(queuedEvents): 207 | event = queuedEvents[i] 208 | event.tick -= nextDelay 209 | if event.tick <= 0: 210 | nextTick = -event.tick 211 | event.tick = delay+event.tick 212 | track.append(event) 213 | delay = nextTick 214 | del queuedEvents[i] 215 | else: 216 | i += 1 217 | elif cmd == 
0x94: 218 | # perf 219 | type, value = unpack('>BB', fin.read(2)) 220 | print "Perf", type, value, 0 221 | if pattern is not None: queuedEvents.append(handlePerf(type, value, 0, 0xFF, track, delay)) 222 | delay = 0 223 | elif cmd == 0x96: 224 | # perf 225 | type, value, duration = unpack('>BBB', fin.read(3)) 226 | print "Perf", type, value, duration 227 | if pattern is not None: queuedEvents.append(handlePerf(type, value, duration, 0xFF, track, delay)) 228 | delay = 0 229 | elif cmd == 0x97: 230 | # perf 231 | type, value, duration = unpack('>BBH', fin.read(4)) 232 | print "Perf", type, value, duration 233 | if pattern is not None: queuedEvents.append(handlePerf(type, value, duration, 0xFF, track, delay)) 234 | delay = 0 235 | elif cmd == 0x98: 236 | # perf 237 | type, value = unpack('>Bb', fin.read(2)) 238 | print "Perf", type, value, 0 239 | if pattern is not None: queuedEvents.append(handlePerf(type, value, 0, 0x7F, track, delay)) 240 | delay = 0 241 | elif cmd == 0x9A: 242 | # perf 243 | type, value, duration = unpack('>BbB', fin.read(3)) 244 | print "Perf", type, value, duration 245 | if pattern is not None: queuedEvents.append(handlePerf(type, value, duration, 0x7F, track, delay)) 246 | delay = 0 247 | elif cmd == 0x9B: 248 | # perf 249 | type, value, duration = unpack('>BbH', fin.read(4)) 250 | print "Perf", type, value, duration 251 | if pattern is not None: queuedEvents.append(handlePerf(type, value, duration, 0x7F, track, delay)) 252 | delay = 0 253 | elif cmd == 0x9C: 254 | # perf 255 | type, value = unpack('>Bh', fin.read(3)) 256 | print "Perf", type, value, 0 257 | if pattern is not None: queuedEvents.append(handlePerf(type, value, 0, 0x7FFF, track, delay)) 258 | delay = 0 259 | elif cmd == 0x9E: 260 | # perf 261 | type, value, duration = unpack('>BhB', fin.read(4)) 262 | print "Perf", type, value, duration 263 | if pattern is not None: queuedEvents.append(handlePerf(type, value, duration, 0x7FFF, track, delay)) 264 | delay = 0 265 | elif cmd == 0x9F: 266 | # perf 267 | type, value, duration = unpack('>BhH', fin.read(5)) 268 | print "Perf", type, value, duration 269 | if pattern is not None: queuedEvents.append(handlePerf(type, value, duration, 0x7FFF, track, delay)) 270 | delay = 0 271 | elif cmd == 0xA4: 272 | which, selection = unpack('>BB', fin.read(2)) 273 | if pattern is not None: handleBankProgram(which, selection, track, delay) 274 | else: 275 | if which == 0x20: print "Bank", selection 276 | elif which == 0x21: print "Program", selection 277 | else: warn("Unknown bank/program %x (%d)"%(which, selection)) 278 | if which == 0x21 and selection > 127: 279 | channel = 9 280 | delay = 0 281 | elif cmd == 0xAC: 282 | which, selection = unpack('>BH', fin.read(3)) 283 | if pattern is not None: handleBankProgram(which, selection, track, delay) 284 | else: 285 | if which == 0x20: print "Bank", selection 286 | elif which == 0x21: print "Program", selection 287 | else: warn("Unknown bank/program %x (%d)"%(which, selection)) 288 | if which == 0x21 and selection > 127: 289 | channel = 9 290 | delay = 0 291 | elif cmd == 0xB8: 292 | warn("Unknown B8") 293 | fin.seek(2,1) 294 | elif cmd == 0xB9: 295 | warn("Unknown B9") 296 | fin.seek(3,1) 297 | elif cmd == 0xC1: 298 | # child track pointer 299 | # cmdOpenTrack 300 | childTrackId, tmp, trackPos = unpack('>BBH', fin.read(4)) 301 | trackPos |= tmp<<16 302 | print "New track", childTrackId, "at", hex(trackPos), "delay", delay 303 | tracksToDo.append((trackPos, childTrackId, delay)) 304 | elif cmd == 0xC2: 305 | # sibling track 306 | # 
cmdOpenTrackBros 307 | raise NotImplementedError("") 308 | elif cmd == 0xC3: 309 | warn("Unknown C3, prob call") 310 | mode, point = unpack('>BH', fin.read(3)) 311 | if pattern is not None: delay = handleSeek(0xC3, mode, point, track, delay, voices) 312 | stack.append(fin.tell()) 313 | fin.seek(point) 314 | elif cmd == 0xC4: 315 | # cmdCall 316 | mode, tmp, point = unpack('>BBH', fin.read(4)) 317 | point |= tmp<<16 318 | if pattern is not None: delay = handleSeek(0xC4, mode, point, track, delay, voices) 319 | else: print "cmdCall", mode, point 320 | stack.append(fin.tell()) 321 | fin.seek(point) 322 | elif cmd == 0xC5: 323 | warn("Unknown C5, prob return") 324 | mode, = unpack('>B', fin.read(1)) 325 | point = stack.pop() 326 | if pattern is not None: delay = handleSeek(0xC5, mode, point, track, delay, voices) 327 | fin.seek(point) 328 | elif cmd == 0xC6: 329 | # back 330 | # cmdRet 331 | mode, = unpack('>B', fin.read(1)) 332 | point = stack.pop() 333 | if pattern is not None: delay = handleSeek(0xC6, mode, point, track, delay, voices) 334 | else: print "cmdRet", mode, point 335 | fin.seek(point) 336 | elif cmd == 0xC7: 337 | warn("Unknown C7, prob jump") 338 | mode, point = unpack('>BH', fin.read(3)) 339 | if pattern is not None: delay = handleSeek(0xC7, mode, point, track, delay, voices) 340 | if 1:#totalTime < endTime: fin.seek(point) 341 | #else: 342 | track.append(midi.TextMetaEvent(tick=delay, data=map(ord, "Jump to 0x%X"%(point)))) 343 | jumpToInTrack = bytePositionInTrack[point] 344 | delay = delayAtBytePosition[point] 345 | track.insert(jumpToInTrack, midi.TrackLoopEvent(tick=delay, data=[0])) 346 | track[jumpToInTrack+1].tick -= delay 347 | delay = 0 348 | if mode == 0: 349 | print "Breaking out of loop" 350 | break 351 | elif cmd == 0xC8: 352 | # seek ex 353 | # cmdJmp 354 | mode, tmp, point = unpack('>BBH', fin.read(4)) 355 | point |= tmp<<16 356 | if pattern is not None: delay = handleSeek(0xC8, mode, point, track, delay, voices) 357 | #if totalTime < endTime: fin.seek(point) 358 | if pattern is not None: 359 | track.append(midi.TextMetaEvent(tick=delay, data=map(ord, "Jump to 0x%X"%(point)))) 360 | jumpToInTrack = bytePositionInTrack[point] 361 | delay = delayAtBytePosition[point] 362 | track.insert(jumpToInTrack, midi.TrackLoopEvent(tick=delay, data=[0])) 363 | track[jumpToInTrack+1].tick -= delay 364 | delay = 0 365 | if mode == 0: 366 | print "Breaking out of loop" 367 | break 368 | elif cmd == 0xC9: 369 | # loop begin 370 | # cmdLoopS 371 | raise NotImplementedError("") 372 | elif cmd == 0xCA: 373 | # loop end 374 | # cmdLoopE 375 | raise NotImplementedError("") 376 | #elif cmd == 0xCB: 377 | # cmdReadPort 378 | #assert fin.read(2) == '\0\0' 379 | elif cmd == 0xCC: 380 | # cmdWritePort 381 | fin.read(2) 382 | # 0xcd 0x8027ed80 cmdCheckPortImport 383 | # 0xce 0x8027ed98 cmdCheckPortExport 384 | elif cmd == 0xCF: 385 | # delay 386 | # cmdWait 387 | raise NotImplementedError("") 388 | # 0xD0 cmdConnectName 389 | # 0xD1 0x8027ebe0 cmdParentWritePort 390 | # 0xD2 0x8027ec68 cmdChildWritePort 391 | elif cmd == 0xD4: 392 | # prev note 393 | # cmdSetLastNote 394 | raise NotImplementedError("") 395 | # 0xd5 0x8027ee44 cmdTimeRelate 396 | # 0xd6 0x8027ee5c cmdSimpleOsc 397 | # 0xd7 0x8027ee8c cmdSimpleEnv 398 | # 0xd8 0x8027eec0 cmdSimpleADSR 399 | elif cmd == 0xD8: 400 | ppqn, = unpack('>xH', fin.read(3)) 401 | warn("Unknown D8, prob PPQN %s"%ppqn) 402 | if pattern is not None: pattern.resolution = ppqn 403 | elif cmd == 0xD9: 404 | # transpose 405 | # cmdTranspose 406 | raise 
NotImplementedError("") 407 | elif cmd == 0xDA: 408 | # stop child 409 | # cmdCloseTrack 410 | raise NotImplementedError("") 411 | elif cmd == 0xDC: 412 | warn("Unknown DC") 413 | fin.seek(1,1) 414 | # 0xdc 0x8027f02c cmdUpdateSync 415 | # 0xdd 0x8027f058 cmdBusConnect 416 | # TODO used by t_pinnapaco_m and k_kagemario 417 | elif cmd == 0xDD: 418 | warn("Unknown DD") 419 | fin.seek(3,1) 420 | elif cmd == 0xDE: 421 | # flags 422 | # cmdPauseStatus 423 | raise NotImplementedError("") 424 | elif cmd == 0xDF: 425 | # set dynamic 426 | # cmdSetInterrupt 427 | # TODO 428 | idx, tmp, point = unpack('>BBH', fin.read(4)) 429 | point |= tmp<<16 430 | #raise NotImplementedError("") 431 | elif cmd == 0xE0: 432 | # unset dynamic 433 | # cmdDisInterrupt 434 | #idx, = unpack('>B', fin.read(1)) 435 | #raise NotImplementedError("") 436 | tempo, = unpack('>H', fin.read(2)) 437 | warn("Unknown E0, prob Tempo %s"%tempo) 438 | if pattern is not None: track.append(midi.SetTempoEvent(bpm=tempo, tick=delay)) 439 | delay = 0 440 | elif cmd == 0xE1: 441 | # clear dynamic 442 | # cmdClrI 443 | #raise NotImplementedError("") 444 | warn("Unknown E1") 445 | fin.seek(1,1) 446 | # 0xe2 0x8027f124 cmdSetI 447 | elif cmd == 0xE2: 448 | warn("Unknown E2") 449 | fin.seek(1,1) 450 | # 0xe3 0x8027f134 cmdRetI 451 | elif cmd == 0xE3: 452 | warn("Unknown E3") 453 | fin.seek(1,1) 454 | # 0xe4 0x8027f178 cmdIntTimer 455 | elif cmd == 0xE5: 456 | # add pool 457 | # cmdVibDepth 458 | raise NotImplementedError("") 459 | elif cmd == 0xE6: 460 | # remove pool 461 | # cmdVibDepthMidi 462 | raise NotImplementedError("") 463 | elif cmd == 0xE7: 464 | # track init 465 | # cmdSyncCPU 466 | arg, = unpack('>h', fin.read(2)) 467 | print "Track init", arg 468 | if trackWasInit: 469 | raise Exception("Track was already initialized") 470 | else: 471 | trackWasInit = True 472 | # 0xe8 0x8027f1ec cmdFlushAll 473 | # 0xe9 0x8027f214 cmdFlushRelease 474 | elif cmd == 0xEA: 475 | # delay 476 | # cmdWait 477 | raise NotImplementedError("") 478 | # 0xEB 0x8027f2ac cmdPanPowSet 479 | # 0xEC 0x8027f544 cmdIIRSet 480 | # 0xED 0x8027f330 cmdFIRSet 481 | # 0xEE 0x8027f368 cmdEXTSet 482 | # 0xEF? 0x8027f3bc cmdPanSwSet 483 | # TODO used by t_pinnapaco_m and k_kagemario 484 | elif cmd == 0xEF: 485 | warn("Unknown EF") 486 | fin.seek(3, 1) 487 | # 0xF0 0x8027f460 cmdOscRoute 488 | elif cmd == 0xF0: 489 | warn("Unknown F0 "+hex(ord(fin.read(1)))) 490 | # 0xF1 0x8027f5c8 cmdIIRCutOff 491 | # 0xF2 0x8027f65c cmdOscFull 492 | # 0xF3 0x8027f098 cmdVolumeMode 493 | # 0x?? 
0x8027f4fc cmdVibPitch 494 | # 0xFA 0x8027f698 cmdCheckWave 495 | # 0xFB 0x8027f6a8 cmdPrintf 496 | # 0xFC 0x8027f2a4 cmdNop 497 | 498 | elif cmd == 0xF9: 499 | warn("Unknown F9") 500 | fin.seek(2,1) 501 | elif cmd == 0xFD: 502 | # tempo 503 | # cmdTempo 504 | tempo, = unpack('>H', fin.read(2)) 505 | print "Tempo", tempo 506 | if pattern is not None: track.append(midi.SetTempoEvent(bpm=tempo, tick=delay)) 507 | delay = 0 508 | elif cmd == 0xFE: 509 | # PPQN (pulses per quarter note) 510 | # cmdTimeBase 511 | ppqn, = unpack('>H', fin.read(2)) 512 | print "PPQN", ppqn 513 | if pattern is not None: 514 | pattern.resolution = ppqn 515 | elif cmd == 0xFF: 516 | # end track 517 | # cmdFinish 518 | print "End track" 519 | if pattern is not None: track.append(midi.EndOfTrackEvent(tick=delay)) 520 | delay = 0 521 | break 522 | elif cmd < 0x90: 523 | # note 524 | if (cmd&0x88) == 0x88: 525 | # voice off 526 | voiceId = cmd & 0x07 527 | unk, = struct.unpack('>B', fin.read(1)) 528 | print "Voice off", voiceId, unk 529 | if pattern is not None: 530 | doNoteOff(voiceId, track, delay, voices) 531 | delay = 0 532 | elif (cmd&0x80) == 0x80: 533 | # voice off 534 | voiceId = cmd & 0x07 535 | print "Voice off", voiceId 536 | if pattern is not None: 537 | doNoteOff(voiceId, track, delay, voices) 538 | delay = 0 539 | else: 540 | # voice on 541 | note = cmd 542 | flags, velocity = struct.unpack('>BB', fin.read(2)) 543 | voiceId = flags&0x07 544 | flags &= 0xF8 545 | print "Voice on", cmd, voiceId, velocity 546 | if flags != 0: 547 | warn("Unsupported flags 0x%x"%flags) 548 | if pattern is not None and flags == 0: 549 | if voiceId in voices: 550 | warn("Voice id %d already on!"%voiceId) 551 | if note in voices.values(): 552 | warn("Note %d already on!"%note) 553 | if velocity == 0: 554 | velocity = 1 555 | noteOn = midi.NoteOnEvent(tick=delay, pitch=note, velocity=velocity) 556 | track.append(noteOn) 557 | #voices[voiceId].append(noteOn) 558 | voices[voiceId] = note 559 | delay = 0 560 | else: 561 | warn("Unknown command %x@%x"%(cmd,fin.tell())) 562 | #print hex(fin.tell()) 563 | break 564 | if pattern is not None: 565 | if len(track) > 0 and not isinstance(track[-1], midi.EndOfTrackEvent): 566 | track.append(midi.EndOfTrackEvent(tick=delay)) 567 | delay=0 568 | for event in track: 569 | event.channel = channel 570 | for i, (trackPos, childTrackId, delay) in enumerate(tracksToDo): 571 | #if childTrackId == 15: continue 572 | print "Track", childTrackId 573 | fin.seek(trackPos) 574 | readTrack(fin, pattern, childTrackId, delay, totalTime, tracksToDo[i+1][0] if i+1 < len(tracksToDo) else maxpos) 575 | 576 | import os 577 | if len(sys.argv) > 1: files = sys.argv[1:] 578 | else: files = os.listdir('.') 579 | for fname in files: 580 | if fname.endswith(".com") or fname.endswith(".bms") or len(sys.argv) > 1: 581 | print fname 582 | fin = open(fname, 'rb') 583 | pattern = midi.Pattern() 584 | fin.seek(0,2) 585 | maxpos = fin.tell()-2 586 | fin.seek(0,0) 587 | readTrack(fin, pattern, maxpos=maxpos) 588 | #except Exception, e: 589 | # print e 590 | # continue 591 | #finally: print hex(fin.tell()) 592 | #print pattern 593 | if pattern is not None: midi.write_midifile(os.path.splitext(fname)[0]+".mid", pattern) 594 | fin.close() 595 | -------------------------------------------------------------------------------- /texture.py: -------------------------------------------------------------------------------- 1 | # Common functions for reading, decoding, reformatting, and exporting block-based GameCube TEV/Flipper/GX texture 
data. 2 | 3 | import struct 4 | from array import array 5 | import sys 6 | from enum import Enum, Flag 7 | 8 | class TexFmt(Enum): 9 | I4 = 0x0 10 | I8 = 0x1 11 | IA4 = 0x2 12 | IA8 = 0x3 13 | RGB565 = 0x4 14 | RGB5A3 = 0x5 15 | RGBA8 = 0x6 16 | C4 = 0x8 17 | C8 = 0x9 18 | C14X2 = 0xA 19 | CMPR = 0xE # S3TC/DXT 20 | 21 | class TlutFmt(Enum): 22 | IA8 = 0x0 23 | RGB565 = 0x1 24 | RGB5A3 = 0x2 25 | 26 | formatBytesPerPixel = { 27 | TexFmt.I4: 0.5, 28 | TexFmt.I8: 1, 29 | TexFmt.IA4: 1, 30 | TexFmt.IA8: 2, 31 | TexFmt.RGB565: 2, 32 | TexFmt.RGB5A3: 2, 33 | TexFmt.RGBA8: 4, 34 | TexFmt.C4: 0.5, 35 | TexFmt.C8: 1, 36 | TexFmt.C14X2: 2, 37 | TexFmt.CMPR: 0.5 38 | } 39 | 40 | formatBlockWidth = { 41 | TexFmt.I4: 8, 42 | TexFmt.I8: 8, 43 | TexFmt.IA4: 8, 44 | TexFmt.IA8: 4, 45 | TexFmt.RGB565: 4, 46 | TexFmt.RGB5A3: 4, 47 | TexFmt.RGBA8: 4, 48 | TexFmt.C4: 8, 49 | TexFmt.C8: 8, 50 | TexFmt.C14X2: 4, 51 | TexFmt.CMPR: 8 52 | } 53 | 54 | formatBlockHeight = { 55 | TexFmt.I4: 8, 56 | TexFmt.I8: 4, 57 | TexFmt.IA4: 4, 58 | TexFmt.IA8: 4, 59 | TexFmt.RGB565: 4, 60 | TexFmt.RGB5A3: 4, 61 | TexFmt.RGBA8: 4, 62 | TexFmt.C4: 8, 63 | TexFmt.C8: 4, 64 | TexFmt.C14X2: 4, 65 | TexFmt.CMPR: 8 66 | } 67 | 68 | formatArrayTypes = { 69 | TexFmt.I4: 'B', 70 | TexFmt.I8: 'B', 71 | TexFmt.IA4: 'B', 72 | TexFmt.IA8: 'H', 73 | TexFmt.RGB565: 'H', 74 | TexFmt.RGB5A3: 'H', 75 | TexFmt.RGBA8: 'H', 76 | TexFmt.C4: 'B', 77 | TexFmt.C8: 'B', 78 | TexFmt.C14X2: 'H', 79 | TexFmt.CMPR: 'B' 80 | } 81 | 82 | def s3tc1ReverseByte(b): 83 | b1 = b & 0x3 84 | b2 = b & 0xc 85 | b3 = b & 0x30 86 | b4 = b & 0xc0 87 | return (b1 << 6) | (b2 << 2) | (b3 >> 2) | (b4 >> 6) 88 | 89 | def unpackRGB5A3(c): 90 | if (c & 0x8000) == 0x8000: 91 | a = 0xff 92 | r = (c & 0x7c00) >> 10 93 | r = (r << (8-5)) | (r >> (10-8)) 94 | g = (c & 0x3e0) >> 5 95 | g = (g << (8-5)) | (g >> (10-8)) 96 | b = c & 0x1f 97 | b = (b << (8-5)) | (b >> (10-8)) 98 | else: 99 | a = (c & 0x7000) >> 12 100 | a = (a << (8-3)) | (a << (8-6)) | (a >> (9-8)) 101 | r = (c & 0xf00) >> 8 102 | r = (r << (8-4)) | r 103 | g = (c & 0xf0) >> 4 104 | g = (g << (8-4)) | g 105 | b = c & 0xf 106 | b = (b << (8-4)) | b 107 | return r, g, b, a 108 | 109 | def rgb565toColor(rgb): 110 | r = (rgb & 0xf800) >> 11 111 | g = (rgb & 0x7e0) >> 5 112 | b = (rgb & 0x1f) 113 | #http://www.mindcontrol.org/~hplus/graphics/expand-bits.html 114 | r = (r << 3) | (r >> 2) 115 | g = (g << 2) | (g >> 4) 116 | b = (b << 3) | (b >> 2) 117 | return r,g,b 118 | 119 | def fixS3TC1Block(data): 120 | return array(data.typecode, [ 121 | data[1], 122 | data[0], 123 | data[3], 124 | data[2], 125 | s3tc1ReverseByte(data[4]), 126 | s3tc1ReverseByte(data[5]), 127 | s3tc1ReverseByte(data[6]), 128 | s3tc1ReverseByte(data[7]) 129 | ]) 130 | 131 | # Decode a block (format-dependent size) of texture into pixels 132 | def decodeBlock(format, data, dataidx, width, height, xoff, yoff, putpixel, palette=None): 133 | if format == TexFmt.I4: 134 | for y in range(yoff, yoff+8): 135 | for x in range(xoff, xoff+8, 2): 136 | if dataidx >= len(data): break 137 | c = data[dataidx] 138 | dataidx += 1 139 | if x+1 < width and y < height: 140 | t = c&0xF0 141 | putpixel(x, y, t | (t >> 4)) 142 | t = c&0x0F 143 | putpixel(x+1, y, (t << 4) | t) 144 | 145 | elif format == TexFmt.I8: 146 | for y in range(yoff, yoff+4): 147 | for x in range(xoff, xoff+8): 148 | if dataidx >= len(data): break 149 | c = data[dataidx] 150 | dataidx += 1 151 | if x < width and y < height: 152 | putpixel(x, y, c) 153 | 154 | elif format == TexFmt.IA4: 155 | for y in 
range(yoff, yoff+4): 156 | for x in range(xoff, xoff+8): 157 | if dataidx >= len(data): break 158 | c = data[dataidx] 159 | dataidx += 1 160 | if x < width and y < height: 161 | l = c&0x0F 162 | a = c&0xF0 163 | putpixel(x, y, ((l << 4) | l,a | (a >> 4))) 164 | 165 | elif format == TexFmt.IA8: 166 | for y in range(yoff, yoff+4): 167 | for x in range(xoff, xoff+4): 168 | if dataidx >= len(data): break 169 | c = data[dataidx] 170 | dataidx += 1 171 | if x < width and y < height: 172 | putpixel(x, y, (c&0xFF, c>>8)) 173 | 174 | elif format == TexFmt.RGB565: 175 | for y in range(yoff, yoff+4): 176 | for x in range(xoff, xoff+4): 177 | if dataidx >= len(data): break 178 | c = data[dataidx] 179 | dataidx += 1 180 | if x < width and y < height: 181 | putpixel(x, y, (rgb565toColor(c))) 182 | 183 | elif format == TexFmt.RGB5A3: 184 | for y in range(yoff, yoff+4): 185 | for x in range(xoff, xoff+4): 186 | if dataidx >= len(data): break 187 | c = data[dataidx] 188 | dataidx += 1 189 | if x < width and y < height: 190 | putpixel(x, y, unpackRGB5A3(c)) 191 | 192 | elif format == TexFmt.RGBA8: 193 | for iy in range(4): 194 | for x in range(4): 195 | r = (data[dataidx ] & 0x00FF) 196 | g = (data[dataidx+16] & 0xFF00)>>8 197 | b = (data[dataidx+16] & 0x00FF) 198 | a = (data[dataidx ] & 0xFF00)>>8 199 | putpixel(xoff+x, yoff+iy, (r, g, b, a)) 200 | dataidx += 1 201 | dataidx += 16 202 | 203 | elif format == TexFmt.C4: 204 | for y in range(yoff, yoff+8): 205 | for x in range(xoff, xoff+8, 2): 206 | if dataidx >= len(data): break 207 | c = data[dataidx] 208 | dataidx += 1 209 | if x < width and y < height: 210 | putpixel(x, y, palette[(c & 0xf0) >> 4]) 211 | putpixel(x+1, y, palette[c & 0x0f]) 212 | 213 | elif format == TexFmt.C8: 214 | for y in range(yoff, yoff+4): 215 | for x in range(xoff, xoff+8): 216 | if dataidx >= len(data): break 217 | c = data[dataidx] 218 | dataidx += 1 219 | if x < width and y < height: 220 | putpixel(x, y, palette[c]) 221 | 222 | elif format == TexFmt.C14X2: 223 | for y in range(yoff, yoff+4): 224 | for x in range(xoff, xoff+4): 225 | if dataidx >= len(data): break 226 | c = data[dataidx] 227 | dataidx += 1 228 | if x < width and y < height: 229 | putpixel(x, y, palette[c&0x3FFF]) 230 | 231 | elif format == TexFmt.CMPR: 232 | for y in range(yoff, yoff+8, 4): 233 | for x in range(xoff, xoff+8, 4): 234 | if dataidx >= len(data): break 235 | c = data[dataidx:dataidx+8] 236 | dataidx += 8 237 | color0, color1, pixels = struct.unpack('HHI', bytes(fixS3TC1Block(c))) 238 | colors = [rgb565toColor(color0)+(255,), 239 | rgb565toColor(color1)+(255,)] 240 | if color0 > color1: 241 | colors += [tuple((colors[0][j] * 5 + colors[1][j] * 3) >> 3 for j in range(3))+(255,)] 242 | colors += [tuple((colors[1][j] * 5 + colors[0][j] * 3) >> 3 for j in range(3))+(255,)] 243 | else: 244 | colors += [tuple((colors[0][j] + colors[1][j]) >> 1 for j in range(3))+(255,)] 245 | colors += [tuple((colors[0][j] + colors[1][j]) >> 1 for j in range(3))+(0,)] 246 | for j in range(16): 247 | pixel = colors[(pixels>>(j*2))&3] 248 | putpixel(x+(j&3), y+(j>>2), pixel) 249 | else: 250 | raise ValueError("Unsupported format %d"%format) 251 | return dataidx 252 | 253 | # Just transform the pixel data from blocked to linear, so we can put it in a PC format 254 | def deblock(format, data, width, height): 255 | dest = array(data.typecode, [0]*len(data)) 256 | dataidx = 0 257 | for y in range(0, height, formatBlockHeight[format]): 258 | for x in range(0, width, formatBlockWidth[format]): 259 | if format == TexFmt.CMPR: 260 | 
for dy in range(0, 8, 4): 261 | for dx in range(0, 8, 4): 262 | if dataidx >= len(data): break 263 | c = data[dataidx:dataidx+8] 264 | dataidx += 8 265 | if y+dy+4 <= height: dest[width*(y + dy)//2 + (x + dx)*2:width*(y + dy)//2 + (x + dx)*2 + 8] = fixS3TC1Block(c) 266 | elif format == TexFmt.RGBA8: 267 | for dy in range(formatBlockHeight[format]): 268 | for dx in range(int(formatBlockWidth[format])): 269 | if dataidx >= len(data): break 270 | idx = int((width*(y + dy) + x) + dx)*2 271 | dest[idx+1] = data[dataidx ] 272 | dest[idx ] = data[dataidx+16] 273 | dataidx += 1 274 | dataidx += 16 275 | else: 276 | for dy in range(formatBlockHeight[format]): 277 | for i in range(int(formatBlockWidth[format])): 278 | if dataidx >= len(data): break 279 | c = data[dataidx] 280 | dataidx += 1 281 | idx = int((width*(y + dy) + x) + i) 282 | dest[idx] = c 283 | return dest 284 | 285 | def calcTextureSize(format, width, height): 286 | blockWidth = formatBlockWidth[format] 287 | blockHeight = formatBlockHeight[format] 288 | if width%blockWidth == 0: fullWidth = width 289 | else: fullWidth = width+blockWidth-(width%blockWidth) 290 | return int(fullWidth*height*formatBytesPerPixel[format]) 291 | 292 | # Read texture data into an array object, byte-swapped and with various data sizes for convenience 293 | def readTextureData(fin, format, width, height, mipmapCount=1, arrayCount=1): 294 | data = array(formatArrayTypes[format]) 295 | # data length = sum from i=0 to mipCount of (w*h/(4^i)) 296 | mipSize = calcTextureSize(format, width, height) 297 | sliceSize = int(mipSize*(4-4**(1-mipmapCount))/3) 298 | start = fin.tell() 299 | try: 300 | data.fromfile(fin, int(arrayCount*sliceSize/data.itemsize)) 301 | except EOFError: 302 | fin.seek(0, 2) 303 | end = fin.tell() 304 | fin.seek(start) 305 | data.fromfile(fin, int((end-start)/data.itemsize)) 306 | if sys.byteorder == 'little': data.byteswap() 307 | return data 308 | 309 | def readPaletteData(fin, paletteFormat, paletteNumEntries): 310 | data = array('H') 311 | data.fromfile(fin, paletteNumEntries) 312 | if sys.byteorder == 'little': data.byteswap() 313 | return data 314 | 315 | def convertPalette(paletteData, paletteFormat): 316 | if paletteData is None: return paletteData 317 | palette = [None]*len(paletteData) 318 | for i, x in enumerate(paletteData): 319 | if paletteFormat == TlutFmt.IA8: 320 | palette[i] = x & 0x00FF, (x & 0xFF00) >> 8 321 | elif paletteFormat == TlutFmt.RGB565: 322 | palette[i] = rgb565toColor(x) 323 | elif paletteFormat == TlutFmt.RGB5A3: 324 | palette[i] = unpackRGB5A3(x) 325 | return palette 326 | 327 | ## PIL 328 | formatImageTypes = { 329 | TexFmt.I4: 'L', 330 | TexFmt.I8: 'L', 331 | TexFmt.IA4: 'LA', 332 | TexFmt.IA8: 'LA', 333 | TexFmt.RGB565: 'RGB', 334 | TexFmt.RGB5A3: 'RGBA', 335 | TexFmt.RGBA8: 'RGBA', 336 | TexFmt.CMPR: 'RGBA' 337 | } 338 | 339 | paletteFormatImageTypes = { 340 | TlutFmt.IA8: 'LA', 341 | TlutFmt.RGB565: 'RGB', 342 | TlutFmt.RGB5A3: 'RGBA' 343 | } 344 | 345 | def decodeTexturePIL(data, format, width, height, paletteFormat=None, paletteData=None, mipmapCount=1, arrayCount=1): 346 | from PIL import Image 347 | 348 | dataIdx = 0 349 | imgs = [[None]*mipmapCount for i in range(arrayCount)] 350 | palette = convertPalette(paletteData, paletteFormat) 351 | for arrayIdx in range(arrayCount): 352 | for mipIdx in range(mipmapCount): 353 | im = Image.new(formatImageTypes[format] if format in formatImageTypes else paletteFormatImageTypes[paletteFormat], 354 | (width>>mipIdx, height>>mipIdx)) 355 | putpixelpil = lambda dx, dy, c:
im.putpixel((dx, dy), c) 356 | for y in range(0, im.size[1], formatBlockHeight[format]): 357 | for x in range(0, im.size[0], formatBlockWidth[format]): 358 | dataIdx = decodeBlock(format, data, dataIdx, im.size[0], im.size[1], x, y, putpixelpil, palette) 359 | imgs[arrayIdx][mipIdx] = im 360 | return imgs 361 | 362 | ## BPY 363 | 364 | def decodeTextureBPY(im, data, format, width, height, paletteFormat=None, paletteData=None, mipmapCount=1, arrayCount=1): 365 | # Note: REALLY slow. 366 | # Like, EXTREMELY SLOW. 367 | # Should probably either profile or just export/import 368 | assert arrayCount <= 1 369 | dataIdx = 0 370 | def putpixelbpy(dx, dy, c): 371 | px = (dx+(height-dy-1)*width)*4 372 | if isinstance(c, int): c = (c,) 373 | if len(c) < 3: 374 | im.pixels[px ] = c[0]/255.0 375 | im.pixels[px+1] = c[0]/255.0 376 | im.pixels[px+2] = c[0]/255.0 377 | if len(c) == 2: 378 | im.pixels[px+3] = c[1]/255.0 379 | else: 380 | im.pixels[px ] = c[0]/255.0 381 | im.pixels[px+1] = c[1]/255.0 382 | im.pixels[px+2] = c[2]/255.0 383 | if len(c) == 4: 384 | im.pixels[px+3] = c[3]/255.0 385 | palette = convertPalette(paletteData, paletteFormat) 386 | for y in range(0, height, formatBlockHeight[format]): 387 | for x in range(0, width, formatBlockWidth[format]): 388 | dataIdx = decodeBlock(format, data, dataIdx, width, height, x, y, putpixelbpy, palette) 389 | im.update() 390 | 391 | ## DDS 392 | 393 | class DDSD(Flag): 394 | CAPS = 0x00000001 395 | HEIGHT = 0x00000002 396 | WIDTH = 0x00000004 397 | PITCH = 0x00000008 398 | PIXELFORMAT = 0x00001000 399 | MIPMAPCOUNT = 0x00020000 400 | LINEARSIZE = 0x00080000 401 | 402 | class DDPF(Flag): 403 | ALPHAPIXELS = 0x00000001 404 | FOURCC = 0x00000004 405 | RGB = 0x00000040 406 | LUMINANCE = 0x00020000 407 | 408 | class DDSCAPS(Flag): 409 | COMPLEX = 0x00000008 410 | TEXTURE = 0x00001000 411 | MIPMAP = 0x00400000 412 | 413 | ddsFormats = { 414 | TexFmt.I4: (DDPF.RGB, b'', 24, 0x00FF0000, 0x0000FF00, 0x000000FF, 0x00000000), 415 | TexFmt.I8: (DDPF.RGB, b'', 24, 0x00FF0000, 0x0000FF00, 0x000000FF, 0x00000000), 416 | TexFmt.IA4: (DDPF.ALPHAPIXELS|DDPF.RGB, b'', 32, 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000), 417 | TexFmt.IA8: (DDPF.ALPHAPIXELS|DDPF.RGB, b'', 32, 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000), 418 | # TexFmt.RGB565: (DDPF.RGB, b'', 16, 0x001F, 0x07E0, 0xF800, 0), 419 | TexFmt.RGB565: (DDPF.ALPHAPIXELS|DDPF.RGB, b'', 24, 0x00FF0000, 0x0000FF00, 0x000000FF, 0x00000000), 420 | TexFmt.RGB5A3: (DDPF.ALPHAPIXELS|DDPF.RGB, b'', 32, 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000), 421 | TexFmt.RGBA8: (DDPF.ALPHAPIXELS|DDPF.RGB, b'', 32, 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000), 422 | TexFmt.CMPR: (DDPF.ALPHAPIXELS|DDPF.FOURCC, b'DXT1', 0, 0, 0, 0, 0) 423 | } 424 | ddsPaletteFormats = { 425 | TlutFmt.IA8: (DDPF.ALPHAPIXELS|DDPF.RGB, b'', 32, 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000), 426 | TlutFmt.RGB565: (DDPF.RGB, b'', 16, 0xF800, 0x07E0, 0x001F, 0), 427 | TlutFmt.RGB5A3: (DDPF.ALPHAPIXELS|DDPF.RGB, b'', 32, 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000), 428 | } 429 | 430 | def decodeTextureDDS(fout, data, format, width, height, paletteFormat=None, paletteData=None, mipmapCount=1, arrayCount=1): 431 | fout.write(b'DDS ') 432 | flags = DDSD.CAPS|DDSD.HEIGHT|DDSD.WIDTH|DDSD.PIXELFORMAT 433 | if format == TexFmt.CMPR: 434 | flags |= DDSD.LINEARSIZE 435 | pitchOrLinearSize = len(data) 436 | else: 437 | flags |= DDSD.PITCH 438 | bytesPerPixel = ddsFormats[format][2]//8 439 | pitchOrLinearSize = int(width*bytesPerPixel) 440 | if mipmapCount > 
1: 441 | flags |= DDSD.MIPMAPCOUNT 442 | fout.write(struct.pack(' 1: 450 | caps |= DDSCAPS.COMPLEX|DDSCAPS.MIPMAP 451 | elif arrayCount > 1: 452 | caps |= DDSCAPS.COMPLEX 453 | fout.write(struct.pack('>mipIdx, height>>mipIdx 472 | dataOffset = (arrayIdx*sliceSize + int(mipSize*(4-4**(1-mipIdx))/3)) 473 | if format in (TexFmt.I4, TexFmt.I8, TexFmt.IA4, TexFmt.IA8, TexFmt.RGB565, TexFmt.RGB5A3, TexFmt.C4, TexFmt.C8, TexFmt.C14X2): 474 | dest = array('B', (0,)*mipWidth*mipHeight*componentsOut) 475 | if componentsIn == 1: 476 | def putpixelarray(dx, dy, c): 477 | offset = (mipWidth*dy + dx)*componentsOut 478 | dest[offset:offset + componentsOut] = array('B', [c]*componentsOut) 479 | elif componentsIn == 2: 480 | def putpixelarray(dx, dy, c): 481 | offset = (mipWidth*dy + dx)*componentsOut 482 | dest[offset:offset + componentsOut] = array('B', [c[0], c[0], c[0], c[1]]) 483 | elif componentsIn == 3: 484 | def putpixelarray(dx, dy, c): 485 | offset = (mipWidth*dy + dx)*componentsOut 486 | dest[offset:offset + componentsOut] = array('B', [c[2], c[1], c[0]]) 487 | elif componentsIn == 4: 488 | def putpixelarray(dx, dy, c): 489 | offset = (mipWidth*dy + dx)*componentsOut 490 | dest[offset:offset + componentsOut] = array('B', [c[2], c[1], c[0], c[3]]) 491 | for y in range(0, mipHeight, formatBlockHeight[format]): 492 | for x in range(0, mipWidth, formatBlockWidth[format]): 493 | dataOffset = decodeBlock(format, data, dataOffset, mipWidth, mipHeight, x, y, putpixelarray, palette) 494 | dest.tofile(fout) 495 | else: 496 | deblocked = deblock(format, data[dataOffset:dataOffset+(mipSize>>(mipIdx*2))], mipWidth, mipHeight) 497 | if sys.byteorder == 'big': deblocked.byteswap() 498 | deblocked.tofile(fout) 499 | 500 | ## KTX 501 | 502 | class GL: 503 | UNSIGNED_BYTE = 0x1401 504 | RED = 0x1903 505 | RGB = 0x1907 506 | RGBA = 0x1908 507 | RGB8 = 0x8051 508 | RGBA8 = 0x8058 509 | BGRA = 0x80E1 510 | RG = 0x8227 511 | R8 = 0x8229 512 | RG8 = 0x822B 513 | UNSIGNED_SHORT_5_6_5 = 0x8363 514 | COMPRESSED_RGB_S3TC_DXT1_EXT = 0x83F0 515 | RGB565 = 0x8D62 516 | 517 | # glType glFormat glInternalFormat glBaseInternalFormat 518 | glFormats = { 519 | TexFmt.I4: (GL.UNSIGNED_BYTE, GL.RED, GL.R8, GL.RED), 520 | TexFmt.I8: (GL.UNSIGNED_BYTE, GL.RED, GL.R8, GL.RED), 521 | TexFmt.IA4: (GL.UNSIGNED_BYTE, GL.RG, GL.RG8, GL.RG), 522 | TexFmt.IA8: (GL.UNSIGNED_BYTE, GL.RG, GL.RG8, GL.RG), 523 | TexFmt.RGB565: (GL.UNSIGNED_SHORT_5_6_5, GL.RGB, GL.RGB565, GL.RGB), 524 | TexFmt.RGB5A3: (GL.UNSIGNED_BYTE, GL.RGBA, GL.RGBA8, GL.RGBA), 525 | TexFmt.RGBA8: (GL.UNSIGNED_BYTE, GL.BGRA, GL.RGBA8, GL.RGBA), 526 | TexFmt.CMPR: ( 0, 0, GL.COMPRESSED_RGB_S3TC_DXT1_EXT, GL.RGBA) 527 | } 528 | glPaletteFormats = { 529 | TlutFmt.IA8: (GL.UNSIGNED_BYTE, GL.RG, GL.RG8, GL.RG), 530 | TlutFmt.RGB565: (GL.UNSIGNED_SHORT_5_6_5, GL.RGB, GL.RGB565, GL.RGB), 531 | TlutFmt.RGB5A3: (GL.UNSIGNED_BYTE, GL.RGBA, GL.RGBA8, GL.RGBA), 532 | } 533 | 534 | def decodeTextureKTX(fout, data, format, width, height, paletteFormat=None, paletteData=None, mipmapCount=1, arrayCount=0, safety=False): 535 | if format in glFormats: 536 | glType, glFormat, glInternalFormat, glBaseInternalFormat = glFormats[format] 537 | else: 538 | glType, glFormat, glInternalFormat, glBaseInternalFormat = glPaletteFormats[paletteFormat] 539 | 540 | mipSize = calcTextureSize(format, width, height)//data.itemsize 541 | sliceSize = int(mipSize*(4-4**(1-mipmapCount))/3) 542 | palette = convertPalette(paletteData, paletteFormat) 543 | if format in (TexFmt.I4, TexFmt.I8): components = 1 544 | elif 
format in (TexFmt.IA4, TexFmt.IA8): components = 2 545 | elif format == TexFmt.RGB565: components = 3 546 | elif format in (TexFmt.RGB5A3, TexFmt.RGBA8, TexFmt.CMPR): components = 4 547 | elif format in (TexFmt.C4, TexFmt.C8, TexFmt.C14X2): 548 | if paletteFormat == TlutFmt.IA8: components = 2 549 | elif paletteFormat == TlutFmt.RGB565: components = 3 550 | elif paletteFormat == TlutFmt.RGB5A3: components = 4 551 | 552 | if safety and components <= 2: 553 | glFormat = GL.RGB 554 | glInternalFormat = GL.RGB8 555 | glBaseInternalFormat = GL.RGB 556 | 557 | fout.write(bytes([0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A])) 558 | fout.write(struct.pack('IIIIIIIIIIIII', 559 | 0x04030201, 560 | glType, 561 | 1, # glTypeSize 562 | glFormat, 563 | glInternalFormat, 564 | glBaseInternalFormat, 565 | width, 566 | height, 567 | 0, # depth 568 | arrayCount, 569 | 1, # face count 570 | mipmapCount, 571 | 28)) # key-value length 572 | 573 | fout.write(struct.pack('I', 23)) 574 | fout.write(b'KTXorientation\0S=r,T=d\0\0') 575 | 576 | for mipIdx in range(mipmapCount): 577 | for arrayIdx in range(max(1, arrayCount)): 578 | mipWidth, mipHeight = width>>mipIdx, height>>mipIdx 579 | dataOffset = arrayIdx*sliceSize + int(mipSize*(4-4**(1-mipIdx))/3) 580 | if format in (TexFmt.I4, TexFmt.IA4, TexFmt.RGB5A3) or (format in (TexFmt.C4, TexFmt.C8, TexFmt.C14X2) and paletteFormat in (TlutFmt.IA8, TlutFmt.RGB5A3)): 581 | pixelData = array('B', (0,)*mipWidth*mipHeight*components) 582 | if components == 1: 583 | def putpixelarray(dx, dy, c): 584 | pixelData[mipWidth*dy + dx] = c 585 | else: 586 | def putpixelarray(dx, dy, c): 587 | offset = (mipWidth*dy + dx)*components 588 | pixelData[offset:offset + components] = array('B', c) 589 | for y in range(0, mipHeight, formatBlockHeight[format]): 590 | for x in range(0, mipWidth, formatBlockWidth[format]): 591 | dataOffset = decodeBlock(format, data, dataOffset, mipWidth, mipHeight, x, y, putpixelarray, palette) 592 | elif format in (TexFmt.C4, TexFmt.C8, TexFmt.C14X2) and paletteFormat == TlutFmt.RGB565: 593 | deblocked = deblock(format, data[dataOffset:dataOffset+(mipSize>>(mipIdx*2))], mipWidth, mipHeight) 594 | if sys.byteorder == 'big': deblocked.byteswap() 595 | pixelData = array('H', [paletteData[px] for px in deblocked]) 596 | else: 597 | pixelData = deblock(format, data[dataOffset:dataOffset+(mipSize>>(mipIdx*2))], mipWidth, mipHeight) 598 | if sys.byteorder == 'big': pixelData.byteswap() 599 | if safety and components <= 2: 600 | if pixelData.itemsize == 2: 601 | pixelData = array('B', pixelData.tobytes()) 602 | pixels = zip(*(pixelData[i::components] for i in range(components))) 603 | extraChannels = (0,)*(3-components) 604 | pixelData = array('B', [c for origPixel in pixels for c in origPixel+extraChannels]) 605 | fout.write(struct.pack('I', len(pixelData)*pixelData.itemsize)) 606 | pixelData.tofile(fout) 607 | 608 | --------------------------------------------------------------------------------
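A note on the channel widening used in rgb565toColor and unpackRGB5A3 above: an n-bit value is expanded to 8 bits by shifting it left and copying its own high bits into the vacated low bits (the expand-bits trick referenced in the code), so 0 maps to 0 and the maximum code maps exactly to 255. A minimal self-check, using only the expressions already in texture.py:

def expand5(v):
    # 5-bit -> 8-bit, same expression as the red/blue path in rgb565toColor
    return (v << 3) | (v >> 2)

def expand6(v):
    # 6-bit -> 8-bit, same expression as the green path in rgb565toColor
    return (v << 2) | (v >> 4)

assert expand5(0) == 0 and expand5(0x1F) == 0xFF
assert expand6(0) == 0 and expand6(0x3F) == 0xFF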
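The sliceSize expression in readTextureData (repeated in decodeTextureDDS and decodeTextureKTX) is the closed form of the mip-chain size described by the comment there: each mip level is a quarter of the previous one, so the total is sum over i of mipSize/4**i, which equals mipSize*(4 - 4**(1-n))/3 for n levels. A small sketch comparing the closed form against the explicit sum, with a power-of-four base size so both stay exact:

def sliceSizeClosedForm(mipSize, mipmapCount):
    # same expression as texture.py
    return int(mipSize*(4 - 4**(1 - mipmapCount))/3)

def sliceSizeExplicitSum(mipSize, mipmapCount):
    # literal reading of the comment: sum of w*h/(4^i) over the mip levels
    return sum(mipSize//(4**i) for i in range(mipmapCount))

for mips in range(1, 7):
    assert sliceSizeClosedForm(4096, mips) == sliceSizeExplicitSum(4096, mips)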
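The struct.pack format strings inside decodeTextureDDS appear to have been cut short in this listing, so the header fields written there are not visible. Purely as a reference for that gap, here is a sketch of a standard little-endian 124-byte DDS_HEADER written after the b'DDS ' magic, using only names that survive in the surrounding code (flags, pitchOrLinearSize, ddsFormats[format]) plus an assumed caps seed of DDSCAPS.TEXTURE; it follows the documented DDS layout, not necessarily what the original lines contained:

pfFlags, fourCC, bitCount, rMask, gMask, bMask, aMask = ddsFormats[format]
caps = DDSCAPS.TEXTURE   # assumed starting value; COMPLEX/MIPMAP are OR'd in as in the surviving code
fout.write(struct.pack('<7I44x',
                       124,                  # dwSize
                       flags.value,          # dwFlags (DDSD bits assembled above)
                       height, width,        # dwHeight, dwWidth
                       pitchOrLinearSize,    # dwPitchOrLinearSize
                       0,                    # dwDepth
                       mipmapCount))         # dwMipMapCount, followed by 44 reserved bytes
fout.write(struct.pack('<2I4s5I',
                       32, pfFlags.value, fourCC,              # DDS_PIXELFORMAT: dwSize, dwFlags, dwFourCC
                       bitCount, rMask, gMask, bMask, aMask))  # bit count and channel masks
fout.write(struct.pack('<I16x', caps.value))                   # dwCaps, then dwCaps2..4 and dwReserved2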
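Finally, a short usage sketch for the palette path (readPaletteData, convertPalette, decodeTexturePIL). The file name, dimensions, and palette size below are hypothetical; the only layout assumption is that a 256-entry RGB565 TLUT directly follows the C8 index data:

from texture import TexFmt, TlutFmt, readTextureData, readPaletteData, decodeTexturePIL

with open('font_page.bin', 'rb') as fin:                     # hypothetical input file
    data = readTextureData(fin, TexFmt.C8, 128, 64)          # 128x64 C8 index data
    paletteData = readPaletteData(fin, TlutFmt.RGB565, 256)  # 256-entry RGB565 TLUT right after it
images = decodeTexturePIL(data, TexFmt.C8, 128, 64, TlutFmt.RGB565, paletteData)
images[0][0].save('font_page.png')                           # first array slice, first mip level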