├── .gitignore ├── README.md ├── schemas ├── trpfs.fbs ├── trpak.fbs └── trpfd.fbs ├── trpak_extract.py ├── trpfs_extract.py └── full_extract.py /.gitignore: -------------------------------------------------------------------------------- 1 | info 2 | output 3 | tools 4 | files 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # This tool is shit, use https://github.com/pkZukan/gftool -------------------------------------------------------------------------------- /schemas/trpfs.fbs: -------------------------------------------------------------------------------- 1 | file_extension "trpfs"; 2 | 3 | // The first 8 bytes contain the magic value 'ONEPACK' 4 | // The next 8 bytes contain the offset to the root table (0x0141E72FC0 in leaked scarlet version) 5 | 6 | table TRPFS { 7 | hashes: [uint64]; // Offset 0x141E92764 8 | file_offsets: [uint64]; 9 | } 10 | 11 | root_type TRPFS; -------------------------------------------------------------------------------- /schemas/trpak.fbs: -------------------------------------------------------------------------------- 1 | file_extension "trpak"; 2 | 3 | enum Compression : uint8 { 4 | OODLE = 3, 5 | NONE = 255, 6 | } 7 | 8 | table File { 9 | unused: uint8; 10 | compression_type: Compression = 255; 11 | unk1: uint8; 12 | decompressed_size: uint64; 13 | data: [uint8]; 14 | } 15 | 16 | table TRPAK { 17 | hashes: [uint64]; 18 | files: [File]; 19 | } 20 | 21 | root_type TRPAK; -------------------------------------------------------------------------------- /schemas/trpfd.fbs: -------------------------------------------------------------------------------- 1 | file_extension "trpfd"; 2 | 3 | table InfoTable { 4 | size: uint64; 5 | count: uint64; 6 | } 7 | 8 | table EmptyTable { 9 | 10 | } 11 | 12 | table MapTable { 13 | index: uint32; 14 | unk2: EmptyTable; 15 | } 16 | 17 | table TRPFD { 18 | file_hashes: [uint64]; // 0x042DC6 elements, starts at 0x8DC6AC 19 | paths: [string]; // 0x003EF0 elements, starts at 0x7BC2F8 20 | map: [MapTable]; // 0x042DC6 elements, starts at 0x06E274 21 | info: [InfoTable]; // 0x003EF0 elements, starts at 0x000028 22 | } 23 | 24 | root_type TRPFD; -------------------------------------------------------------------------------- /trpak_extract.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | import subprocess 4 | import sys 5 | import json 6 | import glob 7 | from ctypes import cdll, c_char_p, create_string_buffer 8 | 9 | schema_dir = "schemas" 10 | info_dir = "info" 11 | tool_dir = "tools" 12 | 13 | def OodleDecompress(raw_bytes, size, output_size): 14 | for filename in glob.glob(os.path.join(tool_dir, "oo2core*.dll")): 15 | handle = cdll.LoadLibrary(filename) 16 | output = create_string_buffer(output_size) 17 | output_bytes = handle.OodleLZ_Decompress(c_char_p(raw_bytes), size, output, output_size, 0, 0, 0, None, None, None, None, None, None, 3) 18 | return output.raw 19 | 20 | def ParseFlatbuffer(filename): 21 | command = tool_dir + "\\flatc.exe --raw-binary -o info --strict-json --defaults-json -t schemas\\trpak.fbs -- " + filename 22 | subprocess.call(command) 23 | 24 | def WriteFiles(): 25 | global filename 26 | json_path = info_dir + "\\" + Path(filename).stem + ".json" 27 | with open(json_path, mode="r") as parsed_file: 28 | data = json.load(parsed_file) 29 | for i in range(len(data["files"])): 30 | out_file = open(os.path.dirname(filename) + "\\" + hex(data["hashes"][i]), mode="wb") 31 | compressed_data = [] 32 | data_size = 0 33 | for j in data["files"][i]["data"]: 34 | data_size += 1 35 | compressed_data.append(j) 36 | if data["files"][i]["compression_type"] == "OODLE": 37 | decompressed_data = OodleDecompress(bytes(compressed_data), data_size, data["files"][i]["decompressed_size"]) 38 | elif data["files"][i]["compression_type"] == "NONE": 39 | decompressed_data = bytes(compressed_data) 40 | out_file.write(decompressed_data) 41 | out_file.close() 42 | 43 | filename = sys.argv[1] 44 | ParseFlatbuffer(filename) 45 | WriteFiles() 46 | -------------------------------------------------------------------------------- /trpfs_extract.py: -------------------------------------------------------------------------------- 1 | import struct 2 | import os 3 | import subprocess 4 | import json 5 | 6 | fs_magic = "ONEPACK" 7 | file_dir = "files" 8 | schema_dir = "schemas" 9 | info_dir = "info" 10 | tool_dir = "tools" 11 | output_dir = "output" 12 | init_offset = 0 13 | name_dict = {} 14 | hash_dict = {} 15 | 16 | def FNV1a64(input_str): 17 | if input_str in hash_dict: 18 | return hash_dict[input_str] 19 | fnv_prime = 1099511628211 20 | offset_basis = 14695981039346656837 21 | for i in input_str: 22 | offset_basis ^= ord(i) 23 | offset_basis = (offset_basis * fnv_prime) % (2**64) 24 | hash_dict[input_str] = offset_basis 25 | return offset_basis 26 | 27 | def ExtractFS(): 28 | print("Extracting data from trpfs file...") 29 | with open(file_dir + "\data.trpfs", mode="rb") as fs, open(file_dir + "\\fs_data_separated.trpfs", mode="wb") as fs_sep: 30 | magic = fs.read(8).decode("utf-8") [:-1] 31 | assert (magic == fs_magic), "Invalid trpfs magic!" 32 | global init_offset 33 | init_offset = struct.unpack('Q', fs.read(8))[0] 34 | fs.seek(0, os.SEEK_END) 35 | eof_offset = fs.tell() 36 | fs.seek(init_offset) 37 | fs_sep.write(fs.read(eof_offset - init_offset)) 38 | 39 | command = tool_dir + "\\flatc.exe --raw-binary -o info --strict-json --defaults-json -t schemas\\trpfs.fbs -- files\\fs_data_separated.trpfs" 40 | subprocess.call(command) 41 | 42 | def ExtractFD(): 43 | print("Extracting data from trpfd file...") 44 | command = tool_dir + "\\flatc.exe --raw-binary -o info --strict-json --defaults-json -t schemas\\trpfd.fbs -- files\\data.trpfd" 45 | subprocess.call(command) 46 | 47 | with open(info_dir + "\\names_original.txt", mode="r") as onames_file, open(info_dir + "\\names_changed.txt", mode="r") as cnames_file: 48 | onames = onames_file.read().splitlines() 49 | cnames = cnames_file.read().splitlines() 50 | for i in range(len(onames)): 51 | name_dict[onames[i]] = cnames[i] 52 | 53 | def WriteFiles(): 54 | print("Extracting files...") 55 | with open(info_dir + "\data.json", mode="r") as fd_info, open(info_dir + "\\fs_data_separated.json", mode="r") as fs_info, open(file_dir + "\data.trpfs", mode="rb") as data: 56 | fd = json.load(fd_info) 57 | fs = json.load(fs_info) 58 | num_files = len(fs["file_offsets"]) 59 | global init_offset 60 | fs["file_offsets"].append(init_offset) 61 | 62 | for i in range(num_files): 63 | offset = fs["file_offsets"][i] 64 | end_offset = fs["file_offsets"][i + 1] 65 | name_hash = fs["hashes"][i] 66 | 67 | path = "ERROR_NO_MATCHING_FILENAME" 68 | for j in fd["paths"]: 69 | if name_hash == FNV1a64(j): 70 | if j in name_dict: 71 | path = output_dir + "/" + name_dict[j] 72 | else: 73 | path = output_dir + "/" + j 74 | break 75 | print(path) 76 | os.makedirs(os.path.dirname(path), exist_ok=True) 77 | 78 | data.seek(offset) 79 | out_file = open(path, mode="wb+") 80 | out_file.write(data.read(end_offset - offset)) 81 | out_file.close() 82 | print("\nExtraction complete!") 83 | 84 | ExtractFS() 85 | ExtractFD() 86 | WriteFiles() 87 | #print(hex(FNV1a64("pm0081_00_00_20146_stepout01.traef"))) 88 | -------------------------------------------------------------------------------- /full_extract.py: -------------------------------------------------------------------------------- 1 | import struct 2 | import os 3 | import subprocess 4 | import json 5 | from pathlib import Path 6 | from ctypes import cdll, c_char_p, create_string_buffer 7 | import glob 8 | 9 | fs_magic = "ONEPACK" 10 | file_dir = "files" 11 | schema_dir = "schemas" 12 | info_dir = "info" 13 | tool_dir = "tools" 14 | output_dir = "output" 15 | init_offset = 0 16 | name_dict = {} 17 | hash_dict = {} 18 | 19 | def FNV1a64(input_str): 20 | if input_str in hash_dict: 21 | return hash_dict[input_str] 22 | fnv_prime = 1099511628211 23 | offset_basis = 14695981039346656837 24 | for i in input_str: 25 | offset_basis ^= ord(i) 26 | offset_basis = (offset_basis * fnv_prime) % (2**64) 27 | hash_dict[input_str] = offset_basis 28 | return offset_basis 29 | 30 | def ExtractFS(): 31 | print("Extracting data from trpfs file...") 32 | with open(file_dir + "\data.trpfs", mode="rb") as fs, open(file_dir + "\\fs_data_separated.trpfs", mode="wb") as fs_sep: 33 | magic = fs.read(8).decode("utf-8") [:-1] 34 | assert (magic == fs_magic), "Invalid trpfs magic!" 35 | global init_offset 36 | init_offset = struct.unpack('Q', fs.read(8))[0] 37 | fs.seek(0, os.SEEK_END) 38 | eof_offset = fs.tell() 39 | fs.seek(init_offset) 40 | fs_sep.write(fs.read(eof_offset - init_offset)) 41 | 42 | command = tool_dir + "\\flatc.exe --raw-binary -o info --strict-json --defaults-json -t schemas\\trpfs.fbs -- files\\fs_data_separated.trpfs" 43 | subprocess.call(command) 44 | 45 | def ExtractFD(): 46 | print("Extracting data from trpfd file...") 47 | command = tool_dir + "\\flatc.exe --raw-binary -o info --strict-json --defaults-json -t schemas\\trpfd.fbs -- files\\data.trpfd" 48 | subprocess.call(command) 49 | 50 | with open(info_dir + "\\names_original.txt", mode="r") as onames_file, open(info_dir + "\\names_changed.txt", mode="r") as cnames_file: 51 | onames = onames_file.read().splitlines() 52 | cnames = cnames_file.read().splitlines() 53 | for i in range(len(onames)): 54 | name_dict[onames[i]] = cnames[i] 55 | 56 | def WriteFiles(): 57 | print("Extracting files...") 58 | with open(info_dir + "\data.json", mode="r") as fd_info, open(info_dir + "\\fs_data_separated.json", mode="r") as fs_info, open(file_dir + "\data.trpfs", mode="rb") as data: 59 | fd = json.load(fd_info) 60 | fs = json.load(fs_info) 61 | num_files = len(fs["file_offsets"]) 62 | global init_offset 63 | fs["file_offsets"].append(init_offset) 64 | 65 | for i in range(num_files): 66 | offset = fs["file_offsets"][i] 67 | end_offset = fs["file_offsets"][i + 1] 68 | name_hash = fs["hashes"][i] 69 | 70 | path = "ERROR_NO_MATCHING_FILENAME" 71 | for j in fd["paths"]: 72 | if name_hash == FNV1a64(j): 73 | if j in name_dict: 74 | path = output_dir + "/" + name_dict[j] 75 | else: 76 | path = output_dir + "/" + j 77 | break 78 | print(path) 79 | os.makedirs(os.path.dirname(path), exist_ok=True) 80 | 81 | data.seek(offset) 82 | out_file = open(path, mode="wb+") 83 | out_file.write(data.read(end_offset - offset)) 84 | out_file.close() 85 | print("\nExtraction complete!") 86 | 87 | def OodleDecompress(raw_bytes, size, output_size): 88 | for filename in glob.glob(os.path.join(tool_dir, "oo2core*.dll")): 89 | handle = cdll.LoadLibrary(filename) 90 | output = create_string_buffer(output_size) 91 | output_bytes = handle.OodleLZ_Decompress(c_char_p(raw_bytes), size, output, output_size, 0, 0, 0, None, None, None, None, None, None, 3) 92 | return output.raw 93 | 94 | def ParseFlatbuffer(foldername): 95 | for filename in glob.glob(os.path.join(foldername, "**/*.trpak"), recursive=True): 96 | print("Parsing " + filename) 97 | command = tool_dir + "\\flatc.exe --raw-binary -o info --strict-json --defaults-json -t schemas\\trpak.fbs -- " + filename 98 | subprocess.call(command) 99 | 100 | folderName = os.path.dirname(filename) + "\\" + os.path.basename(filename.replace(".trpak", "")) 101 | 102 | if not os.path.exists(folderName): 103 | os.mkdir(folderName) 104 | 105 | json_path = info_dir + "\\" + Path(filename).stem + ".json" 106 | with open(json_path, mode="r") as parsed_file: 107 | data = json.load(parsed_file) 108 | for i in range(len(data["files"])): 109 | hashValue = data["hashes"][i] 110 | out_file = open(folderName + "\\" + hex(hashValue), mode="wb") 111 | compressed_data = [] 112 | data_size = 0 113 | for j in data["files"][i]["data"]: 114 | data_size += 1 115 | compressed_data.append(j) 116 | if data["files"][i]["compression_type"] == "OODLE": 117 | decompressed_data = OodleDecompress(bytes(compressed_data), data_size, data["files"][i]["decompressed_size"]) 118 | elif data["files"][i]["compression_type"] == "NONE": 119 | decompressed_data = bytes(compressed_data) 120 | out_file.write(decompressed_data) 121 | out_file.close() 122 | os.remove(filename) 123 | 124 | if __name__ == "__main__": 125 | 126 | if not os.path.exists(output_dir): 127 | ExtractFS() 128 | ExtractFD() 129 | WriteFiles() 130 | ParseFlatbuffer(output_dir) 131 | --------------------------------------------------------------------------------