├── oo2core_8_win64.dll
├── datasheets
│   ├── extract-and-convert.mjs
│   ├── extract-worker.js
│   ├── datasheet-extractor.mjs
│   └── datasheet-converter.mjs
├── package.json
├── README.md
├── models
│   ├── 3d
│   │   ├── converter-worker.js
│   │   └── 3d-converter.mjs
│   ├── extract-and-assemble.mjs
│   └── pak-extractors
│       └── models-and-materials
│           ├── extract-worker.js
│           └── get-models-materials-textures.mjs
├── nw-model-miner.mjs
├── viewer
│   └── index.html
└── pak-headers
    └── pak-header-dumper.mjs

--------------------------------------------------------------------------------
/oo2core_8_win64.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Kattoor/nw-model-miner/HEAD/oo2core_8_win64.dll

--------------------------------------------------------------------------------
/datasheets/extract-and-convert.mjs:
--------------------------------------------------------------------------------
import {extractDatasheets} from './datasheet-extractor.mjs';
import {convertDatasheets} from './datasheet-converter.mjs';

export async function extract(pakFilePaths, outPath) {
    await extractDatasheets(pakFilePaths, outPath);
}

export async function convert(outPath) {
    await convertDatasheets(outPath);
}

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "nw-model-miner",
  "version": "1.0.0",
  "description": "",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "Jasper Catthoor",
  "license": "ISC",
  "dependencies": {
    "ffi-napi": "^4.0.3",
    "globby": "^12.0.1",
    "sharp": "^0.29.0",
    "workerpool": "^6.1.5",
    "yauzl": "https://github.com/Kattoor/yauzl.git"
  },
  "type": "module",
  "exports": "./nw-model-miner.mjs",
  "engines": {
    "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
  },
  "devDependencies": {
    "esmify": "^2.1.1"
  }
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# ⛏️ nw-model-miner ⛏️

1. Download [cgf-converter.exe](https://github.com/Markemp/Cryengine-Converter/releases) and add the directory containing cgf-converter.exe to your PATH environment variable.
2. Download [texconv.exe](https://github.com/microsoft/DirectXTex/releases) and add the directory containing texconv.exe to your PATH environment variable.
3. Download [COLLADA2GLTF](https://github.com/KhronosGroup/COLLADA2GLTF/releases) and add the directory containing COLLADA2GLTF-bin.exe to your PATH environment variable.
4. `npm install` - you'll need a [Python installation](https://www.python.org/downloads/), since [ffi-napi](https://www.npmjs.com/package/ffi-napi) (which uses [node-gyp](https://github.com/nodejs/node-gyp)) is used to automatically create bindings to the Oodle compression DLL.
5. `node nw-model-miner.mjs "PATH_TO_ASSETS"`
6. Browse [The Useless Web](https://theuselessweb.com/) for about 20 minutes until the application finishes.
7. Open nw-model-miner/viewer/index.html - drag a GLTF file onto the page to load it.
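The entry point invoked in step 5, `nw-model-miner.mjs`, is not reproduced in this dump. Going by the functions the other modules export, it presumably wires the stages together roughly as follows; this is a sketch, and the glob pattern, output directory and exact stage order are assumptions:

```js
// Hypothetical sketch of nw-model-miner.mjs; the real file is not shown in this dump.
import {globby} from 'globby';
import {dumpPakHeaders} from './pak-headers/pak-header-dumper.mjs';
import {extract, convert} from './datasheets/extract-and-convert.mjs';
import {extractAndAssemble} from './models/extract-and-assemble.mjs';

const assetsPath = process.argv[2];          // "PATH_TO_ASSETS" from step 5
const outPath = './out/';                    // assumed output directory

const pakFilePaths = await globby(assetsPath + '/**/*.pak');

await dumpPakHeaders(pakFilePaths, outPath); // index textures/models/materials per pak
await extract(pakFilePaths, outPath);        // pull .datasheet files out of the paks
await convert(outPath);                      // .datasheet -> .csv
await extractAndAssemble(outPath);           // models + materials + textures -> .gltf
```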
https://user-images.githubusercontent.com/8040542/131267284-9e853211-6fcb-4e26-a70f-32786e39939b.mp4

--------------------------------------------------------------------------------
/models/3d/converter-worker.js:
--------------------------------------------------------------------------------
import {execSync} from 'child_process';
import workerpool from 'workerpool';
import {promises as fs} from 'fs';

async function runColladaConverter(modelPath) {
    const fileName = modelPath.slice(modelPath.lastIndexOf('/') + 1);
    const directory = modelPath.slice(0, modelPath.lastIndexOf('/'));

    try {
        execSync('cgf-converter.exe "' + fileName + '"', {
            env: process.env,
            cwd: directory
        });
        await fs.unlink(modelPath);
    } catch (e) {
        console.log('ColladaConverter: error for ' + modelPath);
    }
    return directory + '/' + fileName.slice(0, fileName.lastIndexOf('.')) + '.dae';
}

async function runGltfConverter(colladaFilePath) {
    const gltfFilePath = colladaFilePath.slice(0, colladaFilePath.lastIndexOf('.')) + '.gltf';

    try {
        execSync('COLLADA2GLTF-bin.exe "' + colladaFilePath + '" --doubleSided -o "' + gltfFilePath + '"');
        await fs.unlink(colladaFilePath);
    } catch (e) {
        console.log('GltfConverter: error for ' + colladaFilePath);
        // probably a missing …

/* [lines 30-54 of this file did not survive the dump. They evidently close
   runGltfConverter and define a helper that recursively removes empty output
   directories (the fragment below is its tail; the line feeding it ended in
   `.every(isEmpty => isEmpty === true);`), presumably along with the
   workerpool.worker({runColladaConverter, runGltfConverter}) registration
   that 3d-converter.mjs relies on.] */

        if (subDirsAreEmpty) {
            await fs.rmdir(pathPrefix + '/' + path.name);
            return true;
        }
    }
}

--------------------------------------------------------------------------------
/datasheets/datasheet-extractor.mjs:
--------------------------------------------------------------------------------
import {open} from 'yauzl';
import workerpool from 'workerpool';
import {fileURLToPath} from 'url';
import {dirname} from 'path';

const __dirname = dirname(fileURLToPath(import.meta.url));

const pool = workerpool.pool(__dirname + '/extract-worker.js', {workerType: 'process'});

let resultCount = 0;

export async function extractDatasheets(pakFilePaths, outPath) {
    const start = Date.now();

    process.stdout.write('Extracting datasheets..\r');

    const recordsPromises = pakFilePaths.map(extractRelevantRecords);
    const records = [].concat(...(await Promise.all(recordsPromises)));
    /* Group the zip entries by the .pak file they live in, so each worker opens a pak only once */
    const groupedByPakFile =
        Object.entries(
            records.reduce((acc, entry) => {
                if (acc[entry.pakFile] == null) {
                    acc[entry.pakFile] = [];
                }
                acc[entry.pakFile].push(entry);
                return acc;
            }, {}));

    return new Promise(resolve => {
        for (let [pakFilePath, fileEntries] of groupedByPakFile) {
            const serializedParameters = JSON.stringify({pakFilePath, fileEntries, outPath});

            pool.exec('extractFromPak', [serializedParameters])
                .then(async () => {
                    resultCount += 1;
                    if (resultCount === groupedByPakFile.length) {
                        console.log('Extracting datasheets.. finished in ' + (Date.now() - start) + 'ms');
                        await pool.terminate();
                        resolve();
                    }
                });
        }
    });
}

async function extractRelevantRecords(filePath) {
    return new Promise(resolve => {
        const entries = [];

        open(filePath, {lazyEntries: true}, (err, zipFile) => {
            if (err) {
                throw err;
            }
            zipFile.readEntry();

            zipFile.on('entry', entry => {
                if (/\.(datasheet|xml)$/gm.test(entry.fileName)) {
                    entries.push({
                        pakFile: filePath,
                        offset: entry.relativeOffsetOfLocalHeader,
                        fileName: entry.fileName,
                        compressedSize: entry.compressedSize,
                        uncompressedSize: entry.uncompressedSize
                    });
                }
                zipFile.readEntry();
            });

            zipFile.once('end', () => resolve(entries));
        });
    });
}
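Taken together with extract-and-convert.mjs above, driving just the datasheet stage looks roughly like this; a sketch, where the pak glob and the output path are assumptions (the worker, datasheets/extract-worker.js, is not shown in this dump and presumably writes the raw .datasheet files under the output path):

```js
import {globby} from 'globby';
import {extract, convert} from './datasheets/extract-and-convert.mjs';

// Any *.pak under the game's assets directory; the path is an assumption.
const pakFilePaths = await globby('C:/Games/New World/assets/**/*.pak');

await extract(pakFilePaths, './out/'); // worker pool extracts .datasheet files under ./out/
await convert('./out/');               // rewrites each .datasheet in place as a ;-separated .csv
```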
--------------------------------------------------------------------------------
/viewer/index.html:
--------------------------------------------------------------------------------
<!-- The markup of this 109-line file did not survive the dump; only the title
     text "NW Model Viewer" remains. Judging by the line-number gaps, it carried
     an inline style block (lines 8-17) and an inline script block (lines 20-107)
     implementing the drag-and-drop GLTF viewer from step 7 of the README. -->

--------------------------------------------------------------------------------
/datasheets/datasheet-converter.mjs:
--------------------------------------------------------------------------------
import {promises as fs} from 'fs';
import {globby} from 'globby';

const amountOfColumnsOffset = 0x44;
const amountOfRowsOffset = 0x48;
const headersOffset = 0x5c;
const amountOfBytesInHeader = 12;
const amountOfBytesInCell = 8;

export async function convertDatasheets(path) {
    const start = Date.now();

    const filePaths = await globby(path + '**/*.datasheet');

    process.stdout.write('Converting datasheets..\r');

    for (let filePath of filePaths) {
        const data = await fs.readFile(filePath);

        const amountOfColumns = data.readInt32LE(amountOfColumnsOffset);
        const amountOfRows = data.readInt32LE(amountOfRowsOffset);

        const cellsOffset = headersOffset + amountOfColumns * amountOfBytesInHeader;
        const amountOfBytesInRow = amountOfBytesInCell * amountOfColumns;
        const stringsOffset = cellsOffset + amountOfRows * amountOfColumns * amountOfBytesInCell;

        const headers = [];
        for (let i = 0; i < amountOfColumns; i++) {
            const headerOffset = headersOffset + i * amountOfBytesInHeader;
            const stringValue = readStringValue(data, headerOffset);
            const type = data.readInt32LE(headerOffset + 8);
            headers.push({stringValue, type});
        }

        let sb = headers.map(h => readCString(data, stringsOffset, h.stringValue.stringOffset)).join(';') + '\n';

        const rows = [];
        for (let i = 0; i < amountOfRows; i++) {
            const cells = [];
            for (let j = 0; j < amountOfColumns; j++) {
                const cellOffset = cellsOffset + i * amountOfBytesInRow + j * amountOfBytesInCell;
                const cellValue = readCell(data, cellOffset);
                const columnType = headers[j].type;
                cells.push(parseCellValueToType(data, stringsOffset, cellValue, columnType));
            }
            rows.push(cells);
        }

        sb += rows.map(cells => cells.join(';')).join('\n');

        /* replace the 'datasheet' suffix with 'csv', keeping the dot */
        await saveFile(filePath.slice(0, -9) + 'csv', sb);
        await fs.unlink(filePath);
    }

    console.log('Converting datasheets.. finished in ' + (Date.now() - start) + 'ms');
}

function readCString(data, stringsOffset, value) {
    const offset = stringsOffset + value.readInt32LE(0);
    let lengthUntilNullTermination = 0;
    let nextByte;
    do {
        nextByte = data.readInt8(offset + lengthUntilNullTermination++);
    } while (nextByte !== 0);
    return data.slice(offset, offset + lengthUntilNullTermination - 1).toString();
}

function parseCellValueToType(data, stringsOffset, cellValue, type) {
    switch (type) {
        case 1:
            return readCString(data, stringsOffset, cellValue);
        case 2:
            return cellValue.readFloatLE(0);
        case 3:
            return !!cellValue.readInt32LE(0);
    }
}

function readCell(data, offset) {
    // The first four bytes of a cell are skipped; only the trailing four bytes
    // carry the value (a string-table offset, a float, or an int-as-boolean).
    return data.slice(offset + 4, offset + 8);
}

function readStringValue(data, offset) {
    const hash = data.slice(offset, offset + 4);
    const stringOffset = data.slice(offset + 4, offset + 8);
    return {hash, stringOffset};
}

async function saveFile(path, out) {
    const directory = path.slice(0, path.lastIndexOf('/'));
    await fs.mkdir(directory, {recursive: true});
    await fs.writeFile(path, out);
}
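For reference, the hard-coded offsets above imply the following .datasheet layout. This is inferred from the reader itself, not from an official format description:

```js
// .datasheet layout as implied by datasheet-converter.mjs (inferred, unofficial; all little-endian):
//
// 0x00         unparsed file header
// 0x44  int32  number of columns
// 0x48  int32  number of rows
// 0x5c         column headers, 12 bytes each:
//                +0  int32  name hash
//                +4  int32  offset of the column name inside the string table
//                +8  int32  column type (1 = string, 2 = float, 3 = boolean)
// then         rows x columns cells, 8 bytes each:
//                +0  int32  skipped by the converter
//                +4  4 B    value: string-table offset, float32, or int32 boolean
// then         string table of null-terminated C strings
```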
--------------------------------------------------------------------------------
/models/3d/3d-converter.mjs:
--------------------------------------------------------------------------------
import workerpool from 'workerpool';
import {dirname} from 'path';
import {fileURLToPath} from 'url';
import {promises as fs} from 'fs';

const __dirname = dirname(fileURLToPath(import.meta.url));
const pool = workerpool.pool(__dirname + '/converter-worker.js', {workerType: 'process'});

export async function convertModels(records, outPath) {
    const colladaFilePaths = await convertToColladaFiles(records, outPath + 'gltf/');
    await fixColladaFiles(colladaFilePaths);
    await convertToGltfFiles(colladaFilePaths);
    await pool.terminate();
}

async function convertToColladaFiles(records, outPath) {
    const toExtract = [];

    for (let record of records) {
        const itemId = record.itemId;

        const skin1Model = record.skin1.model.toLocaleLowerCase();
        const skin2Model = record.skin2.model.toLocaleLowerCase();

        if (skin1Model !== '') {
            const model1Name = skin1Model.slice(skin1Model.lastIndexOf('/') + 1);
            toExtract.push(outPath + itemId + '/' + model1Name);
        }

        if (skin2Model !== '') {
            const model2Name = skin2Model.slice(skin2Model.lastIndexOf('/') + 1);
            toExtract.push(outPath + itemId + '/' + model2Name);
        }
    }

    const start = Date.now();
    let finishedTasks = 0;
    const colladaFilePaths = [];

    return new Promise(resolve => {
        for (let modelPath of toExtract) {
            pool.exec('runColladaConverter', [modelPath])
                .always(createdColladaFilePath => {
                    finishedTasks += 1;
                    process.stdout.write('Converting to Collada files.. ' + Math.round(finishedTasks * 100 / toExtract.length) + '%\r');
                    colladaFilePaths.push(createdColladaFilePath);
                    if (finishedTasks === toExtract.length) {
                        console.log('Converting to Collada files.. finished in ' + (Date.now() - start) + 'ms');
                        resolve(colladaFilePaths);
                    }
                });
        }
    });
}

async function fixColladaFiles(colladaFilePaths) {
    const start = Date.now();

    for (let i = 0; i < colladaFilePaths.length; i++) {
        process.stdout.write('Fixing Collada files.. ' + Math.round(i * 100 / colladaFilePaths.length) + '%\r');

        const colladaFilePath = colladaFilePaths[i];

        const content = await fs.readFile(colladaFilePath, 'utf-8');

        const lines = content.split('\n');
        /* [the tag literal in the next line did not survive the dump; judging by the
           variable name it matched the Collada lines carrying normals data. The
           `const fixed = withoutNormals.join('\n')` prefix and the <init_from> tags
           in the replacement below are reconstructed the same way.] */
        const withoutNormals = lines.filter(line => !line.trim().startsWith('<…'));

        const fixed = withoutNormals.join('\n').replace(/<init_from>.*?[\/\\]?(.*)\.(png|dds|tif)<\/init_from>/gm, '<init_from>textures/$1.png</init_from>');

        await fs.writeFile(colladaFilePath, fixed);
    }

    console.log('Fixing Collada files.. finished in ' + (Date.now() - start) + 'ms');
}

export async function convertToGltfFiles(colladaFilePaths) {
    const start = Date.now();
    let finishedTasks = 0;

    return new Promise(resolve => {
        for (let colladaFilePath of colladaFilePaths) {
            pool.exec('runGltfConverter', [colladaFilePath])
                .always(() => {
                    finishedTasks += 1;
                    process.stdout.write('Converting to Gltf files.. ' + Math.round(finishedTasks * 100 / colladaFilePaths.length) + '%\r');
                    if (finishedTasks === colladaFilePaths.length) {
                        console.log('Converting to Gltf files.. finished in ' + (Date.now() - start) + 'ms');
                        resolve(colladaFilePaths);
                    }
                });
        }
    });
}
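fixColladaFiles redirects every texture reference to the flat local textures/ folder and swaps the extension to .png, which is what the texture workers further down actually emit. The rewrite can be checked in isolation (file name invented):

```js
const fix = s => s.replace(
    /<init_from>.*?[\/\\]?(.*)\.(png|dds|tif)<\/init_from>/gm,
    '<init_from>textures/$1.png</init_from>');

console.log(fix('<init_from>armor_diff.tif</init_from>'));
// -> <init_from>textures/armor_diff.png</init_from>
// Note: any directory components in the original reference survive into $1.
```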
--------------------------------------------------------------------------------
/pak-headers/pak-header-dumper.mjs:
--------------------------------------------------------------------------------
import {open} from 'yauzl';
import {promises as fs, existsSync} from 'fs';

export async function dumpPakHeaders(pakFilePaths, outPath) {
    const start = Date.now();

    process.stdout.write('Dumping headers..\r');

    let textures = [];
    let models = [];
    let materials = [];

    for (let pakFilePath of pakFilePaths) {
        const entries = await getHeaders(pakFilePath);
        textures = textures.concat(entries.textures);
        models = models.concat(entries.models);
        materials = materials.concat(entries.materials);
    }

    if (!existsSync(outPath + '/header-entries')) {
        await fs.mkdir(outPath + '/header-entries');
    }

    await fs.writeFile(outPath + '/header-entries/textures.json', JSON.stringify(toMapAndRemoveRedundantTextures(textures)));
    await fs.writeFile(outPath + '/header-entries/models.json', JSON.stringify(toMap(models)));
    await fs.writeFile(outPath + '/header-entries/materials.json', JSON.stringify(toMap(materials)));

    console.log('Dumping headers.. finished in ' + (Date.now() - start) + 'ms');
}

function getHeaders(pakFilePath) {
    return new Promise(resolve => {
        const textures = [];
        const models = [];
        const materials = [];

        open(pakFilePath, {lazyEntries: true}, (err, zipFile) => {
            if (err) {
                throw err;
            }
            zipFile.readEntry();

            zipFile.on('entry', entry => {
                let collectionToPushTo = null;

                if (/(\.dds(\.[0-9])?|\.tif(\.[0-9])?)$/gm.test(entry.fileName)) {
                    collectionToPushTo = textures;
                } else if (/(\.cgf|(?<…

/* [lines 45-63 of this file did not survive the dump. They evidently finish the
   model classification (the pattern starts with `.cgf` plus a lookbehind
   variant), classify materials, and, when collectionToPushTo is set, push the
   entry's pakFile, offset, fileName, compressedSize and uncompressedSize onto
   it before calling zipFile.readEntry() again - compare extractRelevantRecords
   in datasheet-extractor.mjs.] */

            zipFile.once('end', () => resolve({textures, models, materials}));
        });
    });
}

function toMapAndRemoveRedundantTextures(textureHeaders) {
    const groupedByFileName =
        textureHeaders
            .reduce((acc, textureHeader) => {
                const isFirst = textureHeader.fileName.endsWith('.dds') || textureHeader.fileName.endsWith('.tif');
                const fileNameWithoutExtension =
                    textureHeader.fileName.slice(0,
                        isFirst
                            ? -4
                            : (textureHeader.fileName.lastIndexOf('.') - 4));

                if (acc[fileNameWithoutExtension] == null) {
                    acc[fileNameWithoutExtension] = [textureHeader];
                } else {
                    acc[fileNameWithoutExtension].push(textureHeader);
                }

                return acc;
            }, {});

    /* Keep only the base file and the lexically last split per texture; the
       chunks in between are unused downstream. */
    for (let textureHeadersGroup of Object.values(groupedByFileName)) {
        textureHeadersGroup.sort((textureHeader1, textureHeader2) => textureHeader1.fileName.localeCompare(textureHeader2.fileName));
        if (textureHeadersGroup.length > 2) {
            textureHeadersGroup.splice(1, textureHeadersGroup.length - 2);
        }
    }

    return groupedByFileName;
}

function toMap(recordHeaders) {
    return recordHeaders
        .reduce((acc, recordHeader) => {
            const fileName = recordHeader.fileName;
            delete recordHeader.fileName;
            acc[fileName] = recordHeader;
            return acc;
        }, {});
}
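toMapAndRemoveRedundantTextures deals with New World's split textures: a base .dds/.tif carrying the header plus numbered .dds.N chunks. A quick trace with invented names:

```js
// Input entries as the grouping sees them (names invented):
const headers = ['a.dds', 'a.dds.1', 'a.dds.2', 'a.dds.3'].map(fileName => ({fileName}));
// All four reduce to the grouping key 'a' (extension and split suffix stripped).
// After sorting and splicing, only the first and the lexically last entry remain:
// { a: [{fileName: 'a.dds'}, {fileName: 'a.dds.3'}] }
// extract-worker.js later glues the DDS header of the first onto the data of the second.
```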
--------------------------------------------------------------------------------
/models/extract-and-assemble.mjs:
--------------------------------------------------------------------------------
import {globby} from 'globby';
import {promises as fs} from 'fs';

import {convertModels} from './3d/3d-converter.mjs';
import {extractModelsMaterialsTextures} from './pak-extractors/models-and-materials/get-models-materials-textures.mjs';

export async function extractAndAssemble(path) {
    const filePaths = await globby(path + '**/*.csv');

    /* Read item definitions */
    const itemDefinitions = [];
    for (let filePath of filePaths) {
        if (/javelindata_itemdefinitions_master_(named|common|quest|crafting|faction|store)\.csv$/.test(filePath)) {
            const data = await fs.readFile(filePath, 'utf-8');
            const lines = data.split('\n');

            const itemIdIndex = 0;
            const nameIndex = 1;
            const itemTypeIndex = 2;
            const armorAppearanceMIndex = 41;
            const armorAppearanceFIndex = 42;

            for (let line of lines) {
                const cells = line.split(';');
                if (cells[itemTypeIndex] === 'Armor' && (cells[armorAppearanceMIndex] !== '' || cells[armorAppearanceFIndex] !== '')) {
                    itemDefinitions.push({
                        itemId: cells[itemIdIndex],
                        name: cells[nameIndex],
                        armorAppearanceF: cells[armorAppearanceFIndex],
                        armorAppearanceM: cells[armorAppearanceMIndex]
                    });
                }
            }
        }
    }

    /* Read item appearance definitions */
    const itemAppearanceDefinitions = [];
    for (let filePath of filePaths) {
        if (/javelindata_itemappearancedefinitions\.csv$/.test(filePath)) {
            const data = await fs.readFile(filePath, 'utf-8');
            const lines = data.split('\n');

            const itemIdIndex = 0;
            const skin1Index = 27;
            const material1Index = 28;
            const isSkin1Index = 29;
            const mask1Index = 30;
            const skin2Index = 31;
            const material2Index = 32;
            const isSkin2Index = 33;
            const mask2Index = 34;

            for (let line of lines) {
                const cells = line.split(';');
                itemAppearanceDefinitions.push({
                    itemId: cells[itemIdIndex],
                    skin1: cells[skin1Index],
                    material1: cells[material1Index],
                    isSkin1: cells[isSkin1Index],
                    mask1: cells[mask1Index],
                    skin2: cells[skin2Index],
                    material2: cells[material2Index],
                    isSkin2: cells[isSkin2Index],
                    mask2: cells[mask2Index]
                });
            }
        }
    }

    /* Couple definitions */
    const records = [];
    for (let itemDefinition of itemDefinitions) {
        const itemAppearanceDefinition = itemAppearanceDefinitions.find(appearanceDefinition => itemDefinition.armorAppearanceM === appearanceDefinition.itemId);
        if (itemAppearanceDefinition == null) {
            /* No appearance row matches the male appearance id (the item may only
               define a female appearance); skip it instead of crashing below. */
            continue;
        }
        records.push({
            itemId: itemDefinition.itemId,
            name: itemDefinition.name,
            armorAppearanceM: itemDefinition.armorAppearanceM,
            armorAppearanceF: itemDefinition.armorAppearanceF,
            skin1: {
                model: itemAppearanceDefinition.skin1,
                material: itemAppearanceDefinition.material1,
                isSkin: itemAppearanceDefinition.isSkin1,
                mask: itemAppearanceDefinition.mask1
            },
            skin2: {
                model: itemAppearanceDefinition.skin2,
                material: itemAppearanceDefinition.material2,
                isSkin: itemAppearanceDefinition.isSkin2,
                mask: itemAppearanceDefinition.mask2
            }
        });
    }

    await extractModelsMaterialsTextures(records, path);

    await convertModels(records, path);

    /* Everything that is not a finished .gltf is an intermediate artifact */
    const filesToDelete = await globby(path + '**/!(*.gltf)', {expandDirectories: true});
    for (let fileToDelete of filesToDelete) {
        await fs.unlink(fileToDelete);
    }
}
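Each coupled record, as consumed by the extractors and the 3D converter, has this shape (all values invented for illustration):

```js
const exampleRecord = {
    itemId: 'armor_sample',                             // invented id
    name: 'Sample Armor',
    armorAppearanceM: 'sampleappearance',
    armorAppearanceF: 'sampleappearance_f',
    skin1: {
        model: 'objects/characters/sample_chest.skin',  // path as listed in the appearance sheet
        material: 'objects/characters/sample_chest.mtl',
        isSkin: '0',
        mask: ''
    },
    skin2: {model: '', material: '', isSkin: '', mask: ''}
};
```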
--------------------------------------------------------------------------------
/models/pak-extractors/models-and-materials/extract-worker.js:
--------------------------------------------------------------------------------
import {promises as fs} from 'fs';
import workerpool from 'workerpool';
import ffi from 'ffi-napi';
import sharp from 'sharp';
import {execSync} from 'child_process';

const lib = ffi.Library('oo2core_8_win64.dll', {
    'OodleLZ_Decompress': ['void', ['char *', 'int', 'char *', 'int', 'int', 'int', 'int', 'void *', 'void *', 'void *', 'void *', 'void *', 'void *', 'int']]
});

async function extractModelsAndMaterials(recordPath, skin1ModelHeader, skin1MaterialHeader, skin1FileName, skin2ModelHeader, skin2MaterialHeader, skin2FileName) {
    if (skin1ModelHeader != null) {
        await extractFromPak(skin1ModelHeader, recordPath + '/' + getFileName(skin1FileName));
    }

    if (skin1MaterialHeader != null) {
        const materialOutPath = recordPath + '/' + getMaterialFileName(skin1FileName);
        await extractFromPak(skin1MaterialHeader, materialOutPath);
    }

    if (skin2ModelHeader != null) {
        await extractFromPak(skin2ModelHeader, recordPath + '/' + getFileName(skin2FileName));
    }

    if (skin2MaterialHeader != null) {
        const materialOutPath = recordPath + '/' + getMaterialFileName(skin2FileName);
        await extractFromPak(skin2MaterialHeader, materialOutPath);
    }
}

async function extractTextures(texturesOutputPath, textureHeader, textureFileName) {
    await extractTextureFromPak(textureHeader, texturesOutputPath + getTextureFileName(textureFileName));
}

function getFileName(path) {
    return path.slice(path.lastIndexOf('/') + 1);
}

function getMaterialFileName(path) {
    const fileNameWithExtension = getFileName(path);
    return fileNameWithExtension.slice(0, fileNameWithExtension.lastIndexOf('.') + 1) + 'mtl';
}

function getTextureFileName(path) {
    return getFileName(path).replace('.tif', '.dds');
}

async function extract(header) {
    const fileHandle = await fs.open(header.pakFile, 'r');

    /* Offset 26 of a ZIP local file header holds the file name length, offset 28
       the extra field length; the compressed payload starts right after the fixed
       30-byte header plus those two variable-length parts. */
    const localHeader = Buffer.alloc(4);
    await fileHandle.read({buffer: localHeader, position: header.offset + 26});
    const fileNameLength = localHeader.readUInt16LE(0);
    const extraFieldLength = localHeader.readUInt16LE(2);

    const compressedData = Buffer.alloc(header.compressedSize);
    await fileHandle.read({
        buffer: compressedData,
        position: header.offset + 30 + fileNameLength + extraFieldLength
    });

    const uncompressedData = Buffer.alloc(header.uncompressedSize);
    lib.OodleLZ_Decompress(compressedData, header.compressedSize, uncompressedData, header.uncompressedSize, 0, 0, 0, null, null, null, null, null, null, 3);

    await fileHandle.close();

    return uncompressedData;
}

async function extractFromPak(header, savePath) {
    const data = await extract(header);
    await fs.writeFile(savePath, data);
}

async function extractTextureFromPak(textureHeader, savePath) {
    if (textureHeader == null) {
        console.log('JASPER ERROR', textureHeader, savePath);
        return;
    }

    if (textureHeader.length === 1) {
        // the grouped texture map stores arrays, even for unsplit textures
        await extractFromPak(textureHeader[0], savePath);
    } else {
        const firstFileData = await extract(textureHeader[0]);
        const secondFileData = await extract(textureHeader[1]);

        firstFileData[0x1c] = 0; // set mip count
        const header = firstFileData.slice(0, 0x94); // DDS magic + header + DX10 extension

        await fs.writeFile(savePath, Buffer.concat([header, secondFileData]));
    }

    /* to png */
    await convertDds(savePath);

    /* shrink */
    const pngPath = savePath.slice(0, -3) + 'png';
    const pngFile = await fs.readFile(pngPath);
    const {width, height} = await sharp(pngFile).metadata();
    const w = Math.round(width / 2);
    const h = Math.round(height / 2);
    await fs.writeFile(pngPath, await sharp(pngFile).resize(w, h).toBuffer());
}

async function convertDds(path) {
    const fileName = path.slice(path.lastIndexOf('/') + 1);
    const directory = path.slice(0, path.lastIndexOf('/'));

    if (fileName.endsWith('ddn.dds') || fileName.endsWith('ddna.dds')) {
        await convertSpecialDds(fileName, directory, path);
        return;
    }

    try {
        execSync(`texconv.exe "${fileName}" -ft png -y`, {env: process.env, cwd: directory});
        await fs.unlink(path);
    } catch (e) {
        await convertSpecialDds(fileName, directory, path);
    }
}

async function convertSpecialDds(fileName, directory, path) {
    try {
        execSync(`texconv.exe "${fileName}" -ft png -f rgba -y`, {env: process.env, cwd: directory});
        await fs.unlink(path);
    } catch (e) {
        console.log('Couldn\'t convert ' + directory + '\t' + fileName);
    }
}

workerpool.worker({
    extractModelsAndMaterials: extractModelsAndMaterials,
    extractTextures: extractTextures
});
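extract() above leans on the fixed layout of a ZIP local file header, which is why it reads the two lengths at offset + 26 and starts the payload at offset + 30. This part is standard ZIP, not game-specific:

```js
// ZIP local file header layout (fixed 30 bytes, then variable-length parts):
//
//  offset  size  field
//       0     4  signature 'PK\x03\x04'
//       4     2  version needed to extract
//       6     2  general purpose bit flag
//       8     2  compression method        (Oodle in New World paks)
//      10     4  last-modified time/date
//      14     4  CRC-32
//      18     4  compressed size
//      22     4  uncompressed size
//      26     2  file name length          <- read by extract()
//      28     2  extra field length        <- read by extract()
//      30   ...  file name, extra field, then the compressed data
```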
--------------------------------------------------------------------------------
/models/pak-extractors/models-and-materials/get-models-materials-textures.mjs:
--------------------------------------------------------------------------------
import {promises as fs} from 'fs';
import {fileURLToPath} from 'url';
import {dirname} from 'path';
import workerpool from 'workerpool';
import {globby} from 'globby';
const __dirname = dirname(fileURLToPath(import.meta.url));
const pool = workerpool.pool(__dirname + '/extract-worker.js', {workerType: 'process'});

export async function extractModelsMaterialsTextures(records, outPath) {
    const modelHeaders = JSON.parse(await fs.readFile(outPath + 'header-entries/models.json', 'utf-8'));
    const materialHeaders = JSON.parse(await fs.readFile(outPath + 'header-entries/materials.json', 'utf-8'));
    const textureHeaders = JSON.parse(await fs.readFile(outPath + 'header-entries/textures.json', 'utf-8'));

    await extractModelsAndMaterials(records, modelHeaders, materialHeaders, outPath + 'gltf/');
    await extractNecessaryTextures(records, textureHeaders, outPath + 'gltf/');

    await fs.unlink(outPath + 'header-entries/models.json');
    await fs.unlink(outPath + 'header-entries/materials.json');
    await fs.unlink(outPath + 'header-entries/textures.json');

    await pool.terminate();
}

async function extractModelsAndMaterials(records, modelHeaders, materialHeaders, outPath) {
    let finishedTasks = 0;

    const start = Date.now();

    return new Promise(async resolve => {
        for (let record of records) {
            const itemId = record.itemId;
            const skin1Model = record.skin1.model.toLocaleLowerCase();
            const skin1Material = record.skin1.material.toLocaleLowerCase();
            const skin2Model = record.skin2.model.toLocaleLowerCase();
            const skin2Material = record.skin2.material.toLocaleLowerCase();

            const recordPath = outPath + itemId;
            const texturesPath = recordPath + '/textures';
            await fs.mkdir(texturesPath, {recursive: true});

            pool.exec('extractModelsAndMaterials', [
                recordPath,
                skin1Model !== '' ? modelHeaders[skin1Model] : null,
                skin1Material !== '' ? materialHeaders[skin1Material] : null,
                skin1Model,
                skin2Model !== '' ? modelHeaders[skin2Model] : null,
                skin2Material !== '' ? materialHeaders[skin2Material] : null,
                skin2Model
            ])
                .always(() => {
                    finishedTasks += 1;
                    process.stdout.write('Extracting models and materials.. ' + Math.round(finishedTasks * 100 / records.length) + '%\r');
                    if (finishedTasks === records.length) {
                        console.log('Extracting models and materials.. finished in ' + (Date.now() - start) + 'ms');
                        resolve();
                    }
                });
        }
    });
}

async function extractNecessaryTextures(records, textureHeaders, outPath) {
    const materialFilePaths = await globby(outPath + '**/@(*.mtl)');

    const toExtract = [];

    let finishedTasks = 0;

    const start = Date.now();

    for (let materialFilePath of materialFilePaths) {
        const texturesOutputPath = materialFilePath.slice(0, materialFilePath.lastIndexOf('/')) + '/textures/';
        const requiredTextureFiles = await getRequiredTexturePaths(materialFilePath);
        for (let requiredTextureFile of requiredTextureFiles) {
            toExtract.push({
                texturesOutputPath,
                /* strip the 4-character extension so the key matches the grouped texture map */
                textureHeader: textureHeaders[requiredTextureFile.slice(0, -4)],
                requiredTextureFile
            });
        }
    }

    return new Promise(async resolve => {
        for (let {texturesOutputPath, textureHeader, requiredTextureFile} of toExtract) {
            pool.exec('extractTextures', [
                texturesOutputPath,
                textureHeader,
                requiredTextureFile
            ])
                .always(() => {
                    finishedTasks += 1;
                    process.stdout.write('Extracting necessary textures.. ' + Math.round(finishedTasks * 100 / toExtract.length) + '%\r');
                    if (finishedTasks === toExtract.length) {
                        console.log('Extracting necessary textures.. finished in ' + (Date.now() - start) + 'ms');
                        resolve();
                    }
                });
        }
    });
}

async function getRequiredTexturePaths(materialFilePath) {
    const materialFileContent = await fs.readFile(materialFilePath, 'utf-8');
    const fileSplit = materialFileContent.split('File="');
    if (fileSplit.length > 1) {
        return [...new Set(fileSplit
            .slice(1)
            .map(part => part.split('"')[0])
            .filter(imageUrl => imageUrl.endsWith('.dds') || imageUrl.endsWith('.tif'))
            .map(imageUrl => imageUrl.toLocaleLowerCase()))];
    }
    /* A material without File="..." references would otherwise return undefined
       and make the caller iterate over it. */
    return [];
}
--------------------------------------------------------------------------------
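getRequiredTexturePaths simply scans the raw .mtl text for File="..." attributes. Given a material fragment like the following (invented for illustration), it yields the de-duplicated, lower-cased texture paths:

```js
const mtl = '<Texture Map="Diffuse" File="Objects/Characters/sample_diff.dds"/>\n' +
            '<Texture Map="Bumpmap" File="Objects/Characters/sample_ddna.dds"/>';

// getRequiredTexturePaths would reduce this to:
// ['objects/characters/sample_diff.dds', 'objects/characters/sample_ddna.dds']
```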