├── SIMSUN2.7z ├── GlyphEntry.py ├── ChangeBCW_main.py ├── ExportNftr_main.py ├── InjectTXTNarc.py ├── MakeFonts_main.py ├── CodeListTotal.py ├── binary16.py ├── CodeListCover.py ├── SetCharCounts.py ├── ExtractNarc.py ├── ExtractNftr.py ├── MakeNarc.py ├── ChangeBitmap.py ├── CodeList_main.py ├── CMP_Div_main.py ├── GetDivision.py ├── MakeFonts.py ├── narc.py ├── ChangeCodeList.py ├── MakeCMP_Div.py ├── GetCMP.py ├── ChangeWidth.py ├── README.md ├── InjectNftr.py ├── CheckHelper.py ├── MakeString.py ├── nftr.py └── FreetypeMakeFonts.py /SIMSUN2.7z: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WD8844/Project-PMBW-TEXT-experiment/HEAD/SIMSUN2.7z -------------------------------------------------------------------------------- /GlyphEntry.py: -------------------------------------------------------------------------------- 1 | class GlyphEntry:#字模类 2 | def __init__(self, offset=0, width=0 ,rows=0, top=0, left=0, charCode=None, buffer=None): 3 | self.offset = offset#字模地址 4 | self.width = width#字模宽度(单扫描行的长度) 5 | self.rows = rows#字模的扫描行总数(高度) 6 | self.top = top 7 | self.left = left 8 | self.charCode = charCode#字符编码 9 | self.buffer = buffer#字模数据 10 | 11 | if __name__ == "__main__": 12 | test = GlyphEntry 13 | test.offset = 24 14 | print(test.offset) 15 | -------------------------------------------------------------------------------- /ChangeBCW_main.py: -------------------------------------------------------------------------------- 1 | from ChangeCodeList import * 2 | from ChangeWidth import * 3 | from ChangeBitmap import * 4 | import os 5 | if __name__ == "__main__": 6 | try: 7 | filename = input("请输入已经导出的Nftr的原Narc文件名:")#(一般命名为a023) 8 | dirpath = filename + '_extr/' 9 | if os.path.exists(dirpath): 10 | NCpath = input("请输入码表路径:") 11 | ChangeCodeList(NCpath,filename) 12 | ChangeWidth(NCpath,filename) 13 | ChangeBitmap(NCpath,filename) 14 | else: 15 | raise 
FileExistsError(f"{dirpath}不存在,请确认是否已利用ExtractNarc.py从目标Narc中正确提取了Nftr文件") 16 | except Exception as e: 17 | print(f"錯誤: {e},请重新操作。") 18 | input("按任意键结束...") 19 | -------------------------------------------------------------------------------- /ExportNftr_main.py: -------------------------------------------------------------------------------- 1 | from ExtractNftr import * 2 | import os 3 | if __name__ == "__main__": 4 | try: 5 | filename = input("请输入预导出Nftr的原Narc文件名:")#(一般命名为a023) 6 | dirpath = filename + '_extr/' 7 | if os.path.exists(dirpath): 8 | num = input("请输入需要导出的Nftr文件总数(例:BW只需要修改前3个字库,输入3):")#(BW只需要修改前3个字库,输入3) 9 | if len(os.listdir(dirpath)) > int(num): 10 | for i in range(int(num)): 11 | filepath = dirpath +filename+"-" + str(i) 12 | ExtractNftr(filepath) 13 | else: 14 | raise LookupError(f"预处理文件数超过{dirpath}内的总文件数") 15 | else: 16 | raise FileExistsError(f"{dirpath}不存在,请确认是否已利用ExtractNarc.py从目标Narc中正确提取了Nftr文件") 17 | except Exception as e: 18 | print(f"錯誤: {e},请重新操作。") 19 | input("按任意键结束...") 20 | -------------------------------------------------------------------------------- /InjectTXTNarc.py: -------------------------------------------------------------------------------- 1 | import MakeString 2 | import os 3 | #批量将txt导入各分节,并加密 4 | def InjectNarcFiles_byTXT(dirpath,encoding = 'utf-16'): 5 | fileslist = os.listdir(dirpath) 6 | for filepath in fileslist: 7 | if '.txt' in filepath or "CMP" in filepath: 8 | continue 9 | if dirpath[-1]!='/': 10 | dirpath += '/' 11 | aimpath = dirpath + filepath 12 | with open(aimpath + '.txt', 'r',encoding=encoding)as txtf: 13 | raw = txtf.readlines() 14 | texts = MakeString.maketxtput(raw)#将文本处理为gen5put()可处理的entry列表形式 15 | print(filepath) 16 | #print(texts) 17 | inputs = MakeString.gen5put(texts) 18 | with open(aimpath, 'wb')as f: 19 | f.write(inputs) 20 | 21 | if __name__ == "__main__": 22 | import sys 23 | if len(sys.argv) != 2: 24 | print("使用方法: python .\InjectTXTNarc.py <拟定的新Narc文件名>") 25 | exit() 26 | else: 27 | 
dirpath = sys.argv[1]+"_extr/" 28 | try: 29 | InjectNarcFiles_byTXT(dirpath) 30 | except Exception as e: 31 | print(f"錯誤: {e},请重新操作。") 32 | input("按任意键结束...") 33 | -------------------------------------------------------------------------------- /MakeFonts_main.py: -------------------------------------------------------------------------------- 1 | from MakeFonts import main_CHS 2 | def MakeFonts_BW_CHS(CHSCodeSource,FONT,narcfilename,bpp=2): 3 | for num in range(3): 4 | if num == 0: 5 | fontsize = 12 6 | width = fontsize 7 | height = fontsize + 3 8 | method = "rightdownShadow" 9 | elif num == 1: 10 | fontsize = 10 11 | width = fontsize 12 | height = fontsize 13 | method = "rightdownShadow" 14 | else: 15 | fontsize = 10 16 | width = fontsize + 1 17 | height = fontsize + 3 18 | method = "blodShadow" 19 | num = str(num) 20 | reshape = (width,height) 21 | ofilepath = narcfilename+'-'+ num + '-chs' 22 | main_CHS(CHSCodeSource,FONT,ofilepath,bpp,fontsize = fontsize,reshape = reshape,method = method) 23 | if __name__ == "__main__": 24 | import sys 25 | if len(sys.argv) != 4: 26 | print("使用方法: python .\MakeFonts_main.py <码表> <字形文件(*.ttf|*.ttc)> ") 27 | exit() 28 | else: 29 | CHSCodeSource = sys.argv[1] 30 | FONT = sys.argv[2] 31 | narcfilename = sys.argv[3] 32 | MakeFonts_BW_CHS(CHSCodeSource,FONT,narcfilename) 33 | -------------------------------------------------------------------------------- /CodeListTotal.py: -------------------------------------------------------------------------------- 1 | #从nftr导出的所有序码表按顺序整合为一个码表 2 | def Total_txt(nftrfilepath,dirpath = "./"): 3 | if dirpath != "./": 4 | if dirpath[-1]!="/": 5 | dirpath += "/" 6 | llists = []#将所有序码表中的[字模序,字符]读入 7 | for i in range(8):#每个字库都有8个序码表 8 | lpath =dirpath + nftrfilepath + '序码表_' + str(i) + '.txt' 9 | with open(lpath,"r",encoding='utf-16') as lfile: 10 | raw = lfile.read().split("\n") 11 | for c in raw: 12 | if c != "": 13 | if "==" in c: 14 | llists.append([c.split("=")[0],"="]) 15 | elif "NULL" in c: 16 
| pass 17 | else: 18 | llists.append(c.split("=")) 19 | llists = sorted(llists, key=lambda x: int(x[0])) 20 | totalpath = nftrfilepath + '_Total.txt' 21 | with open(totalpath,"w",encoding='utf-16')as w: 22 | for l in llists: 23 | s = l[0] + "=" +l[1] + "\n" 24 | w.write(s) 25 | return totalpath 26 | if __name__ == "__main__":#Just for the code test. 27 | nftrfile = 'a023' 28 | dirpath = 'a023_extr/' 29 | for i in range(3): 30 | nftrfilepath = nftrfile + "-" + str(i) 31 | Total_txt(nftrfilepath,dirpath = dirpath) 32 | 33 | -------------------------------------------------------------------------------- /binary16.py: -------------------------------------------------------------------------------- 1 | import array 2 | 3 | class binaryreader: 4 | def __init__(self, string): 5 | self.s = array.array('H',string)#相当于初始化了一个2Byte字节流读取窗格 6 | self.ofs = 0 7 | self.ReadUInt16 = self.read16 8 | self.ReadUInt32 = self.read32 9 | self.Seek = self.seek 10 | def read16(self):#2Byte窗格扫描字节流s 每次调用顺序向后读2Byte(即16bit) 11 | ret = self.s[self.ofs] 12 | self.ofs += 1 13 | return ret 14 | def read32(self):#4Byte窗格扫描字节流s 每次调用顺序向后读4Byte(即32bit) 15 | ret = self.s[self.ofs] | (self.s[self.ofs+1]<<16) 16 | self.ofs += 2 17 | return ret 18 | def seek(self, ofs):#返回当前文件指针的位置 19 | self.ofs = ofs>>1 20 | 21 | class binarywriter: 22 | def __init__(self): 23 | self.s = array.array('H')#相当于初始化了一个2Byte字节流写入窗格 24 | def write16(self, i):#按2Byte将i作为预写入字节流加入预写入表s 25 | self.s.append(i) 26 | def write32(self, i):#按4Byte将i作为预写入字节流加入预写入表s 27 | self.s.append(i&0xFFFF) 28 | self.s.append((i>>16)&0xFFFF) 29 | def writear(self, a):#按2Byte将a作为预写入字节流加入预写入表s 30 | self.s.extend(a) 31 | def tobytes(self): 32 | return self.s.tobytes()#得到预写入的整个字节流 33 | def toarray(self):#返回整个预写入表s 34 | return self.s 35 | def pos(self):#当前文件指针的位置 36 | return len(self.s)<<1 37 | -------------------------------------------------------------------------------- /CodeListCover.py: 
-------------------------------------------------------------------------------- 1 | def Cover_txt(totalCLpath,coverL,newCLpath = "NewCodeList.txt"): 2 | with open(totalCLpath,"r",encoding='utf-16')as total: 3 | totalraws = total.read().split("\n") 4 | totalL = [] 5 | for i in range(len(totalraws)):#已经做好的全码表 6 | if totalraws[i]!="": 7 | trans = totalraws[i].split("=") 8 | #print(trans) 9 | if "==" in totalraws[i]: 10 | totalL.append([trans[0],"="]) 11 | else: 12 | totalL.append([trans[0],trans[1]]) 13 | pos = -1 14 | for i in range(len(totalL)): 15 | if pos == -1: 16 | if totalL[i][1] == coverL[0]:#锁定中文开头 17 | if len(totalL)-i < len(coverL): 18 | raise LookupError(f"超过可覆盖范围!原始码表可覆盖长度为{len(totalL)-i},实际所需覆盖长度为{len(coverL)}") 19 | else: 20 | pos += 2 21 | print(f"预替换的第1个字符为编号{i}的{totalL[i]},开始替换。") 22 | elif (pos > 0) and (pos < len(coverL)): 23 | totalL[i][1] = coverL[pos] 24 | print(f"已替换编号为{totalL[i][0]}的字符为{coverL[pos]}") 25 | pos += 1 26 | else: 27 | continue 28 | with open(newCLpath,"w",encoding='utf-16')as w: 29 | for l in totalL: 30 | s = l[0] + "=" +l[1] + "\n" 31 | w.write(s) 32 | 33 | if __name__ == "__main__":#Just for the code test. 
34 | from SetCharCounts import * 35 | totalCLpath = "a023-0_Total.txt" 36 | coverL = SetCharCounts("./Counts") 37 | Cover_txt(totalCLpath,coverL,newCLpath = "NewCodeList.txt") 38 | -------------------------------------------------------------------------------- /SetCharCounts.py: -------------------------------------------------------------------------------- 1 | import os 2 | #统计出现的目标字符 3 | def SetCharCounts(dirpath, chrange = "CHS"): 4 | subline = '\n------------------------------\n' 5 | chl = [] 6 | def count(dirpath): 7 | if dirpath[-1] != "/": 8 | dirpath += "/" 9 | fileslist = os.listdir(dirpath) 10 | chl = [] 11 | for filepath in fileslist: 12 | if ".txt" in filepath:#只统计*.txt 13 | with open(dirpath+filepath,"r",encoding="utf-16") as r: 14 | t = r.read() 15 | texts = t.split(subline) 16 | for i in range(len(texts)): 17 | #print(texts[i]) 18 | if "_" in texts[i]: 19 | continue 20 | else: 21 | cl = list(set(texts[i]))#利用set分字且去掉重复的字 22 | #print(cl) 23 | chl.extend(cl) 24 | return chl 25 | if type(dirpath) == list:#说明有多个路径下的*.txt需要统计 26 | for dirp in dirpath: 27 | chl.extend(count(dirp)) 28 | else: 29 | chl = count(dirpath) 30 | chl = list(set(chl))#合并去掉与之前的文本中相重复的字 31 | #print(chl) 32 | #转换为字符码并排序 33 | chol = [] 34 | charl = [] 35 | for j in range(len(chl)): 36 | chol.append(ord(chl[j])) 37 | chol.sort()#默认按字符码升序排列 38 | if chrange == "CHS":#统计出现的所有常用中文字 39 | for o in chol: 40 | if (o >= 0x4E00) and (o <= 0x9FFF):#只要中文部分 41 | charl.append(chr(o)) 42 | else: 43 | pass#待开发 44 | return charl#返回统计好的按字符码升序排列的目标字符列表 45 | if __name__ == "__main__":#Just for the code test. 
46 | print(SetCharCounts(['./B(CH)2_extr','./B(CH)3_extr'])) 47 | -------------------------------------------------------------------------------- /ExtractNarc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import narc, MakeString 3 | #解析解包目标Narc 4 | 5 | def ExtractNarc(filepath,dirpath,type = 'text'): 6 | try: 7 | os.mkdir(dirpath) 8 | print(dirpath+" 已创建!") 9 | except FileExistsError: 10 | print("对应文件夹{}已存在。".format(dirpath)) 11 | pass 12 | with open(filepath, 'rb')as file: 13 | rawdata = file.read() 14 | Narc = narc.NARC(rawdata) 15 | if type == "text":#对于文本,批量导出并解密其中的所有分节 16 | n = 0 17 | for f in Narc.gmif.files: 18 | tfilepath = dirpath+'/'+filepath+'-'+str(n) 19 | print(tfilepath) 20 | texts = MakeString.gen5get(f) 21 | with open(tfilepath, 'wb')as nf: 22 | nf.write(f) 23 | with open(tfilepath + '.txt','w',encoding='utf16') as w: 24 | for line in texts: 25 | w.writelines(line) 26 | n += 1 27 | else: 28 | n = 0 29 | for f in Narc.gmif.files: 30 | tfilepath = dirpath+'/'+ filepath+'-'+str(n) 31 | print(tfilepath) 32 | with open(tfilepath, 'wb')as nf: 33 | nf.write(f) 34 | n += 1 35 | 36 | 37 | if __name__ =="__main__": 38 | import sys 39 | # 检查是否提供了参数 40 | 41 | if len(sys.argv) != 3: 42 | print("使用方法: python .\ExtractNarc.py <处理类型:输入text或file>") 43 | else: 44 | filepath = sys.argv[1] 45 | type = sys.argv[2] 46 | try: 47 | dirpath = filepath+'_extr' 48 | ExtractNarc(filepath,dirpath,type = type) 49 | except FileNotFoundError: 50 | print(f"未找到路径为{filepath}的文件,请重新操作。") 51 | input("按任意键结束...") 52 | 53 | -------------------------------------------------------------------------------- /ExtractNftr.py: -------------------------------------------------------------------------------- 1 | import csv 2 | from nftr import * 3 | 4 | def ExtractNftr(filepath): 5 | with open(filepath,'rb')as f: 6 | f.seek(0) 7 | rawdata = f.read() 8 | print("len(rawdata)",len(rawdata)) 9 | nftr = NFTR(rawdata) 10 | with open(filepath + 
"宽度表.csv","w",newline="")as w: 11 | writer = csv.writer(w) 12 | writer.writerow(["loc","left","width","advance"]) 13 | for i in range(len(nftr.fontsdata)): 14 | l = [i] 15 | l.extend(nftr.fontsdata[i]) 16 | writer.writerow(l) 17 | with open(filepath + "CWDH.csv","w",newline="")as w: 18 | writer = csv.writer(w) 19 | writer.writerow(["loc","width","tag"]) 20 | widthtable = nftr.cwdh.WidthTable 21 | for i in range(len(widthtable)): 22 | trans = [i] 23 | trans.extend(widthtable[i]) 24 | writer.writerow(trans) 25 | with open(filepath + ".bitmap","wb")as w: 26 | for bitmap in nftr.bitmaps: 27 | w.write(bitmap) 28 | i = 0 29 | for cmap in nftr.CMAPTable: 30 | idxs = list(cmap.CodeTableDict.keys()) 31 | codes = list(cmap.CodeTableDict.values()) 32 | with open(filepath +"序码表_"+str(i)+ ".txt","w",encoding="utf16")as w: 33 | for j in range(len(idxs)): 34 | s = str(idxs[j]) + "=" + chr(codes[j])+"\n" 35 | w.write(s) 36 | i += 1 37 | 38 | 39 | 40 | if __name__ =="__main__": 41 | import sys 42 | if len(sys.argv) != 2: 43 | print("使用方法: python .\ExtractNftr.py ") 44 | else: 45 | filepath = sys.argv[1] 46 | try: 47 | ExtractNftr(filepath) 48 | except FileNotFoundError: 49 | print(f"未找到文件夹{filepath},请重新操作。") 50 | input("按任意键结束...") 51 | -------------------------------------------------------------------------------- /MakeNarc.py: -------------------------------------------------------------------------------- 1 | import narc 2 | import re 3 | import os 4 | import struct 5 | #Narc文件打包 6 | 7 | def MakeNarc(aimfile, dirpath): 8 | #构造按名称数字排序的文件名列表 9 | dirlist = os.listdir(dirpath) 10 | pathlist = [] 11 | foward = dirlist[0].split('-')[0] + '-' 12 | for filepath in dirlist: 13 | if '.' 
in filepath or "CMP" in filepath:#文件夹内需要打包的文件没有后缀,因此需保证所有没有后缀的文件是打包文件 14 | continue 15 | pathlist.append(filepath.split('-')[1]) 16 | pathlist.sort(key=lambda x: int(re.findall(r'\d+', x)[0]))#按数字大小从小到大排序 17 | for i in range(len(pathlist)): 18 | pathlist[i] = foward + pathlist[i] 19 | 20 | rawdata = [] 21 | offset = 0 22 | NewNarc = narc.NARC(rawdata) 23 | for filepath in pathlist: 24 | aimfilepath = dirpath + '/' + filepath 25 | with open(aimfilepath, 'rb')as rawfile: 26 | raw = rawfile.read() 27 | #print(filepath) 28 | #print(len(raw)) 29 | table = struct.pack('II', offset, offset+len(raw)) 30 | NewNarc.btaf.table.append(table) 31 | rawdata.append(raw) 32 | offset += len(raw) 33 | 34 | #完善btaf信息 35 | NewNarc.btaf.header[0] = NewNarc.btaf.header[0] + len(NewNarc.btaf.table)*8 #header长度+btaf大小即偏移地址表总大小 36 | NewNarc.btaf.header[1] = len(rawdata)#文件数据分节数 37 | 38 | #完善gmif信息 39 | #NewNarc.gmif.size = NewNarc.gmif.size + offset#header长度+文件数据集总长度??? 40 | # 虽然narc中的toString写入逻辑会按照gmif长度重新给gmif.size赋值,但为了正确计算narc的header,必须在此处给出gmif.size??? 
41 | NewNarc.gmif.files = rawdata 42 | 43 | #完善narc信息 44 | NewNarc.header[1] = 16 + NewNarc.btaf.header[0] + NewNarc.btnf.header[0] + len(NewNarc.gmif.toString())#gmif可能存在填充,因此只有写入长度算数 45 | 46 | with open('New_'+ aimfile, 'wb')as f: 47 | NewNarc.toFile(f) 48 | 49 | if __name__ == "__main__": 50 | import sys 51 | if len(sys.argv) != 2: 52 | print("使用方法: python .\MakeNarc.py ") 53 | else: 54 | aimfile = sys.argv[1] 55 | dirpath = aimfile + '_extr' 56 | try: 57 | if os.path.exists(dirpath): 58 | MakeNarc(aimfile, dirpath) 59 | print(f"已完成打包,打包后的文件是在与本程式脚本同目录下的{'New_'+aimfile}.") 60 | else: 61 | raise FileExistsError(f"{dirpath}不存在,请确认是否已利用ExtractNarc.py从目标Narc中正确提取了Nftr文件") 62 | except Exception as e: 63 | print(f"错误:{e},请重新操作。") 64 | input("按任意键结束...") 65 | 66 | -------------------------------------------------------------------------------- /ChangeBitmap.py: -------------------------------------------------------------------------------- 1 | def ChangeBitmap(newCodeListpath,narcFilename,Fnum=3,encoding= 'utf-16'): 2 | with open(newCodeListpath,"r",encoding= encoding)as cf:# Make sure the first and the last code of Chinese characters in codelist 3 | raws = cf.read().split("\n") 4 | flag = 0 5 | for raw in raws: 6 | if "==" in raw: 7 | continue 8 | trans = raw.split("=") 9 | bo = (ord(trans[1]) >= 0x4E00) and (ord(trans[1]) <= 0x9FFF)#中文编码范围 10 | if flag == 0 and bo: 11 | first = int(trans[0]) 12 | flag = -1 13 | elif not bo and flag == -1: 14 | last = int(trans[0])-1#上一个才是最后 15 | break 16 | #print(first) 17 | #print(last) 18 | print(f"修改字模序号范围为:{first}~{last}") 19 | for num in range(Fnum): 20 | filenum = str(num) 21 | file = narcFilename+"-" + filenum 22 | chsfile = file + "-chs" 23 | filepath = narcFilename+"_extr/" + file 24 | sizedict = {"0":45,"1":25,"2":36} 25 | size = sizedict[filenum] 26 | 27 | def buffersplit(buffer,size): 28 | sp = [] 29 | for i in range(0,len(buffer),size): 30 | sp.append(buffer[i:i+size]) 31 | #print(len(sp)) 32 | return sp 33 | 34 | with 
open(chsfile,"rb")as f: 35 | chsbuffer = f.read() 36 | schsbuffer = buffersplit(chsbuffer,size) 37 | #print(len(schsbuffer[0])) 38 | #print(schsbuffer) 39 | with open(filepath + ".bitmap","rb")as f: 40 | buffer = f.read() 41 | sbuffer = buffersplit(buffer,size) 42 | 43 | #print(len(buffer)) 44 | #print(len(sbuffer)) 45 | 46 | with open(filepath + "_new.bitmap","wb")as w: 47 | for i in range(len(schsbuffer)): 48 | sbuffer[first + i] = schsbuffer[i] 49 | w.writelines(sbuffer) 50 | if __name__ =="__main__": 51 | import sys,os 52 | if len(sys.argv) != 3: 53 | print("使用方法: python .\ChangeBitmap.py <新码表> ") 54 | else: 55 | NCpath = sys.argv[1] 56 | narcname = sys.argv[2] 57 | extrpath = narcname+"_extr/" 58 | try: 59 | if os.path.exists(extrpath): 60 | ChangeBitmap(NCpath,narcname) 61 | else: 62 | raise FileExistsError(f"{extrpath}不存在,请确认是否已利用ExtractNarc.py从目标Narc中正确提取了Nftr文件") 63 | except Exception as e: 64 | print(f"錯誤: {e},请重新操作。") 65 | input("按任意键结束...") 66 | -------------------------------------------------------------------------------- /CodeList_main.py: -------------------------------------------------------------------------------- 1 | import CodeListCover,CodeListTotal,os 2 | from SetCharCounts import * 3 | if __name__ == "__main__": 4 | print("注:此脚本仅用于处理Pokemon Black and White(宝可梦黑白)Nftr的3个中文字库的制作,\n仅执行常用中文部分(0x4E00~0x9FFF)的覆盖操作。\n") 5 | nftrfile = input("请输入Nftr对应的原Narc文件名:")#a023 6 | dirpath = nftrfile+'_extr/' 7 | c = input("是否已有完整的原始码表?输入Y为是,N为否:") 8 | try: 9 | if os.path.exists(dirpath): 10 | if c == "Y": 11 | totalCLpath = input("请输入原始码表的文件路径:") 12 | elif c == "N": 13 | tlist = [] 14 | for i in range(3): 15 | nftrfilepath = nftrfile + "-" + str(i) 16 | totalpath = CodeListTotal.Total_txt(nftrfilepath,dirpath = dirpath) 17 | tlist.append(totalpath) 18 | i = input(f"已在程式脚本同目录下生成3个字库对应的全码表\n{tlist[0]}、\n{tlist[1]}、\n{tlist[2]},\n这3个码表的内容应当完全相同。" 19 | f"\n确认无误后,请任选一个码表作为原始码表(输入0,1,2中的任意一个数字):") 20 | totalCLpath = tlist[int(i)] 21 | else: 22 | 
input("未给出正确指令,请按任意键结束...") 23 | exit() 24 | c = input("翻译后的文本*.txt放在多个文件夹中吗?输入Y为是,N为否:") 25 | if c == "Y": 26 | textspath = [] 27 | pos = 1 28 | while True: 29 | tp = input(f"请输入第{pos}个路径(输入“END”或按ENTER结束):") 30 | if tp == "" or tp == "END": 31 | break 32 | else: 33 | pos += 1 34 | textspath.append(tp) 35 | elif c == "N": 36 | textspath = input("请输入翻译后的文本*.txt所在的文件夹路径:") 37 | else: 38 | input("未给出正确指令,请按任意键结束...") 39 | exit() 40 | coverL = SetCharCounts(textspath) 41 | tbl= "CHS.TBL" 42 | with open(tbl,"w",encoding='utf-16')as w: 43 | for i in range(len(coverL)): 44 | s = str(i) + "=" +coverL[i]+"\n" 45 | w.write(s) 46 | input(f"已在程式脚本同目录下生成了名为{tbl}的中文编码表,专用于制作中文字库。\n确认无误后,请按任意键继续后续操作:") 47 | nclname = "NewCodeList.txt" 48 | CodeListCover.Cover_txt(totalCLpath,coverL,newCLpath = nclname) 49 | input(f"已在程式脚本同目录下生成了名为{nclname}的新码表。\n全操作完毕,请按任意键结束...") 50 | else: 51 | raise FileExistsError(f"{dirpath}不存在,请确认是否已利用ExtractNarc.py从目标Narc中正确提取了Nftr文件") 52 | except Exception as e: 53 | print(f"錯誤: {e},请重新操作。") 54 | input("按任意键结束...") 55 | 56 | -------------------------------------------------------------------------------- /CMP_Div_main.py: -------------------------------------------------------------------------------- 1 | from MakeCMP_Div import * 2 | def main(jpdir,chdir,nstr,opath = None,method = None,path = None,blocknums = 2,encoding = 'utf-16',newdir = True,name = None): 3 | jpdirpath = jpdir + nstr 4 | chdirpath = chdir + nstr 5 | if method == "CMP": 6 | CMPdirpath = MakeCMP(jpdirpath,chdirpath,encoding = encoding,mknewdir = newdir) 7 | DivCMP(CMPdirpath,blocknums = blocknums,encoding = encoding,mknewdir = newdir) 8 | elif method == "Restore": 9 | CMPdirpath = path 10 | Div_Restore(CMPdirpath,blocknums = blocknums,encoding = encoding,fmnewdir = newdir) 11 | path = MakeCMP_TH(CMPdirpath,dirpath = opath,encoding = encoding,name = name) 12 | return path 13 | 14 | if __name__ == "__main__": 15 | import os 16 | print("注:此脚本仅用于处理Pokemon Black and White(宝可梦黑白)的文本。\n") 17 
| try: 18 | jpdir = input("※由命名规则,源Narc文件名应当是:版本(语言)编号,例如B(JP)2\n请输入当前目录下已利用ExtractNarc.py导出的文本对应的源Narc文件名:") 19 | nstr = '_extr' 20 | ft = jpdir[0] + jpdir[-1] + "_CMP" 21 | CMPdirpath = jpdir + nstr+"/" + ft 22 | c = input("\n※分片制造对照文本请输入:CMP\n※由已经制造的对照文本合并复原请输入:Restore\n请选择操作:") 23 | if os.path.exists(jpdir+nstr): 24 | if c == "CMP": 25 | main(jpdir,jpdir,nstr,method = "CMP",path = None,blocknums = 2,encoding = 'utf-16',newdir = True) 26 | print(f"已完成对比文本的制造,在{CMPdirpath}内各分块文件夹下。\n※请无视{CMPdirpath}下的txt文本,这些文本是没有分块的对照文本,属于程式处理的中间产物,请不要动它。※\n如欲做文本翻译,请对同级目录分块文件夹下的txt做后续处理。") 27 | input("按任意键结束...") 28 | elif c == "Restore": 29 | chdir = input("\n※由命名规则,建议是:版本(语言)编号,例如B(CH)2\n请输入拟定创造的新Narc文件名:") 30 | opath = chdir+nstr 31 | if os.path.exists(opath): 32 | print(f"{opath}已存在。") 33 | else: 34 | os.mkdir(opath) 35 | newpath = main(jpdir,chdir,nstr,opath=opath,method = "Restore",path = CMPdirpath,blocknums = 2,encoding = 'utf-16',newdir = True,name=chdir) 36 | print(f"已对{CMPdirpath}下的所有翻译文本完成格式还原。\n还原格式后的新文本在{newpath}文件夹下。\n请用InjectTXTNarc.py做后续处理。") 37 | input("按任意键结束...") 38 | else: 39 | c = input("未指定正确操作类型,请按任意键结束。") 40 | exit() 41 | else: 42 | raise FileExistsError(f"{jpdir+nstr}不存在,请确认是否已利用ExtractNarc.py正确导出了文本。") 43 | except Exception as e: 44 | print(f"錯誤: {e},请重新操作。") 45 | input("按任意键结束...") 46 | '''#Just for the code test. 
47 | for num in range(2): 48 | nstr = str(num+2) + '_extr' 49 | jpdir = 'B(JP)' 50 | chdir = 'B(CH)' 51 | #divide 52 | #main(jpdir,chdir,nstr,method = "CMP",path = None,blocknums = 2,encoding = 'utf-16',newdir = True) 53 | #combine 54 | ft = jpdir[0] + nstr[0] + "_CMP" 55 | CMPdirpath = jpdir + nstr+"/" + ft 56 | main(jpdir,chdir,nstr,method = "Restore",path = CMPdirpath,blocknums = 2,encoding = 'utf-16',newdir = True)''' 57 | -------------------------------------------------------------------------------- /GetDivision.py: -------------------------------------------------------------------------------- 1 | import MakeString 2 | import re,os 3 | #将文本按块分片为不同的txt 4 | #mknewdir为True时,会对应块编号在当前目录创建文件夹,并按文件夹导出分块文本 5 | def txtDivision_byBlock(raw ,filepath ,blocknums = 2 ,encoding = 'utf-16',mknewdir = False ):#输入文本文件readlines表 6 | texts = MakeString.maketxtput(raw)#处理为按节分的文本 7 | #print(texts) 8 | for i in range(blocknums): 9 | writerlist = [] 10 | if "/" in filepath:#在文本文件面前增加标识数方便排序筛选 11 | finda = re.findall(r'(.*)/(.*)',filepath)#finda[0][0]是文件目录,finda[0][1]是文件名 12 | if mknewdir: 13 | dirpath = finda[0][0] + '/' + str(i) 14 | try: 15 | os.mkdir(dirpath) 16 | print(dirpath+" 已创建!") 17 | except FileExistsError: 18 | print("对应文件夹{}已存在。".format(dirpath)) 19 | pass 20 | output_filepath = dirpath + "/" +str(i)+"_" + finda[0][1] 21 | else: 22 | output_filepath = finda[0][0] + '/' + str(i)+"_" + finda[0][1] 23 | else: 24 | if mknewdir: 25 | dirpath = str(i) 26 | try: 27 | os.mkdir(dirpath) 28 | print(dirpath+" 已创建!") 29 | except FileExistsError: 30 | print("对应文件夹{}已存在。".format(dirpath)) 31 | pass 32 | output_filepath = dirpath + "/" +str(i)+"_"+filepath 33 | else: 34 | output_filepath = str(i)+"_"+filepath 35 | for text in texts: 36 | #print(text) 37 | if text[0][0] == str(i):#每块第一个字符必定是块编号 38 | writerlist.extend(text) 39 | textsrteam = "".join(writerlist) 40 | with open(output_filepath,"w",encoding = encoding)as w: 41 | w.write(textsrteam) 42 | 43 | 
#fmnewdir为True时,当前文件夹下必须有对应块编号的文件夹 44 | def txtCombine_byBlock(filepath, blocknums = 2,encoding = 'utf-16',fmnewdir = False):#将分开的txt重组为导出时的样子 45 | #输入文本文件流(直接read()) 46 | writerlist = [] 47 | for i in range(blocknums): 48 | if "/" in filepath:#在文本文件面前增加标识数方便排序筛选 49 | finda = re.findall(r'(.*)/(.*)',filepath) 50 | if fmnewdir: 51 | dirpath = finda[0][0] + '/' + str(i) 52 | print(dirpath) 53 | input_filepath = dirpath + "/" + str(i)+"_" + finda[0][1] 54 | else: 55 | input_filepath = finda[0][0] + '/' + str(i)+"_" + finda[0][1] 56 | else: 57 | if fmnewdir: 58 | dirpath = str(i) 59 | input_filepath = dirpath+"/"+str(i)+"_"+filepath 60 | else: 61 | input_filepath = str(i)+"_"+filepath 62 | print(input_filepath) 63 | with open(input_filepath,'r',encoding=encoding)as f: 64 | writerlist.extend(f.read()) 65 | if f.tell() == 0: 66 | raise KeyError("文本文件{}是空的!".format(input_filepath)) 67 | textsrteam = "".join(writerlist) 68 | with open(filepath,'w',encoding=encoding)as w: 69 | w.write(textsrteam) 70 | 71 | if __name__ == "__main__":#This is just for code testing 72 | #分片 73 | filepath = './testdir/B2_CMP/CMP_B2-44.txt' 74 | with open(filepath,encoding='utf-16')as f: 75 | txtDivision_byBlock(f.readlines(), filepath = filepath,mknewdir = True) 76 | #重新组合复原 77 | txtCombine_byBlock(filepath,fmnewdir = True) 78 | -------------------------------------------------------------------------------- /MakeFonts.py: -------------------------------------------------------------------------------- 1 | import struct 2 | from FreetypeMakeFonts import * 3 | def MakeFont_1bppTo(char,FONT,fontsize,bpp,reshape = (0,0),method = "rightdownShadow"): 4 | #仅用于Freetype默认生成的是1bpp的字体,例如SIMSUN2 5 | if method == "blodShadow": 6 | blod = True 7 | else: 8 | blod = False 9 | bitmapbuffer = CharBitmapCreator(char,FONT,fontsize = fontsize,blod=blod).buffer 10 | if bpp == 1: 11 | data = bitmapbuffer 12 | elif bpp == 2: 13 | data = trans2bpp(bitmapbuffer) 14 | else: 15 | data = [] 16 | if method == 
"rightdownShadow": 17 | buffer = rightdownShadow(data,bpp) 18 | elif method == "blodShadow": 19 | buffer = blodShadow(data,bpp) 20 | else: 21 | buffer = rightdownShadow(data,bpp) 22 | if reshape: 23 | width, height = reshape[0],reshape[1] 24 | buffer = reshape16(buffer,width=width,height=height,bpp=bpp,blod=blod) 25 | return buffer 26 | 27 | def MakeFont_8bppTo(char,FONT,fontsize,bpp,method = 'gamma',baseline = None): 28 | font = CharBitmapCreator(char,FONT,fontsize = fontsize) 29 | bitmapbuffer = font.buffer 30 | sourcebuffer = debpp(bitmapbuffer,width=font.width, bpp = 8, dbpp = bpp,method = method) 31 | font.buffer = sourcebuffer 32 | qbuffer = full_Q(font,fontsize,bpp,baseline = baseline).buffer 33 | divnum = 8/bpp 34 | size = int(fontsize*fontsize/divnum) 35 | if size != len(qbuffer):# The length of qbuffer which has been filled by fontsize should be equal to (fontsize*fontsize/divnum) 36 | raise ValueError(f"可能是baseline选取不当。当前字符 {char},正确的Byte长度为{size},但实际长度为{len(qbuffer)}。") 37 | return qbuffer 38 | 39 | def main(CHSCodeSource,FONT,ofilepath,bpp, 40 | fontsize = 12,method = "rightdownShadow",bitmaps = '1bppTo',encoding = "utf16", 41 | reshape = None,baseline = None): 42 | CHSCodelist = open(CHSCodeSource,"rt",encoding=encoding) 43 | CHSFontlist = [] 44 | for line in CHSCodelist: 45 | if '==' in line: 46 | CHSFontlist.append('=')#防止映射为字符“=”被split处理掉 47 | else: 48 | s=line.split("=") 49 | CHSFontlist.append(s[1].strip("\n")) 50 | bufferList = [] 51 | if bitmaps == '1bppTo': 52 | for char in CHSFontlist: 53 | bufferList.extend(MakeFont_1bppTo(char, FONT, fontsize, bpp, reshape=reshape,method=method)) 54 | elif bitmaps == "8bppTo": 55 | for char in CHSFontlist: 56 | bufferList.extend(MakeFont_8bppTo(char, FONT, fontsize, bpp, method=method, baseline=baseline)) 57 | else: 58 | raise TypeError("未指定正确的bpp转换类型。") 59 | with open(ofilepath,'wb') as f: 60 | for i in bufferList: 61 | f.write(struct.pack('B',i)) 62 | 63 | if __name__ == "__main__": 64 | import sys 65 | 
if len(sys.argv) != 9: 66 | print("使用方法: python .\MakeFonts_main.py <中文码表文件> <中文字体文件:例如SIMSUN2.TTC> \ 67 | <字体大小> <单字模宽width> <单字模高height> <阴影方法:rightdownShadow|blodShadow> <输出文件路径>") 68 | else: 69 | CHSCodeSource = sys.argv[1] 70 | FONT = sys.argv[2] 71 | bpp = sys.argv[3] 72 | fontsize = sys.argv[4] 73 | width = sys.argv[5] 74 | height = sys.argv[6] 75 | method = sys.argv[7] 76 | ofilepath = sys.argv[8] 77 | reshape = (width,height) 78 | main_CHS(CHSCodeSource,FONT,ofilepath,bpp,fontsize = fontsize,reshape = reshape,method = method) 79 | 80 | 81 | -------------------------------------------------------------------------------- /narc.py: -------------------------------------------------------------------------------- 1 | from struct import pack,unpack 2 | 3 | class BTAF: 4 | def __init__(self, rawdata): 5 | if len(rawdata)>0: 6 | self.magic = rawdata[:4] 7 | self.header = unpack("II", rawdata[4:12]) 8 | if self.magic != b"BTAF": 9 | raise NameError("BTAF tag not found") 10 | else: 11 | self.magic = b"BTAF" 12 | self.header = [12, 0] 13 | self.table = [] 14 | rawdata=rawdata[12:] 15 | if len(rawdata)>0: 16 | for i in range(self.getEntryNum()): 17 | self.table.append(unpack("II", rawdata[i*8:i*8+8])) 18 | def getSize(self): 19 | return self.header[0] 20 | def getEntryNum(self): 21 | return self.header[1] 22 | def toString(self, gmif): 23 | ret = b"BTAF"+pack("II", self.header[0], self.header[1]) 24 | for ofs, l in gmif.getEntries(): 25 | ret += pack("II", ofs, ofs+l) 26 | return ret 27 | 28 | class BTNF: 29 | def __init__(self, rawdata): 30 | if len(rawdata)>0: 31 | self.magic = rawdata[:4] 32 | self.header = unpack("IIHH", rawdata[4:0x10]) 33 | if self.magic != b"BTNF": 34 | raise NameError("BTNF tag not found") 35 | else: 36 | self.magic = b"BTNF" 37 | self.header = (16, 4, 0, 1) 38 | def toString(self): 39 | ret = b"BTNF" 40 | ret += pack("IIHH", self.header[0], self.header[1], self.header[2], self.header[3]) 41 | return ret 42 | class GMIF: 43 | def 
__init__(self, rawdata, t): 44 | if len(rawdata)>0: 45 | self.magic = rawdata[:4] 46 | self.size = unpack("I", rawdata[4:8])[0] 47 | if self.magic != b"GMIF": 48 | raise NameError("GMIF tag not found") 49 | else: 50 | self.magic = b"GMIF" 51 | self.size = 8 52 | self.files = [] 53 | for ofs in t: 54 | self.files.append(rawdata[8+ofs[0]:8+ofs[1]]) 55 | def getEntries(self): 56 | ret = [] 57 | of = 0 58 | for f in self.files: 59 | l = len(f) 60 | ret.append([of, l]) 61 | while l%4: 62 | l += 1 63 | of += l 64 | return ret 65 | def toString(self): 66 | ret = b"" 67 | for f in self.files: 68 | ret += f 69 | l = len(f) 70 | while l%4: 71 | l += 1 72 | ret += b"\xFF" 73 | self.size = len(ret) + 8#应包括header的8字节 74 | return b"GMIF"+pack("I", self.size)+ret 75 | class NARC: 76 | def __init__(self, rawdata): 77 | if len(rawdata)>0: 78 | self.magic = rawdata[:4] 79 | #print(self.magic) 80 | if self.magic != b"NARC": 81 | raise NameError("NARC tag not found") 82 | self.header = unpack("IIHH", rawdata[4:16]) 83 | else: 84 | self.magic = b"NARC" 85 | self.header = [0x0100FFFE, 0x10+12+8 + 0x10, 0x10, 3] 86 | rawdata= rawdata[16:] 87 | self.btaf = BTAF(rawdata) 88 | rawdata= rawdata[self.btaf.getSize():] 89 | self.btnf = BTNF(rawdata) 90 | rawdata= rawdata[self.btnf.header[0]:] 91 | self.gmif = GMIF(rawdata, self.btaf.table) 92 | def toString(self): 93 | ret = b"NARC" 94 | ret += pack("IIHH", self.header[0], self.header[1], self.header[2], self.header[3]) + self.btaf.toString(self.gmif) 95 | ret += self.btnf.toString()+self.gmif.toString() 96 | return ret 97 | def toFile(self, f): 98 | f.write(self.toString()) 99 | 100 | def __getitem__(self, key): 101 | return self.gmif.files[key] 102 | 103 | def __len__(self): 104 | return len(self.gmif.files) 105 | -------------------------------------------------------------------------------- /ChangeCodeList.py: -------------------------------------------------------------------------------- 1 | #Replace the original CMAP table with the 
# Replace the original CMAP table dumps with the newly integrated code table.
# All three font libraries share the same code table (e.g. "NewCodeList.txt").


def _read_codetable(path, encoding):
    """Read a "<glyph index>=<char>" table file into an insertion-ordered dict.

    The special form "<idx>==" maps the glyph to the literal '=' character.
    Blank lines (e.g. a trailing newline) are ignored.
    """
    table = {}
    with open(path, "r", encoding=encoding) as f:
        for line in f.read().split("\n"):
            if line == "":  # guard: blank line, typically the trailing newline
                continue
            parts = line.split("=")
            table[parts[0]] = "=" if "==" in line else parts[1]
    return table


def ChangeCodeList(newCodeListpath, narcFilename, filenum=3, encoding='utf-16'):
    """Overwrite the dumped CMAP tables of the extracted fonts with a new code list.

    For each of the first *filenum* fonts at
    "<narcFilename>_extr/<narcFilename>-<n>" the eight dumped tables
    "...序码表_<i>.txt" are compared against *newCodeListpath* and every
    differing character is replaced one-to-one.  The updated tables are
    written next to the originals as "...序码表_<i>_new.txt".

    Parameters
    ----------
    newCodeListpath : path of the new code list text file.
    narcFilename    : base name of the extracted font narc (e.g. "a023").
    filenum         : number of font files to process (BW needs 3).
    encoding        : encoding of all code-list files (default UTF-16).

    Note: the original version also scanned the list for the first/last
    CJK glyph index, but never used the result and crashed on blank
    lines; that dead code has been removed.  The code list is now parsed
    once instead of once per font.
    """
    MCodedict = _read_codetable(newCodeListpath, encoding)

    nftrExtrpath = narcFilename + '_extr/'
    new = "_new"
    for num in range(int(filenum)):
        filepath = nftrExtrpath + narcFilename + "-" + str(num)
        # Load the eight dumped CMAP tables of this font.
        CMAPdictList = [
            _read_codetable(filepath + "序码表_" + str(i) + ".txt", encoding)
            for i in range(8)
        ]
        # One-to-one replacement of every differing character.
        for key, newchar in MCodedict.items():
            for CMAPdict in CMAPdictList:
                if key in CMAPdict and CMAPdict[key] != newchar:
                    print("将字模序为{}的字符 {} 修改为:{}".format(key, CMAPdict[key], newchar))
                    CMAPdict[key] = newchar
        # Write the updated tables back with the "_new" suffix.
        for i, CMAPdict in enumerate(CMAPdictList):
            with open(filepath + "序码表_" + str(i) + new + ".txt", "w", encoding=encoding) as w:
                w.writelines(k + "=" + v + "\n" for k, v in CMAPdict.items())


if __name__ == "__main__":
    import sys, os
    if len(sys.argv) != 3:
        # Usage string restored to name both positional arguments.
        print("使用方法: python .\ChangeCodeList.py <新码表> <原Narc文件名>")
    else:
        NCpath = sys.argv[1]
        narcname = sys.argv[2]
        extrpath = narcname + "_extr/"
        try:
            if os.path.exists(extrpath):
                ChangeCodeList(NCpath, narcname)
            else:
                raise FileExistsError(f"{extrpath}不存在,请确认是否已利用ExtractNarc.py从目标Narc中正确提取了Nftr文件")
        except Exception as e:
            print(f"錯誤: {e},请重新操作。")
        input("按任意键结束...")
GetCMP.CMP(jpfilepath,chfilepath,encoding=encoding,mknewdir=mknewdir) 21 | finda = re.findall(r'(.*)/(.*)',cmpfilepath) 22 | CMP_path = finda[0][0] 23 | return CMP_path 24 | 25 | #批量分片经由CMP处理过后的整体对照文本,将其按块分开 26 | def DivCMP(CMPdirpath,blocknums = 2,encoding = 'utf-16',mknewdir = False): 27 | if CMPdirpath[-1]!="/":#补充路径尾部缺失的/方便后续拼接操作 28 | CMPdirpath += "/" 29 | filelist = os.listdir(CMPdirpath) 30 | CMPlist = [] 31 | for filepath in filelist: 32 | if ".txt" in filepath: 33 | if "CMP" in filepath: 34 | try: 35 | int(filepath[0]) 36 | except ValueError:#按命名规则,必须开头是CMP的才是可分片的整体文本 37 | CMPlist.append(filepath) 38 | for CMPpath in CMPlist: 39 | CMPpath = CMPdirpath + CMPpath 40 | with open(CMPpath,"r",encoding='utf-16')as f: 41 | GetDivision.txtDivision_byBlock(f.readlines(), CMPpath, blocknums = blocknums, 42 | encoding=encoding,mknewdir = mknewdir) 43 | 44 | #批量将分块文本重新组合复原 45 | def Div_Restore(CMPdirpath,blocknums = 2,encoding = 'utf-16',fmnewdir = False): 46 | if CMPdirpath[-1]!="/":#补充路径尾部缺失的/方便后续拼接操作 47 | CMPdirpath += "/" 48 | filelist = os.listdir(CMPdirpath) 49 | CMPlist = [] 50 | for filepath in filelist: 51 | if ".txt" in filepath: 52 | if "CMP" in filepath: 53 | try: 54 | int(filepath[0]) 55 | except ValueError:#按命名规则,必须开头是CMP的才是可分片的整体文本 56 | CMPlist.append(filepath) 57 | for CMPpath in CMPlist: 58 | CMPpath = CMPdirpath + CMPpath 59 | GetDivision.txtCombine_byBlock(CMPpath,blocknums = blocknums,encoding = encoding,fmnewdir = fmnewdir) 60 | 61 | #批量将CMP文本只保留翻译部分 62 | def MakeCMP_TH(CMPdirpath,dirpath=None,encoding = 'utf-16',name = None): 63 | if CMPdirpath[-1]!="/":#补充路径尾部缺失的/方便后续拼接操作 64 | CMPdirpath += "/" 65 | filelist = os.listdir(CMPdirpath) 66 | CMPlist = [] 67 | for filepath in filelist: 68 | if ".txt" in filepath: 69 | if "CMP" in filepath: 70 | try: 71 | int(filepath[0]) 72 | except ValueError:#按命名规则,必须开头是CMP的才是可分片的整体文本 73 | CMPlist.append(filepath) 74 | for CMPpath in CMPlist: 75 | CMPpath = CMPdirpath + CMPpath 76 | path = 
# Build an original/translation comparison ("CMP") text from two complete
# dumps of the same narc in different languages.
def CMP(jpfilepath, chfilepath, encoding='utf-16', mknewdir=False):
    """Merge *jpfilepath* (original) and *chfilepath* (translation) into
    one comparison file and return the path it was written to.

    Input names are expected to look like "B(JP)2-44.txt" — the part
    before '(' is the version tag, the part after ')' the block number
    (presumed from the self-test naming; TODO confirm for other inputs).
    The output is named "CMP_<version><num>"; with mknewdir=True it is
    placed into a freshly created "<version><first digit of num>_CMP/"
    directory next to the input.

    Raises KeyError when an entry of the original dump carries no
    "<block>_<number>" section marker.
    """
    with open(jpfilepath, "r", encoding=encoding) as jpfile:
        with open(chfilepath, "r", encoding=encoding) as cnfile:
            jp_text = jpfile.readlines()
            ch_text = cnfile.readlines()
    # First split both dumps into entry lists ([header, section, ...]).
    jp_entrytexts = MakeString.maketxtput(jp_text)
    ch_entrytexts = MakeString.maketxtput(ch_text)
    cmptexts = []
    for i in range(len(jp_entrytexts)):  # jp and ch lists must have equal length
        match = re.match("([^_]+)_([0-9]+)(.*)", jp_entrytexts[i][0])
        if match:
            # NOTE: extend() on a string appends it character by character;
            # harmless here because everything is joined back together below.
            cmptexts.extend(jp_entrytexts[i][0])
            # Original sections first, then the matching translated sections.
            for j in range(1, len(jp_entrytexts[i])):
                cmptexts.extend(jp_entrytexts[i][j])
            for j in range(1, len(jp_entrytexts[i])):
                cmptexts.extend(ch_entrytexts[i][j])
        else:
            raise KeyError("当前文本在第{}节处没有分块标识!".format(i))
    if '/' in jpfilepath:
        # finda[0][0] is the directory part, finda[0][1] the file name.
        finda = re.findall(r'(.*)/(.*)', jpfilepath)
        version = re.findall(r'(.*)\((.*)', finda[0][1])[0][0]
        num = re.findall(r'(.*)\)(.*)', finda[0][1])[0][1]
        cmppath = "CMP_" + version + num

        if mknewdir:
            dirpath = finda[0][0] + '/' + version + num[0] + "_CMP/"
            try:
                os.mkdir(dirpath)
                print(dirpath + " 已创建!")
            except FileExistsError:
                # Directory already exists — reuse it.
                print("对应文件夹{}已存在。".format(dirpath))
                pass
            output_filepath = dirpath + cmppath
        else:
            output_filepath = finda[0][0] + '/' + cmppath
    else:
        # Same naming logic for a bare file name without a directory part.
        version = re.findall(r'(.*)\((.*)', jpfilepath)[0][0]
        num = re.findall(r'(.*)\)(.*)', jpfilepath)[0][1]
        cmppath = "CMP_" + version + num

        if mknewdir:
            dirpath = version + num[0] + "_CMP/"
            try:
                os.mkdir(dirpath)
                print(dirpath + " 已创建!")
            except FileExistsError:
                print("对应文件夹{}已存在。".format(dirpath))
                pass
            output_filepath = dirpath + cmppath
        else:
            output_filepath = cmppath
    print(output_filepath)
    textsrteam = "".join(cmptexts)  # (sic) original variable name kept
    with open(output_filepath, 'w', encoding=encoding) as w:
        w.write(textsrteam)
    return output_filepath
import csv


def _find_cjk_range(newCodeListpath, encoding):
    """Return (first, last) glyph indices of the contiguous CJK run
    (U+4E00..U+9FFF) in the code list ("<index>=<char>" lines).

    Fixes over the original scan: blank lines (trailing newline) no
    longer crash it, a missing CJK run raises a clear error instead of a
    later NameError, and a run reaching the end of the list now yields a
    valid *last* instead of being unbound.
    """
    first = last = None
    prev = None
    with open(newCodeListpath, "r", encoding=encoding) as cf:
        for raw in cf.read().split("\n"):
            if raw == "" or "==" in raw:  # skip blanks and the literal '=' entry
                continue
            trans = raw.split("=")
            is_cjk = 0x4E00 <= ord(trans[1]) <= 0x9FFF
            if first is None and is_cjk:
                first = int(trans[0])
            elif first is not None and not is_cjk:
                last = int(trans[0]) - 1  # previous entry was the last CJK one
                break
            prev = trans[0]
    if first is None:
        raise ValueError("码表中没有中文字符区段")
    if last is None:
        last = int(prev)  # CJK run extends to the end of the list
    return first, last


def _rewrite_cwdh(filepath, first, last):
    """Rewrite <filepath>CWDH.csv -> ...CWDH_new.csv with CJK-friendly widths."""
    with open(filepath + 'CWDH.csv', "r", newline="") as r:
        locs, widths, tags = [], [], []
        for row in csv.DictReader(r):
            locs.append(int(row['loc']))  # csv fields are all strings
            widths.append(int(row['width']))
            tags.append(int(row['tag']))
    # Distinct widths carrying tag==1, ascending.
    transl = sorted({w for w, t in zip(widths, tags) if t == 1})
    print(f"CWDH:{transl}")
    if len(transl) < 2:
        # transl[1] below needs at least two choices; fail loudly instead
        # of with a bare IndexError.
        raise ValueError("CWDH中tag==1的宽度种类不足2种,无法选择适中宽度")
    nwidth = []
    for loc, width in zip(locs, widths):
        # NOTE: the explicit glyph indices below are hard-coded for the
        # Pokemon B/W Japanese fonts (full-width ?!“”,。 and letters),
        # see README.
        if (first <= loc <= last) or loc in (4932, 336, 207, 208, 4904, 4913):
            nwidth.append(transl[1])  # medium width for CJK + punctuation
        elif 4934 <= loc <= 4959 or 4966 <= loc <= 4991:
            nwidth.append(transl[0])  # smallest width for full-width letters
        else:
            nwidth.append(width)
    with open(filepath + 'CWDH_new.csv', "w", newline="") as w:
        writer = csv.writer(w)
        writer.writerow(["loc", "width", "tag"])
        for loc, nw, tag in zip(locs, nwidth, tags):
            writer.writerow([loc, nw, tag])


def _rewrite_widthtable(filepath, first, last, num):
    """Rewrite <filepath>宽度表.csv -> ...宽度表_new.csv (left/width/advance)."""
    with open(filepath + "宽度表.csv", "r", newline="") as r:
        locs, lefts, widths, advances = [], [], [], []
        for row in csv.DictReader(r):
            locs.append(int(row['loc']))
            lefts.append(int(row['left']))
            widths.append(int(row['width']))
            advances.append(int(row['advance']))
    # Hoisted out of the loop — the original recomputed max() per row (O(n^2)).
    maxw = max(widths)
    maxa = max(advances)
    nleft, nwidth, nadvance = [], [], []
    for i, loc in enumerate(locs):
        if first <= loc <= last:  # CJK glyphs use the full cell
            nwidth.append(maxw)
            nleft.append(0)
            # Fonts 0 and 1 advance one pixel less than font 2.
            nadvance.append(maxa - 1 if num < 2 else maxa)
        elif loc in (4932, 336, 207, 208, 4904, 4913, 4927, 4928):
            # Full-width ?!“”,。;: punctuation — slightly narrower, indented.
            nwidth.append(maxw - 3)
            nadvance.append(maxa - 2)
            nleft.append(2)
        elif loc == 213:  # ellipsis …
            nwidth.append(widths[i])
            nleft.append(lefts[i])
            nadvance.append(advances[i] + 1)
        elif 4934 <= loc <= 4959 or 4966 <= loc <= 4991:  # full-width letters
            nwidth.append(widths[i])
            nadvance.append(advances[i] + 1 if advances[i] < maxa else advances[i])
            nleft.append(lefts[i])
        else:
            nwidth.append(widths[i])
            nadvance.append(advances[i])
            nleft.append(lefts[i] - 1 if lefts[i] > 1 else lefts[i])
    with open(filepath + "宽度表_new.csv", "w", newline="") as w:
        writer = csv.writer(w)
        writer.writerow(["loc", "left", "width", "advance"])
        for i in range(len(locs)):
            writer.writerow([locs[i], nleft[i], nwidth[i], nadvance[i]])


def ChangeWidth(newCodeListpath, narcFilename, encoding='utf-16'):
    """Rewrite the dumped width tables of the three BW fonts for CJK text.

    Locates the contiguous CJK glyph range in *newCodeListpath*, then for
    each of the three fonts at "<narcFilename>_extr/<narcFilename>-<n>"
    writes "...CWDH_new.csv" and "...宽度表_new.csv" beside the dumps.

    Parameters
    ----------
    newCodeListpath : path of the new code list ("<index>=<char>" lines).
    narcFilename    : base name of the extracted font narc (e.g. "a023").
    encoding        : encoding of the code list file (default UTF-16).

    Raises ValueError when the code list has no CJK run or the CWDH table
    offers fewer than two distinct tagged widths.
    """
    first, last = _find_cjk_range(newCodeListpath, encoding)
    nftrExtrpath = narcFilename + '_extr/'
    for num in range(3):  # BW only uses the first three fonts
        filepath = nftrExtrpath + narcFilename + "-" + str(num)
        _rewrite_cwdh(filepath, first, last)
        _rewrite_widthtable(filepath, first, last, num)


if __name__ == "__main__":
    import sys, os
    if len(sys.argv) != 3:
        # Usage string restored to name both positional arguments.
        print("使用方法: python .\ChangeWidth.py <新码表> <原Narc文件名>")
    else:
        NCpath = sys.argv[1]
        narcname = sys.argv[2]
        extrpath = narcname + "_extr/"
        try:
            if os.path.exists(extrpath):
                ChangeWidth(NCpath, narcname)
            else:
                raise FileExistsError(f"{extrpath}不存在,请确认是否已利用ExtractNarc.py从目标Narc中正确提取了Nftr文件")
        except Exception as e:
            print(f"錯誤: {e},请重新操作。")
        input("按任意键结束...")
使用[tinke](https://github.com/pleonex/tinke)读取*.nds导出a/0/0/2、a/0/0/3和a/0/2/3这三个Narc文件 9 | 其中: 10 | 11 | a/0/0/2 系统文本集 12 | 13 | a/0/0/3 剧情文本集 14 | 15 | a/0/2/3 字体集(字库) 16 | 17 | ## Narc解包 18 | 用ExtractNarc.py <处理类型:输入text(文本)或file(其它)> 19 | ### 示例 20 | *此处仅为使用方法的示例* 21 | 22 | 為方便後續操作,建议将上述通过tinke提取出来的文件重命名,以日版(JP)的Narc文件为例: 23 | 24 | a/0/0/2提取出来的2 重命名为 B(JP)2 25 | 26 | a/0/0/3提取出来的3 重命名为 B(JP)3 27 | 28 | a/0/2/3提取出来的3 重命名为 a023 29 | 30 | 将这些Narc文件与*.py放在同级目录下,在控制台Command Line(CMD)中执行语句: 31 | 32 | ``` 33 | >> python ExtractNarc.py B(JP)2 text 34 | 35 | >> python ExtractNarc.py B(JP)3 text 36 | 37 | >> python ExtractNarc.py a023 file 38 | ``` 39 | 由此就创建了: 40 | 41 | B(JP)2_extr、B(JP)3_extr和a023_extr这三个分别对应B(JP)2、B(JP)3和a023的子文件夹。 42 | 43 | B(JP)2_extr、B(JP)3_extr内应当有:解包后按照<>编号命名的文件块(无后缀)和对应的解密后的文本*.txt 44 | 45 | a023_extr内应当有:解包后按照编号命名的文件块(无后缀) 46 | 47 | # 2.文本处理 48 | *整体流程:* 49 | 50 | 拆文本→翻译(修改)文本→做码表→改字库→文本和字库都导入Narc分块→打包生成New_Narc→用tinke将New_Narc对位导入 51 | 52 | ## 拆文本组合为原译文对照 53 | →直接执行CMP_Div_main.py脚本,按照提示制作对照文本。 54 | 55 | ### 示例 56 | 依然以 1.准备工作 中的示例文件命名为例: 57 | ``` 58 | >> python CMP_Div_main.py 59 | 60 | >> 请输入当前目录下已利用ExtractNarc.py导出的文本对应的源Narc文件名:B(JP)3 61 | 62 | >> 请选择操作:CMP 63 | ``` 64 | 由此就会在B(JP)3_extr\目录下创建名为B3_CMP的文件夹,其中包含0和1两个分块的原译文对照文本。 65 | 要修改的是0和1两个文件夹中的文本。 66 | 67 | ### 翻译示例 68 | 以.\B(JP)3_extr\B3_CMP\0\0_CMP_B3-23.txt中的第一段文本0_0I为例: 69 | 70 | 0_0I 71 | 72 | \------------------------------ 73 | 74 | はくぶつかんの そのおくで 75 | 76 | ちょうせんしゃを まつ`` 77 | 78 | ポケモンジム……`` 79 | 80 | 81 | 82 | なんだか ふんいき あるっすよね`` 83 | 84 | 85 | 86 | というわけで 87 | 88 | これを さしあげるっす!`` 89 | 90 | 91 | 92 | \------------------------------ 93 | 94 | はくぶつかんの そのおくで 95 | 96 | ちょうせんしゃを まつ`` 97 | 98 | ポケモンジム……`` 99 | 100 | 101 | 102 | なんだか ふんいき あるっすよね`` 103 | 104 | 105 | 106 | というわけで 107 | 108 | これを さしあげるっす!`` 109 | 110 | 111 | 112 | \------------------------------ 113 | 114 | 上面的部分是原文,下面的部分是译文,因此直接修改翻译下面的部分即可: 115 | 116 | 0_0I 117 | 118 | \------------------------------ 119 | 120 | はくぶつかんの そのおくで 
121 | 122 | ちょうせんしゃを まつ`` 123 | 124 | ポケモンジム……`` 125 | 126 | 127 | 128 | なんだか ふんいき あるっすよね`` 129 | 130 | 131 | 132 | というわけで 133 | 134 | これを さしあげるっす!`` 135 | 136 | 137 | 138 | \------------------------------ 139 | 140 | 博物馆里面是 141 | 142 | 等待着挑战者的宝可梦道馆……`` 143 | 144 | 145 | 146 | 感觉真有气氛啊。`` 147 | 148 | 149 | 150 | 哦对了, 151 | 152 | 这个给你!`` 153 | 154 | 155 | 156 | \------------------------------ 157 | #### 控制符 158 | `<>`中包括的是控制符,翻译时建议尽量按照原文的格式摆放。 159 | 160 | 不过对于``和``,翻译者可以酌情考虑变更。 161 | 162 | 在游戏中的效果: 163 | 164 | ``的作用是:显示翻页的运动箭头▼,并且按A后整个对话框翻一页。也就是按A后,``后的文段直接覆盖``所在的文段。 165 | 166 | 以上面的翻译为例:“感觉真有气氛啊。``”按A,对话框的两行字直接换成 167 | 168 | “哦对了, 169 | 170 | 这个给你!``”。 171 | 172 | ``的作用是:按A后,``后面的一段文字把``所在的这段文字顶到上面去。 173 | 174 | 以上面的翻译为例:“等待着挑战者的宝可梦道馆……``”按A, 175 | 176 | “ 177 | 感觉真有气氛啊。”往上滑动,顶去“等待着挑战者的宝可梦道馆……”。 178 | 179 | ## 文本导入打包流程 180 | 完成文本修改(翻译)后,把修改(翻译)后的文本组合恢复原本的格式,导入Narc中打包。 181 | ### 组合文本 182 | →直接执行CMP_Div_main.py脚本按照提示组合文本。 183 | 184 | #### 示例 185 | 依然以示例文件命名为例: 186 | ``` 187 | >> python CMP_Div_main.py 188 | 189 | >> 请输入当前目录下已利用ExtractNarc.py导出的文本对应的源Narc文件名:B(JP)3 190 | 191 | >> 请选择操作:Restore 192 | 193 | >> 请输入拟定创造的新Narc文件名:B(CH)3 194 | ``` 195 | 组合后的文本就在新创建的B(CH)3_extr/目录下,并且已经创建好了待写入的对应文本块。 196 | 197 | ### 导入文本到对应Narc分片中 198 | →用InjectTXTNarc.py批量导入分片。 199 | #### 示例 200 | 依然以示例文件命名为例: 201 | 202 | ``` 203 | >> python .\InjectTXTNarc.py B(CH)3 204 | ``` 205 | 206 | 待程式正确执行后,这样就将B(CH)3_extr\中的所有*txt导入对应的Narc分片中了。 207 | 208 | ### 打包Narc 209 | →用MakeNarc.py打包文本Narc生成对应的New_Narc 210 | #### 示例: 211 | 依然以示例文件命名为例: 212 | ``` 213 | >> python .\MakeNarc.py B(CH)3 214 | ``` 215 | 这样就得到了打包后的名为New_B(CH)3的Narc. 
216 | 217 | # 3.字库制作 218 | ## 做码表的流程 219 | →直接执行CodeList_main.py按照提示制作新码表。 220 | 221 | ### 示例 222 | 依然以示例文件命名为例: 223 | 224 | ``` 225 | >> python .\CodeList_main.py 226 | 227 | >> 请输入Nftr对应的原Narc文件名:a023 228 | 229 | >> 是否已有完整的原始码表?输入Y为是,N为否:N 230 | 231 | 已在程式脚本同目录下生成3个字库对应的全码表 232 | a023-0_Total.txt、 233 | a023-1_Total.txt、 234 | a023-2_Total.txt, 235 | 这3个码表的内容应当完全相同。 236 | >> 确认无误后,请任选一个码表作为原始码表(输入0,1,2中的任意一个数字):0 237 | 238 | >> 翻译后的文本*.txt放在多个文件夹中吗?输入Y为是,N为否:Y 239 | 240 | >> 请输入第1个路径(输入“END”或按ENTER结束):./B(CH)2_extr/ 241 | 242 | >> 请输入第2个路径(输入“END”或按ENTER结束):./B(CH)3_extr/ 243 | 244 | >> 请输入第3个路径(输入“END”或按ENTER结束):\n 245 | 246 | 已在程式脚本同目录下生成了名为CHS.TBL的中文编码表,专用于制作中文字库。 247 | >> 确认无误后,请按任意键继续后续操作:\n 248 | 249 | 预替换的第1个字符为编号603的['603', '一'],开始替换。 250 | …… 251 | 已在程式脚本同目录下生成了名为NewCodeList.txt的新码表。 252 | 全操作完毕,请按任意键结束... 253 | ``` 254 | 中途会对应生成全文涉及到的中文码表CHS.TBL,用于后续的中文字库制作。 255 | 256 | 制作的全新码表固定名称为NewCodeList.txt,仅执行中文编码区域的覆盖。 257 | 258 | ## 修改重做字库调用流程 259 | 为了让游戏能正确显示修改后的文本,需要为新加入的字符做字库。 260 | ### 制作中文字库 261 | 用MakeFonts_main.py参照CHS.TBL做中文字库。 262 | #### 示例 263 | 依然以示例文件命名为例: 264 | ``` 265 | >> python .\MakeFonts_main.py CHS.TBL SIMSUN2.TTC a023 266 | ``` 267 | 如上执行后,会在与脚本同级的目录下得到三个中文字库: 268 | 269 | a023-0-chs 270 | 271 | a023-1-chs 272 | 273 | a023-2-chs 274 | 275 | 为了方便后续操作,请不要擅自移动它们。 276 | 277 | ### 修改字符宽度表 278 | *# 此部分涉及的程式码存在大量硬编码* 279 | 280 | →用Change三件套修改Width、Bitmap和CodeList,直接执行ChangeBCW_main.py按提示操作。 281 | #### 示例 282 | 依然以示例文件命名为例: 283 | ``` 284 | >> python .\ChangeBCW_main.py 285 | 286 | >> 请输入已经导出的Nftr的原Narc文件名:a023 287 | 288 | >> 请输入码表路径:NewCodeList.txt 289 | ``` 290 | 正确执行上述后,在a023_extr\下会生成_new的所有需要的新字库文件。 291 | 292 | ### 导入Nftr导入Narc 293 | →InjectNftr.py将新字库文件导入各分片。 294 | 295 | →MakeNarc.py打包字库。 296 | #### 示例 297 | 依然以示例文件命名为例: 298 | ``` 299 | >> python .\InjectNftr.py a023 300 | 301 | >> python .\MakeNarc.py a023 302 | ``` 303 | 这样就在脚本的同级目录下得到了打包后的新字库New_a023的Narc. 
import csv
from struct import pack
from nftr import *


# Inject the exported/edited font files back into each NFTR piece.
def InjectNftr(narcfilename):
    """Write the edited "_new" width tables, code tables and glyph bitmaps
    back into the three extracted NFTR fonts at
    "<narcfilename>_extr/<narcfilename>-<i>".

    The fonts are rewritten in place: the packer only picks up the
    extension-less files, so the originals must be overwritten.
    """
    nftrpath = narcfilename+"_extr/"
    for i in range(3):  # BW only uses the first three fonts
        filepath = nftrpath+narcfilename +"-"+ str(i)
        new = "_new"  # suffix of the edited input files
        #new = ""  # use this instead to restore the unedited exports
        with open(filepath,"rb")as f:
            rawdata = f.read()
        nftr = NFTR(rawdata)
        # width0123dict presumably maps the 2-bit width codes 0..3 to pixel
        # widths (see nftr.py — TODO confirm); build the inverse mapping.
        cdict = nftr.cwdh.width0123dict
        values = list(cdict.values())
        keys =list(cdict.keys())
        cdict_inv = {values[i]:keys[i] for i in range(len(keys))}

        # --- rebuild CWDH "table 1" (packed 2-bit width codes) from the CSV ---
        csvfile = filepath + 'CWDH'+new+'.csv'
        with open(csvfile,"r") as r:
            reader = csv.DictReader(r)
            locs = []
            widths = []
            tags = []
            combs = []  # new packed table-1 words, four glyphs per entry
            for row in reader:
                locs.append(int(row['loc']))  # csv fields are all strings
                widths.append(int(row['width']))
                tags.append(int(row['tag']))
        widthtable = []  # table 1 unpacked again: one 2-bit code per glyph
        for i in range(0,len(locs),4):
            k = 4
            comb = 0
            v = 0
            for j in range(k):
                if tags[i+j] == 1:
                    # tagged row: look up the 2-bit code for this width
                    v = cdict_inv[widths[i+j]]
                    widthtable.append(v)
                else:
                    # untagged row: code for width 0
                    v = cdict_inv[0]
                    widthtable.append(v)
                # pack the four codes into one word, first glyph in the
                # highest bit pair
                comb = comb + (v << (2 * (k-j-1)))
            combs.append(comb)

        if combs == nftr.cwdh.widthtableComb:
            print("CWDH原封不动",True)
        else:
            print("已修改CWDH.")
        nftr.cwdh.widthtableComb = combs  # replace original table 1; written on save

        # --- update the cycle table (2) and the array table (3) ---
        for i in locs:
            widthid = cdict[widthtable[i]]
            if widthid == 0:  # code 0: width is not inline, consult table 2
                # table 1 is ordered by glyph index, so loc == glyph index
                glyphidx = i
                # hash the glyph index into the 512-entry cycle table;
                # the bit mix presumably mirrors the game's own lookup —
                # TODO confirm against nftr.py
                cycleid = glyphidx & 0x1FF ^ (8
                    * (((glyphidx >> 9) ^ (glyphidx >> 11) ^ (glyphidx >> 12) ^ (glyphidx >> 10)) & 1)) & 0x1FF

                cid = nftr.cwdh.cycletable[cycleid]
                if cid > 128:  # stored as an unsigned byte; recover signed value
                    cid -= 256
                if cid > 0:  # positive entry: the value itself is the width
                    if cid != widths[glyphidx]:  # replace only when it changed
                        nftr.cwdh.cycletable[cycleid] = widths[glyphidx]
                        print("已修改第{}字符于 表2 的宽度值为:{}".format(glyphidx,widths[glyphidx]))
                elif cid < 0:  # negative entry: 1-based index into table 3
                    arr = nftr.cwdh.arraytable[abs(cid)-1]  # -1: first item is index 0
                    flag = -1
                    for k in arr[1]:  # arr[1] maps glyph index -> width
                        if k == glyphidx:  # found this glyph's width in table 3
                            if arr[1][glyphidx] != widths[glyphidx]:
                                nftr.cwdh.arraytable[abs(cid)-1][1][glyphidx] = widths[glyphidx]
                                print("已修改第{}字符于 表3 的宽度值为:{}".format(glyphidx,widths[glyphidx]))
                            flag = 0
                            break
                    if flag == -1:
                        print("存在不可查值,字模序为:{}".format(glyphidx))
                else:
                    raise NameError("疑似出现字符宽度为0的状况")
            else:
                # codes 1..3 map directly through cdict; the table-1
                # replacement above already covers them
                pass

        # --- replace the character code tables (CMAPs) ---
        i = 0
        for cmap in nftr.CMAPTable:
            idxs = list(cmap.CodeTableDict.keys())
            codes = list(cmap.CodeTableDict.values())
            with open(filepath +"序码表_"+str(i)+ new +".txt","r",encoding="utf16")as txt:
                lines = txt.readlines()
            if len(lines) > len(idxs):
                raise LookupError("超出原始CMAP {}的可覆盖范围!".format(i))
            else:
                nCodeTableDict = dict()
                for j in range(len(lines)):
                    line = lines[j].replace("\n","")
                    if line[-1] == "=":  # "<idx>==" means the character is '='
                        idx = line.split("=")[0]
                        char = "="
                    else:
                        idx, char = line.split("=")
                    if "NULL" in idx:  # placeholder entries map to 0xFFFF
                        if idx == idxs[j]:
                            nCodeTableDict[idx] = 0xFFFF
                    else:
                        if int(idx) == int(idxs[j]):
                            if codes[j] != ord(char):
                                nCodeTableDict[int(idx)] = ord(char)
                                print("已修改第{}个CMAP的第{}字符为:{}".format(i,idx,char))
                            else:
                                nCodeTableDict[int(idx)] = codes[j]
                        else:
                            # glyph order must match the original CMAP exactly
                            print(idx,idxs[j])
                            raise KeyError("对应第{}个CMAP的字模序发生改变!".format(i))
                nftr.CMAPTable[i].CodeTableDict = nCodeTableDict
            i += 1

        # --- reassemble glyph metric bytes + bitmaps ---
        csvfile = filepath + "宽度表"+new+".csv"
        with open(csvfile,"r",newline="")as f:
            datas = []
            reader = csv.reader(f)
            pos = 0
            for row in reader:
                if pos == 0:  # skip the csv header row
                    pos += 1
                    continue
                trans = list(map(int,row[1:]))  # drop the glyph-index column
                datas.append(trans)
                pos += 1
        # per-glyph bitmap size: full record minus the 3 metric bytes
        # (45/25/36 bytes per glyph for a023-0/1/2)
        fontsize = nftr.cglp.tfontsize - 3
        with open(filepath + new +".bitmap","rb")as f:
            buffer = f.read()
        if len(nftr.bitmaps) < len(datas):
            raise LookupError("超出字模序列可覆盖范围!")
        for j in range(len(datas)):
            # metric bytes changed -> rebuild the record with the old bitmap
            if nftr.cglp.fontsdata[j] != tuple(datas[j]):
                trans = b""
                for data in datas[j]:
                    trans += pack('B',data)
                nftr.cglp.tfonts[j] = trans + nftr.bitmaps[j]
                print("已修改第{}个字模的数据为:{}".format(j,datas[j]))
            # bitmap changed -> rebuild the record with the new bitmap;
            # note the j*fontsize stride into the concatenated .bitmap blob
            if nftr.bitmaps[j] != buffer[j*fontsize:j*fontsize+fontsize]:
                trans = b""
                for data in datas[j]:
                    trans += pack('B',data)
                nftr.cglp.tfonts[j] = trans + buffer[j*fontsize:j*fontsize+fontsize]
                print("已修改第{}个字模。".format(j))

        # The packer only recognises the extension-less file, so the
        # updated font must overwrite the original in place.
        nfilepath = filepath
        with open(nfilepath,"wb")as f:
            nftr.toFile(f)
FileExistsError(f"{extrpath}不存在,请确认是否已利用ExtractNarc.py从目标Narc中正确提取了Nftr文件") 176 | except Exception as e: 177 | print(f"錯誤: {e},请重新操作。") 178 | 179 | -------------------------------------------------------------------------------- /CheckHelper.py: -------------------------------------------------------------------------------- 1 | import re,os,MakeString 2 | from SetCharCounts import * 3 | def controlChecker(): 4 | for num in range(2, 4): 5 | num = str(num) 6 | dirpath = "B(JP)"+num+"_extr/B"+num+"_CMP/" 7 | 8 | def chscounts(trans): 9 | pos = 0 10 | for c in trans: 11 | if (ord(c) >= 0x4E00) and (ord(c) <= 0x9FFF): 12 | pos += 1 13 | return pos 14 | 15 | for n in range(2):#0,1 16 | filelist = os.listdir(dirpath+str(n)+"/") 17 | for filepath in filelist: 18 | path = dirpath+str(n)+"/"+filepath 19 | with open(path,'r',encoding="utf-16")as cmpfile: 20 | cmptexts = cmpfile.readlines() 21 | cmp_entrytexts = MakeString.maketxtput(cmptexts) 22 | subline = '\n------------------------------\n' 23 | for i in range(len(cmp_entrytexts)): 24 | match = re.match("([^_]+)_([0-9]+)(.*)", cmp_entrytexts[i][0]) 25 | if match: 26 | for j in range(1,len(cmp_entrytexts[i])):#锁定并计入翻译部分,分节最后的一定是翻译部分 27 | sp = cmp_entrytexts[i][j].split(subline) 28 | trans = sp[1].split("\n") 29 | if len(trans) >= 2: 30 | flag = 0#上一行的末尾标记 31 | pos = 0#上一个控制符后经过的行数 32 | for k in range(len(trans)): 33 | 34 | if trans[k] == "": 35 | continue 36 | elif chscounts(trans[k]) > 17: 37 | print(f"在{filepath}的:\n{trans[k]}") 38 | print("疑似发现行过长") 39 | txt = "".join(sp[1]) 40 | print(txt) 41 | else: 42 | if flag == 0: 43 | if pos < 2: 44 | if "" in trans[k]: 45 | flag = 1 46 | pos = 0 47 | elif "" in trans[k]: 48 | flag = 2 49 | pos = 0 50 | else:# 是换行符\n 51 | flag = 3 52 | else: 53 | print(f"在{filepath}的:\n{trans[k]}") 54 | print("疑似发现控制符错误:超过3行没有控制符") 55 | txt = "".join(sp[1]) 56 | print(txt) 57 | break 58 | elif flag == 1:#说明上一行末尾是 59 | if pos == 0: 60 | if "" in trans[k]: 61 | print(f"在{filepath}的:\n{trans[k]}") 
62 | print("疑似发现控制符错误:后紧随") 63 | txt = "".join(sp[1]) 64 | print(txt) 65 | break 66 | else: 67 | pos += 1 68 | flag = 0 69 | elif pos == 1:#后的第二行 70 | if trans[k-1]!="":#第一行是\n可以多一行 71 | if "<" not in trans[k]: 72 | print(f"在{filepath}的:\n{trans[k]}") 73 | print("疑似发现控制符错误:后两行没有控制符") 74 | txt = "".join(sp[1]) 75 | print(txt) 76 | break 77 | else: 78 | if "" in trans[k]: 79 | flag =1 80 | elif "" in trans[k]: 81 | flag =2 82 | pos = 0 83 | else: 84 | continue 85 | else: 86 | print("逻辑,是否存在逻辑错误?") 87 | elif flag == 2:#说明上一行末尾是 88 | if pos == 0: 89 | if trans[k-1] != "":#第一行是\n可以多一行 90 | if "<" not in trans[k]: 91 | if trans[k+1:]:#后最多只能有一句结尾句,能往下说明不是结尾句 92 | #print(f"pos {pos},flag {flag}") 93 | print(f"在{filepath}的:\n{trans[k]}") 94 | print("疑似发现控制符错误:后缺少控制符") 95 | txt = "".join(sp[1]) 96 | print(txt) 97 | break 98 | 99 | else:#除了结尾句,后一行末尾不是就是 100 | if "" in trans[k]: 101 | flag =1 102 | elif "" in trans[k]: 103 | flag =2 104 | pos = 0 105 | else: 106 | continue 107 | else: 108 | print("逻辑,是否存在逻辑错误?") 109 | else: 110 | if trans[k] != "": 111 | if pos > 1: 112 | print(f"在{filepath}的:\n{trans[k]}") 113 | print("疑似发现控制符错误:超过2行换行后没有控制符") 114 | txt = "".join(sp[1]) 115 | print(txt) 116 | break 117 | def charCodeChecker(dirpath): 118 | def get_simplified_chinese_chars_mapping(): 119 | # 使用 GBK 编码构建简体中文字符的 Unicode 映射 120 | simplified_chinese_chars = set() 121 | for code in range(0x4E00, 0x9FA5 + 1): # 常用汉字范围 122 | try: 123 | char = chr(code) 124 | # 尝试将字符编码为 GBK,成功则为简体中文字符 125 | char.encode('gbk') 126 | simplified_chinese_chars.add(char) 127 | except UnicodeEncodeError: 128 | continue # 不是简体中文字符 129 | 130 | return simplified_chinese_chars 131 | simplified_chinese_chars = get_simplified_chinese_chars_mapping() 132 | charl = SetCharCounts(dirpath) 133 | for char in charl: 134 | if char not in simplified_chinese_chars: 135 | print(char) 136 | 137 | if __name__ == "__main__":#Just for the files which are named as the same formation as . 
controlChecker()
#charCodeChecker(['./B(CH)2_extr','./B(CH)3_extr'])




--------------------------------------------------------------------------------
/MakeString.py:
--------------------------------------------------------------------------------
import struct, re
from binary16 import binaryreader, binarywriter

# Decryption / encryption of a single Gen-5 text container.
# `subline` is the visual separator written between an entry's label and body
# in the exported .txt (and stripped again on import).
subline = '\n------------------------------\n'


def gen5get(f, fillflag = False):
    # Decrypt a Gen-5 text container `f` (bytes) into a list of
    # [label, body] entries.  `fillflag` turns on the debug checks that
    # report padding between entries/blocks; it does not change the result.
    texts = []
    reader = binaryreader(f)
    # First 12 bytes — header layout:
    # [2B total block count, 2B entries per block, 4B max block size, 4B zero]
    numblocks = reader.read16()   # number of text blocks
    numentries = reader.read16()  # entries per block (same count in every block)
    filesize = reader.read32()    # size of the largest block
    zero = reader.read32()        # 4 bytes of zero, unused
    blockoffsets = []

    #print("filesize:",filesize)
    # Immediately after the header: one 4-byte offset per block
    # (relative to the start of the file).
    for i in range(numblocks):
        blockoffsets.append(reader.read32())
    # filesize == len(f)-reader.pos()
    for i in range(numblocks):  # walk each block via its offset
        reader.seek(blockoffsets[i])
        size = reader.read32()  # first 4B of a block: this block's size
        tableoffsets = []
        charcounts = []
        textflags = []
        # After the block size comes the entry table: 8 bytes per entry.
        for j in range(numentries):
            tableoffsets.append(reader.read32())  # 4B entry offset (block-relative)
            charcounts.append(reader.read16())    # 2B character count of the entry
            textflags.append(reader.read16())     # 2B flag word of the entry
        # After the entry table comes the text data itself.
        for j in range(numentries):  # read the entries one by one
            compressed = False
            encchars = []
            text = ""
            reader.seek(blockoffsets[i] + tableoffsets[j])  # seek to this entry's text
            # print(charcounts[j])
            for k in range(charcounts[j]):    # read char by char
                encchars.append(reader.read16())  # 2 bytes per char
            # The last char XOR 0xFFFF yields the initial decryption key.
            key = encchars[len(encchars) - 1] ^ 0xFFFF
            decchars = []
            # print("enchars:",len(encchars))
            while encchars:  # decrypt from the last char backwards
                char = encchars.pop() ^ key
                # rotate the 16-bit key right by 3 to get the previous char's key
                key = ((key >> 3) | (key << 13)) & 0xFFFF
                decchars.insert(0, char)  # keep output in original order
            # print("dechars:",len(decchars))
            if decchars[0] == 0xF100:  # 0xF100 at the front marks 9-bit packing
                print("存在压缩。")
                compressed = True
                decchars.pop(0)  # drop the compression marker
                newstring = []
                container = 0
                bit = 0
                # Unpack 9-bit values out of the 16-bit word stream.
                while decchars:
                    container |= decchars.pop(0) << bit
                    bit += 16
                    while bit >= 9:
                        bit -= 9
                        c = container & 0x1FF
                        if c == 0x1FF:  # 9-bit all-ones expands to the 0xFFFF terminator
                            newstring.append(0xFFFF)
                        else:
                            newstring.append(c)
                        container >>= 9
                decchars = newstring

            if fillflag:  # debug only: detect padding between entries / at block ends
                if j != numentries - 1:
                    if tableoffsets[j + 1] - tableoffsets[j] != 2 * charcounts[j]:
                        print('疑似存在填充,位置:', (blockoffsets[i] + tableoffsets[j]) + 2 * charcounts[j])
                        print("填充长度为:", tableoffsets[j + 1] - tableoffsets[j] - 2 * charcounts[j])
                        print("填充内容为:", f[tableoffsets[j] + 2 * charcounts[j]:tableoffsets[j + 1]])
                else:
                    if i != numblocks - 1:
                        if blockoffsets[i + 1] - blockoffsets[i] - tableoffsets[j] != 2 * charcounts[j]:
                            # size - tableoffsets[j] == blockoffsets[i+1]-tableoffsets[j]
                            print("当前分块长度为:", size)
                            print(blockoffsets[i], tableoffsets[j], charcounts[j])
                            print('疑似于块尾部存在填充,位置:', (blockoffsets[i] + tableoffsets[j]) + 2 * charcounts[j])
                            print("填充长度为:", blockoffsets[i + 1] - blockoffsets[i] - tableoffsets[j] - 2 * charcounts[j])
                            print("填充内容为:", f[(blockoffsets[i] + tableoffsets[j]) + 2 * charcounts[j]:blockoffsets[i + 1]])
                        else:
                            print("块尾部没有填充。块长度为:", size)
                    else:
                        if len(f) - (blockoffsets[i] + tableoffsets[j]) != 2 * charcounts[j]:
                            # size - tableoffsets[j] == len(f) - (blockoffsets[i]+tableoffsets[j])
                            print("当前分块长度为:", size)
                            print("文件总长度为:", len(f))
                            print('疑似于文件尾部存在填充,位置:', (blockoffsets[i] + tableoffsets[j]) + 2 * charcounts[j])
                            print("填充长度为:", len(f) - (blockoffsets[i] + tableoffsets[j]) - 2 * charcounts[j])
                            print("填充内容为:", f[(blockoffsets[i] + tableoffsets[j]) + 2 * charcounts[j]:len(f)])
                        else:
                            print("文件尾部没有填充。块长度为:", size)
                            print("文件总长度为:", len(f))

            while decchars:  # translate control codes into readable markup
                c = decchars.pop(0)
                if c == 0xFFFF:  # 0xFFFF terminates the entry
                    if fillflag:
                        if decchars:
                            print("第{}块的第{}分节后的填充为:{}".format(i, j, decchars))  # whatever remains is padding
                            print("分节总长度为:{},填充长度为:{}".format(charcounts[j], len(decchars)))
                        else:
                            print("第{}块的第{}分节没有填充。".format(i, j))
                            print("分节总长度为:{}".format(charcounts[j]))
                    break
                elif c == 0xFFFE:
                    text += "\n"  # \\n
                elif c < 20 or c > 0xFF60:  # keep raw codes; full-width CJK punctuation / latin sit in 0xFF00-0xFF60 and must pass through
                    text += "<" + "x%04X" % c + ">"  # \\x%04X%c
                elif c == 0xF000:
                    try:
                        kind = decchars.pop(0)
                        count = decchars.pop(0)
                        if kind == 0xbe00 and count == 0:
                            text += "\n"  # \\f
                            continue
                        if kind == 0xbe01 and count == 0:
                            text += "\n"  # \\r
                            continue
                        # NOTE(review): the repository dump this was recovered from
                        # stripped angle-bracketed markup; the original source lines
                        # between `text += "` and `>= 1` below (the VAR-tag emission,
                        # its except handler, the plain-character branch, and the
                        # flag-letter loop that defines `e` and `flag`) were lost.
                        # The fragment is reproduced verbatim — restore it from the
                        # upstream repository before running this file.
                        text += ">= 1
            if compressed:  # compression marker letter appended to the label flags
                flag += "c"
            # print([e,text])
            e += flag + subline
            text += subline
            texts.append([e, text])  # entry finished; add to the result list
            # print([e,text])
    return texts


def gen5put(texts):  # encrypt an entry list back into a single container file
    # `texts` is a list of [label, body] entries as produced by gen5get()/
    # maketxtput().  Returns the packed, encrypted file as bytes.
    textofs = {}
    sizes = {}
    comments = {}
    textflags = {}
    blockwriters = {}
    for entry in texts:
        # print(entry)
        for num in range(len(entry)):  # strip the visual separator lines
            entry[num] = entry[num].replace(subline, '')
        # print(entry)

        # Label format: "<blockid>_<textid><flag letters>"
        match = re.match("([^_]+)_([0-9]+)(.*)", entry[0])
        if not match:
            continue
        blockid = match.group(1)
        textid = int(match.group(2))
        flags = match.group(3)
        # print(blockid,textid,flags)
        text = entry[1]
        if blockid.lower() == "comment":
            comments[textid]
= text
            continue
        blockid = int(blockid)
        if blockid not in blockwriters:  # lazily create per-block writers/tables
            blockwriters[blockid] = binarywriter()
            textofs[blockid] = {}
            sizes[blockid] = {}
            textflags[blockid] = {}
        textofs[blockid][textid] = blockwriters[blockid].pos()
        dec = []
        # Parse the readable markup back into 16-bit control/character codes.
        while text:
            c = text[0]
            if c == '\n':
                # Re-insert the marker that the export replaced with a bare '\n'
                # (export uses '\n' for readability; import re-adds the tag).
                # NOTE(review): `'' + text` is a no-op — in the un-stripped
                # original this was almost certainly `'<n>' + text` (the dump
                # this file was recovered from deleted angle-bracketed text).
                # As written, a literal ord('\n') would be emitted instead of
                # 0xFFFE — confirm against the upstream repository.
                text = '' + text
                c = text[0]
            text = text[1:]
            if c == '<':  # a control tag follows
                c = text[0]
                text = text[1:]

                dr = text.find('>')  # strip the closing '>' added on export
                ltext = list(text)  ###xxxxxx
                ltext.pop(dr)
                # print(ltext)
                try:
                    if c != "V":  # a VAR tag may legitimately be followed by '\n'
                        if len(ltext) != 0 and ltext[dr] == '\n':
                            ltext.pop(dr)  # drop the '\n' that followed '>'
                except IndexError:
                    pass  # '>' was the last char and no '\n' follows: an x tag
                # print(ltext)
                text = ''.join(ltext)

                if c == 'x':  # <xNNNN>: raw 16-bit code in hex
                    n = int(text[:4], 16)
                    # print("n:",n)
                    # print("text:",text)
                    text = text[4:]
                    # print("text:",text)
                elif c == 'n':  # newline control code
                    n = 0xFFFE
                elif c == 'P' and text[:3] == 'AGE':  # <PAGE> -> F000 BE01 00
                    dec.append(0xF000)
                    dec.append(0xbe01)
                    dec.append(0)
                    # print("text:",text)
                    text = text[3:]
                    # print("text:",text)
                    continue
                elif c == 'F':  # <F...> page feed -> F000 BE00 00
                    dec.append(0xF000)
                    dec.append(0xbe00)
                    dec.append(0)
                    continue
                elif c == 'V':
                    if text[:2] == "AR":  # <VAR(kind,args...)>
                        text = text[3:]
                        eov = text.find(")")
                        args = list(map(int, text[:eov].split(",")))
                        text = text[eov + 1:]
                        dec.append(0xF000)
                        dec.append(args.pop(0))  # variable kind
                        dec.append(len(args))    # argument count
                        for a in args:
                            dec.append(a)
                    else:
                        dec.append(ord('V'))  # bare 'V', not a VAR tag
                    continue
                else:
                    n = 1  # unknown tag: placeholder code
                dec.append(n)

            else:
                dec.append(ord(c))  # plain character
        # print("dec:",len(dec))
        # Build the 16-bit flag word from the label's trailing letters A..P.
        flag = 0
        for i in range(16):
            if chr(65 + i) in flags:
                flag |= 1 << i
        textflags[blockid][textid] = flag
        if "c" in flags:  # re-apply 9-bit packing for entries exported as compressed
            print("存在压缩。")
            comp = [0xF100]  # compression marker
            container = 0
            bit = 0
            while dec:
                c = dec.pop(0)
                if c >> 9:  # values must fit in 9 bits
                    print("非法压缩字符: %i" % c)
                container |= c << bit
                bit += 9
                while bit >= 16:
                    bit -= 16
                    comp.append(container & 0xFFFF)
                    container >>= 16
            container |= 0xFFFF << bit  # terminator folded into the last word
            comp.append(container & 0xFFFF)
            dec = comp[:]

        # Key derivation: must stay masked to 16 bits; a zero key would
        # mean "no encryption".
        key = (0x7C89 + textid * 0x2983) & 0xFFFF
        enc = []
        # Encrypt char by char (forward rotation mirrors gen5get's reverse).
        while dec:
            char = dec.pop(0) ^ key
            # print("key:",key)
            key = ((key << 3) | (key >> 13)) & 0xFFFF
            enc.append(char)
        fills = [0xFFFF for num in range(len(enc))]  # plaintext filler words
        enc.append(key ^ 0xFFFF)  # every entry ends with an encrypted 0xFFFF terminator
        while fills:  # encrypt the filler and append it after the terminator
            key = ((key << 3) | (key >> 13)) & 0xFFFF
            fill = fills.pop() ^ key
            enc.append(fill)

        sizes[blockid][textid] = len(enc)
        # print("enc:",len(enc))
        for e in enc:  # queue every encrypted word for writing
            blockwriters[blockid].write16(e)
    numblocks = max(blockwriters) + 1
    if numblocks != len(blockwriters):  # block ids must be contiguous 0..n-1
        raise KeyError
    numentries = 0
    for block in blockwriters:
        numentries = max(numentries, max(textofs[block]) + 1)
    offsets = []
    baseofs = 12 + 4 * numblocks  # header + per-block offset table
    textblock = binarywriter()
    blocksizelist = []
    for i in range(numblocks):
        data = blockwriters[i].toarray()
        offsets.append(baseofs + textblock.pos())
        relofs = numentries * 8 + 4  # entry table + block-size word
        blocksize = len(data) * 2 + relofs
        print("blocksize:", blocksize)
        # NOTE(review): the original comment here said "% 16" but the code
        # pads to a multiple of 4 — the code is taken as authoritative.
        if blocksize % 4:
            print("4除块长度的余数为:", blocksize % 4)
            # Pad the block with encrypted 0xFFFF words up to a 4-byte boundary.
            fnum = blocksize % 4
            key = ((key << 3) | (key >> 13)) & 0xFFFF
            data.extend([key ^ 0xFFFF for num in range(int(fnum / 2))])
            blocksize = blocksize + fnum
            print("填充{}个0xFFFF后,块长度与4的余数为:{}".format(int(fnum / 2), blocksize % 4))
            print("blocksize:", blocksize)
        blocksizelist.append(blocksize)
        textblock.write32(blocksize)  # block size word
        for j in range(numentries):  # 8-byte entry table records
            textblock.write32(textofs[i][j] + relofs)  # 4B entry offset
            textblock.write16(sizes[i][j])             # 2B char count
            textblock.write16(textflags[i][j])         # 2B flag word
        textblock.writear(data)
    writer = binarywriter()  # assemble the file header
    writer.write16(numblocks)   # 2B total block count
    writer.write16(numentries)  # 2B entries per block
    print(blocksizelist)
    print("max(blocksizelist):", max(blocksizelist))
    writer.write32(max(blocksizelist))  # 4B largest block size (game memory hint)
    writer.write32(0)  # 4B zero padding
    for i in range(numblocks):
        writer.write32(offsets[i])
    writer.writear(textblock.toarray())
    return writer.tobytes()


def maketxtput(raw):  # raw is file.readlines()
    # Turn exported .txt lines back into the [label, body] entry list
    # that gen5put() accepts.
    flag = -1
    entry = []
    texts = []
    s = ''
    for i in range(len(raw)):
        match = re.match("([^_]+)_([0-9]+)(.*)", raw[i])
        if match:  # hit an entry label line
            if s != '':  # this label belongs to the NEXT entry
                entry.append(s)      # finish the previous entry's body
                texts.append(entry)  # store the finished entry
                entry = []           # start fresh for the next one
            s = raw[i]
            flag = 0
        elif flag == 0:  # previous line was a label
            s = s + raw[i]     # absorb the separator line
            entry.append(s)    # label part of the current entry is done
            s = ''
            flag = 1
        elif flag == 1:
            s = s + raw[i]
        # The logic above alone would drop the final entry:
        if i == len(raw) - 1:
            entry.append(s)      # finish the last entry's body
            texts.append(entry)  # and store it
    return texts


if __name__ == "__main__":  # Just for code testing
    # Export a single text bank to .txt ...
    filepath = 'B(JP)2-13'
    with open(filepath, 'rb') as f:
        texts = gen5get(f.read(),fillflag=True)
    with open(filepath + '.txt', 'w', encoding='utf16') as w:
        for line in texts:
            w.writelines(line)
    print(texts)
    # ... re-import it ...
    ifilepath = filepath + '.test'
    with open(filepath + '.txt', 'r', encoding='utf16') as txtf:
        raw = txtf.readlines()
    texts = maketxtput(raw)
    with open(ifilepath, 'wb') as f:
        f.write(gen5put(texts))

    # ... then export again to compare against the first export.
    with open(ifilepath, 'rb') as f:
        texts = gen5get(f.read(),fillflag=True)
    with open(ifilepath + '.txt', 'w', encoding='utf16') as w:
        for line in texts:
            w.writelines(line)


--------------------------------------------------------------------------------
/nftr.py:
--------------------------------------------------------------------------------
from struct import unpack,pack
# All sizes and offsets below are in bytes.
class FINF:
    # FINF (font info) section of an NFTR font file.
    def __init__(self,rawdata,FINFoffset):
        if len(rawdata)>0:
            self.magic = rawdata[:4]
            if self.magic != b"FNIF":  # stored little-endian, so reversed
                print(self.magic)
                raise NameError("FINF tag not found")
            self.header = unpack("IBBHBBBBIII", rawdata[4:28])
            self.offset = FINFoffset
        else:  # empty input: build a blank section
            self.magic = b"FNIF"
            self.header = [0 for i in range(11)]
    def getSize(self):
        return self.header[0]
    def getFontType(self):  # 0x0 - Bitmap, 0x1 - TGLP
        if self.header[1] == 0:
            return 'Bitmap'
        elif self.header[1] == 1:
            return 'TGLP'
        else:
            return self.header[1]
    def getLeft(self):
        # default glyph left bearing / height pair packed in the header
        return self.header[4]
    def getWidth(self):
        return self.header[5]
    def getAdvance(self):  # default glyph advance
        return self.header[6]
    def getEncoding(self):  # 0x0 - UTF-8, 0x1 - UTF-16, 0x2 - ShiftJIS, 0x3 - CP1252
        if self.header[7] == 0:
            return 'utf-8'
        elif self.header[7] == 1:
            return 'utf-16'
        elif self.header[7] == 2:
            return 'shift_jis'
        elif self.header[7] == 3:
            return 'cp1252'
        else:
            return self.header[7]
    def getCDWHinner_offset(self):
        return self.header[-2]
    def getCMAP_offset(self):
        return self.header[-1]
    def toString(self):
        # Re-pack the header for writing.
        ret = b"FNIF" +
pack("IBBHBBBBIII", self.header[0],self.header[1],
                            self.header[2],self.header[3],
                            self.header[4],self.header[5],self.header[6],
                            self.header[7],self.header[8],
                            self.header[9],self.header[10])
        return ret
class CGLP:
    # CGLP (glyph) section: header plus the raw glyph bitmaps.
    def __init__(self, rawdata, CGLPoffset):
        self.offset = CGLPoffset  # absolute offset of the CGLP section
        if len(rawdata)>0:
            self.magic = rawdata[:4]
            if self.magic != b"PLGC":
                print(self.magic)
                raise NameError("CGLP tag not found")
            self.header = unpack("IBBHBBBB", rawdata[4:16])
            self.bitmapdatasize = 3  # per-glyph metadata: 3 bytes before the bitmap
        else:
            self.magic = b"PLGC"
            self.header = [0 for i in range(8)]
        self.size = self.header[0]       # total CGLP size (including header)
        self.width = self.header[1]      # default glyph width
        self.height = self.header[2]     # default glyph height
        self.tfontsize = self.header[3]  # bytes per glyph record (metadata + bitmap)
        self.basline = self.header[4]
        self.Maxwidth = self.header[5]
        self.bpp = self.header[6]        # default colour depth (bits per pixel)
        rawdata = rawdata[16:]

        self.fonts = []      # glyph bitmaps only
        self.fontsdata = []  # per-glyph (left, width, advance) triples
        self.tfonts = []     # raw glyph records (metadata + bitmap)
        if len(rawdata) > 0:
            pos = 0
            i = 0
            while pos < self.size - 16:  # walk fixed-size glyph records
                datafont = rawdata[i*self.tfontsize:i*self.tfontsize + self.tfontsize]
                self.tfonts.append(datafont)
                self.fontsdata.append(unpack('BBB',datafont[:self.bitmapdatasize]))  # glyph metadata
                self.fonts.append(datafont[self.bitmapdatasize:])  # glyph bitmap
                i += 1
                pos += self.tfontsize

    def getCWDH_offset(self):
        # CWDH directly follows all glyph bitmap data.
        offset = self.size + self.offset
        #print(hex(offset))
        return offset
    def getFonts(self):
        return self.fonts
    def getFontsData(self):
        return self.fontsdata
    def toString(self):
        # Re-pack the header ...
        ret = b"PLGC" + pack("IBBHBBBB",self.size,self.width,
                             self.height,self.tfontsize ,
                             self.basline, self.Maxwidth ,self.bpp,
                             self.header[7])
        # ... then the metadata + bitmap of every glyph.
        for buffer in self.tfonts:
            ret += buffer
        l = len(ret)  # pad with zero bytes to a 4-byte boundary
        while l%4:
            l += 1
            ret += b"\x00"
        return ret

class CWDH:
    # CWDH (character width) section.  Widths are resolved through three
    # linked tables: table 1 (2-bit width indices, one per glyph), table 2
    # (the "cycle" table), and table 3 (explicit glyph-index -> width maps).
    def __init__(self,rawdata,CWDHoffset):
        self.offset = CWDHoffset
        self.widthtableComb = []  # table 1 as raw bytes (not split into 2-bit fields)
        self.widthtable = []
        self.cycletable = []
        self.arraytable =[]
        if len(rawdata) > 0:
            self.magic = rawdata[:4]
            if self.magic != b"HDWC":
                print(self.magic)
                raise NameError("CWDH tag not found")
            self.header = unpack('IBBBBIII',rawdata[4:24])
        else:
            self.magic = b"HDWC"
            self.header = [0 for i in range(8)]
        self.size = self.header[0]  # total CWDH length (including header)
        # Widths for table-1 indices 0..2; index 3 means "look up table 2".
        self.width0123dict = {0:self.header[1],1:self.header[2],2:self.header[3],3:self.header[4]}
        self.widthtable8offset = self.offset + self.header[5] + 8  # start of table 1
        self.cycletable8offset = self.offset + self.header[6] + 8  # start of table 2
        self.arraytable8offset = self.offset + self.header[7] + 8  # start of table 3
        print("widthtable8offset:",hex(self.widthtable8offset))
        print("cycletable8offset:",hex(self.cycletable8offset))
        print("arraytable8offset",hex(self.arraytable8offset))
        # Only tables 1 and 3 are directly indexed; table 2 depends on the
        # glyph order, which in turn follows table 1.

        rawdata = rawdata[24:]  # now positioned at the start of table 1

        widthtableSize = self.cycletable8offset - self.widthtable8offset
        cycletableSize = self.arraytable8offset - self.cycletable8offset
        arraytableSize = self.size - (self.header[7] + 8)
        #print(widthtableSize)
        #print(cycletableSize)
        #print(arraytableSize)
        if len(rawdata)>0:
            for i in range(widthtableSize):  # extract table 1
                k = 4
                B = rawdata[i]
                self.widthtableComb.append(B)
                #print(B)
                for j in range(k):  # four 2-bit width indices per byte, MSB first
                    self.widthtable.append(B >> (2 * (k-j-1)) & 3)
            print("len(self.widthtable):",len(self.widthtable))
            #print(self.widthtable)
            rawdata = rawdata[widthtableSize:]
        if len(rawdata)>0:  # extract table 2
            for i in range(cycletableSize):
                self.cycletable.append(rawdata[i])

            rawdata = rawdata[cycletableSize:]
        print("len(self.cycletable):",len(self.cycletable))
        if len(rawdata)>0:  # extract table 3
            i = 0
            while arraytableSize - i -1 > 0:
                arrays = []
                kvnum = rawdata[i]  # number of (index, width) pairs in this group
                arrays.append(kvnum)
                i += 1
                key_value = dict()
                while kvnum > 0:
                    loc = unpack('>H', rawdata[i:i+2])[0]  # glyph index is big-endian
                    i += 2
                    width = rawdata[i]
                    i += 1
                    key_value[loc] = width
                    kvnum -= 1
                arrays.append(key_value)
                self.arraytable.append(arrays)
                #print(arrays)
            print("len(self.arraytable):",len(self.arraytable))
        #print(self.arraytable)
        # Resolve tables 1-2-3 into the final per-glyph width table.
        self.WidthTable = []
        if self.arraytable and self.widthtable and self.cycletable:
            for i in range(len(self.widthtable)):
                #print(self.widthtable[i])
                widthid = self.width0123dict[self.widthtable[i]]
                tag = 1
                if widthid == 0:  # indirect: compute the table-2 index
                    # Table 1 is in glyph order, so the position IS the glyph index.
                    glyphidx = i
                    cycleid = glyphidx & 0x1FF ^ (8 * (((glyphidx >> 9) ^ (glyphidx >> 11) ^ (glyphidx >> 12) ^ (glyphidx >> 10)) & 1)) & 0x1FF
                    #print(cycleid)
                    cid = self.cycletable[cycleid]
                    # Interpret the byte as signed: negative selects table 3.
                    # NOTE(review): `> 128` leaves 0x80 as +128 rather than -128;
                    # a standard signed conversion would use `>= 128` — confirm
                    # whether 0x80 ever occurs in real fonts.
                    if cid > 128:
                        cid -= 256
                    if cid > 0:  # positive value is the width itself
                        tag = 2
                        self.WidthTable.append([cid,tag])
                    elif cid < 0:  # negative: index into table 3
                        tag = 3
                        #print(cid)
                        #print(abs(cid))
                        arr = self.arraytable[abs(cid)-1]  # -1: first group is index 0
                        flag = -1
                        for k in arr[1]:
                            if k == glyphidx:  # found this glyph's width in table 3
                                self.WidthTable.append([arr[1][glyphidx],tag])
                                flag = 0
                                break
                        if flag == -1:
                            print("存在不可查值,字模序为:{}".format(glyphidx))
                    else:
                        raise NameError("疑似出现字符宽度为0的状况")
                else:  # indices 0,1,2: direct dictionary width
                    self.WidthTable.append([widthid,tag])
        print("len(self.WidthTable):",len(self.WidthTable))

    def getSize(self):
        return self.header[0]
    def getWidthIDtable(self):
        return self.widthtable
    def getArrayIDtable(self):
        return self.arraytable
    def getWidthTable(self):
        return self.WidthTable
    def toString(self):
        # Re-pack the header ...
        ret = b"HDWC" + pack('IBBBBIII',self.size,self.header[1],
                             self.header[2],self.header[3],
                             self.header[4],self.header[5],
                             self.header[6],self.header[7])
        # ... then the three tables.
        for B in self.widthtableComb:
            ret += pack('B',B)
        for B in self.cycletable:
            ret += pack('B',B)
        for arr in self.arraytable:
            trans = pack("B",arr[0])
            for k in arr[1]:
                trans = trans + pack(">H",k) + pack('B',arr[1][k])
            ret += trans
        l = len(ret)  # pad with zero bytes to a 4-byte boundary
        while l%4:
            l += 1
            ret += b"\x00"
        return ret

class CMAP:
    # One CMAP (character map) section: maps glyph index -> character code.
    def __init__(self, rawdata):
        if len(rawdata) > 0:
            self.magic = rawdata[:4]
            if self.magic != b'PAMC':
                raise NameError("CMAP tag not found")
            self.header = unpack("IHHHHI",rawdata[4:20])
        else:
            self.magic = b'PAMC'
            self.header = [0 for i in range(6)]
        self.size = self.header[0]
        self.codeBegin = self.header[1]
        self.codeEnd = self.header[2]
        self.mapMethod = self.header[3]
        self.resever = self.header[4]
        self.nextCMAP8offset = self.header[5]
        # Build the glyph-index -> code table according to mapMethod.
        rawdata = rawdata[20:]
        self.CodeTableDict = dict()
        if self.mapMethod == 0x0:  # contiguous range from a starting glyph index
            glyphidx = unpack('I', rawdata[:4])[0]
            interval = self.codeEnd - self.codeBegin
            for i in range(interval+1):
                code = self.codeBegin + i
                self.CodeTableDict[glyphidx] = code
                glyphidx += 0x1
        elif self.mapMethod == 1:  # range with an explicit glyph index per code
            interval = self.codeEnd - self.codeBegin
            i = 0
            while i <= interval :
                glyphidx = unpack('H',rawdata[i*2:i*2+2])[0]
                if glyphidx != 0xFFFF:
                    code = self.codeBegin + i
                    self.CodeTableDict[glyphidx] = code
                else:
                    # 0xFFFF means "no glyph"; record a placeholder entry
                    # (8251 is ord('※'), used as the missing-glyph symbol).
                    self.CodeTableDict["NULL"+str(i)] =
8251#※
                i += 1
        elif self.mapMethod == 0x2:  # explicit [code, glyph index] pair list
            if self.codeBegin!=0x0000 or self.codeEnd!=0xffff:
                raise NameError("起始和终止码不是0x0000和0xFFFF:{},{}".format(self.codeBegin,self.codeEnd))
            totalnum = unpack("H",rawdata[:2])[0]
            rawdata = rawdata[2:]
            i = 0
            while i < totalnum:
                codeidx = unpack("HH",rawdata[i*4:i*4+4])
                self.CodeTableDict[codeidx[1]] = codeidx[0]
                i += 1
        else:
            raise NameError("mapMethod not defined")

    def getSize(self):
        return self.size
    def getBegin_FontCode(self):
        return self.codeBegin
    def getEnd_FontCode(self):
        return self.codeEnd
    def getNextCMAP_offset(self):
        return self.nextCMAP8offset - 8
    def toString(self):
        # Re-pack the header, then the mapping data per mapMethod.
        ret = b'PAMC' + pack("IHHHHI",self.size,self.codeBegin,
                             self.codeEnd,self.mapMethod,
                             self.resever,self.nextCMAP8offset)
        if self.mapMethod == 0x0:
            # pack the starting glyph index only
            fglyphidx = list(self.CodeTableDict.keys())[0]
            ret += pack('I',fglyphidx)
        elif self.mapMethod == 0x1:
            # pack one glyph index per code in the range
            for idx in self.CodeTableDict:
                if self.CodeTableDict[idx] == 0xffff:
                    ret += pack('H',0xFFFF)
                else:
                    ret += pack('H',idx)
        elif self.mapMethod == 0x2:
            # pack the pair count ...
            ret += pack("H",len(self.CodeTableDict))
            # ... then each [code, glyph index] pair
            for idx in self.CodeTableDict:
                trans = pack('H',self.CodeTableDict[idx]) + pack('H',idx)
                ret += trans
        else:
            raise NameError("mapMethod not defined")
        l = len(ret)  # pad with zero bytes to a 4-byte boundary
        while l%4:
            l += 1
            ret += b"\x00"
        return ret
class NFTR:
    # Whole NFTR font file: header + FINF + CGLP + CWDH + CMAP chain.
    def __init__(self, rawdata):
        self.size = len(rawdata)
        if len(rawdata)>0:
            self.magic = rawdata[:4]
            self.header = unpack("IIHH", rawdata[4:16])
            self.FINFoffset = self.header[2]
            if self.magic != b"RTFN":
                print(self.magic)
                raise NameError("NFTR tag not found")
        else:
            # NOTE(review): this branch never sets self.FINFoffset, so the
            # code below (and toString) would raise AttributeError for empty
            # input — confirm whether empty construction is ever used.
            self.magic = b"RTFN"
            self.header = [0 for i in range(4)]
        rawdata = rawdata[16:]
        # Parse the sections in file order, consuming rawdata as we go.
        self.finf = FINF(rawdata, self.FINFoffset)
        CGLPoffset = self.FINFoffset + self.finf.getSize()
        rawdata = rawdata[self.finf.getSize():]
        self.cglp = CGLP(rawdata, CGLPoffset)
        self.fontsdata = self.cglp.getFontsData()
        self.bitmaps = self.cglp.getFonts()
        CWDHoffset = self.cglp.getCWDH_offset()
        rawdata = rawdata[self.cglp.size:]
        self.cwdh = CWDH(rawdata,CWDHoffset)
        self.WidthTable = self.cwdh.WidthTable
        rawdata = rawdata[self.cwdh.getSize():]
        # CMAP sections form a linked list via their "next CMAP" offsets.
        cmap = CMAP(rawdata)
        self.CMAPTable = [cmap]
        rawdata = rawdata[cmap.getSize():]
        while cmap.getNextCMAP_offset() < self.size:
            cmap = CMAP(rawdata)
            self.CMAPTable.append(cmap)
            rawdata = rawdata[cmap.getSize():]
            if len(rawdata) == 0:
                print("nftr文件的全部数据处理完毕。")
                break
    def toString(self):
        # Re-pack the file header ...
        ret = b"RTFN" +pack("IIHH",self.header[0],self.header[1],self.FINFoffset,self.header[3])
        # ... then FINF + CGLP + CWDH + every CMAP, in order.
        ret += self.finf.toString() + self.cglp.toString() + self.cwdh.toString()
        for cmap in self.CMAPTable:
            ret += cmap.toString()
        return ret
    def toFile(self, f):
        f.write(self.toString())

if __name__ =="__main__":  # Just for code testing
    import csv
    filepath = 'a023_extr/a023-0'
    with open(filepath,'rb')as f:
        f.seek(0)
        rawdata = f.read()
        print("len(rawdata)",len(rawdata))
    nftr = NFTR(rawdata)
    # Dump per-glyph metadata to CSV.
    with open(filepath + "宽度表.csv","w",newline="")as w:
        writer = csv.writer(w)
        writer.writerow(["loc","left","width","advance"])
        for i in range(len(nftr.fontsdata)):
            l = [i]
            l.extend(nftr.fontsdata[i])
            writer.writerow(l)
    # Dump the resolved CWDH width table to CSV.
    with open(filepath + "CWDH.csv","w",newline="")as w:
        writer = csv.writer(w)
        writer.writerow(["loc","width","tag"])
        widthtable = nftr.cwdh.WidthTable
        for i in range(len(widthtable)):
            trans = [i]
            trans.extend(widthtable[i])
            writer.writerow(trans)
    # Dump the raw glyph bitmaps.
    with open(filepath + ".bitmap","wb")as w:
        for bitmap in nftr.bitmaps:
            w.write(bitmap)
    # Dump each CMAP as a "glyphindex=char" text file.
    i = 0
    for cmap in nftr.CMAPTable:
        idxs = list(cmap.CodeTableDict.keys())
        codes = list(cmap.CodeTableDict.values())
        with open(filepath +"序码表_"+str(i)+ ".txt","w",encoding="utf16")as w:
            for j in range(len(idxs)):
                s = str(idxs[j]) + "=" + chr(codes[j])+"\n"
                w.write(s)
        i += 1


--------------------------------------------------------------------------------
/FreetypeMakeFonts.py:
--------------------------------------------------------------------------------
import freetype,struct
from GlyphEntry import GlyphEntry
# FreeType renders SIMSUN2.TTC-style fonts as 1bpp by default (2 bytes per
# scanline); expanding to N bpp makes each scanline 2*N bytes.  Most other
# fonts are rendered at 8bpp by default.
def CharBitmapCreator(char,FONT,fontsize = 12,blod = False):
    # Render a single character `char` from font file `FONT` at `fontsize`
    # into a GlyphEntry (1bpp for mono fonts; scanline width is 16 pixels,
    # glyph aligned left/middle).  `blod` appends one blank scanline used
    # later by the bold/outline pipeline.
    face = freetype.Face(FONT)
    face.set_char_size( fontsize*64 )  # FreeType uses 26.6 fixed point
    face.load_char(char)
    buffer = []  # for 1bpp fonts, prepending [0,0] inserts one blank row
    bitmap = face.glyph.bitmap
    data, rows, width, top, left = bitmap.buffer, bitmap.rows, bitmap.width, face.glyph.bitmap_top, face.glyph.bitmap_left
    #print("默认生成的(宽度,长度):",width, rows)
    #print(len(data))
    #print(data)
    if fontsize < 11:
        if bitmap.pixel_mode == 1:  # only rotate rows for 1bpp (mono) fonts
            # freetype.FT_PIXEL_MODES
            # {'FT_PIXEL_MODE_NONE': 0,'FT_PIXEL_MODE_MONO'(1bpp): 1,
            #  'FT_PIXEL_MODE_GRAY'(8bpp): 2, 'FT_PIXEL_MODE_GRAY2'(2bpp): 3,
            #  'FT_PIXEL_MODE_GRAY4'(4bpp): 4,'FT_PIXEL_MODE_LCD': 5,
            #  'FT_PIXEL_MODE_LCD_V': 6, 'FT_PIXEL_MODE_MAX': 7}
            buffer.extend(data[2:])   # move the first scanline (2 bytes) ...
            buffer.extend(data[:2])   # ... to the end of the glyph
        else:
            buffer.extend(data)
    else:
        buffer.extend(data)
    if blod:
        buffer.extend([0,0])  # extra blank scanline for the bold pipeline
    glyph = GlyphEntry(width=width, rows=rows,buffer=buffer, top=top, left=left)
    return glyph

def combineBytes(buffer,bpp):
    # Merge a flat byte list into a list of scanline integers (one int per
    # row).  Only for fonts expanded from 1bpp; the stride 2*bpp bytes/row
    # was derived empirically from the 1bpp and 2bpp cases.
    cmb = []
    for i in range(0, len(buffer)-bpp, 2*bpp):
        I = 0
        for j in range(2*bpp):
            I = I | (buffer[i+(2*bpp-1 - j)] << 8*j)  # big-endian within a row
        cmb.append(I)
    return cmb

def divedeCmb(cmb,bpp):
    # Inverse of combineBytes: split scanline integers back into a byte list.
    buffer = []
    Bn = 2*bpp
    Bn += 1  # guarantees exactly 2*bpp shifts below
    for i in range(len(cmb)):
        Blist = []
        for j in range(Bn):
            if (Bn - 2 - j) < 0:  # stop once all 2*bpp bytes are emitted
                break
            B = (cmb[i] >> 8*(Bn - 2 - j)) & 0xFF  # last shift amount is 0
            Blist.append(B)
        buffer.extend(Blist)
    return buffer

# The four helpers below shift a glyph (as combineBytes scanline ints)
# by one pixel in each direction.  Right/Left shift within a row; Down/Up
# rotate whole rows (the wrapped row is expected to be blank).
def Rightbuffer(rawList,bpp):
    RbufferList = []
    for raw in rawList:
        r = raw >> bpp  # one pixel right == bpp bits right
        RbufferList.append(r)
    return RbufferList

def Downbuffer(rawList):
    DbufferList = []
    DbufferList.extend(rawList[-1:])  # move the (blank) last row to the top
    DbufferList.extend(rawList[:-1])  # slice excludes the moved row
    return DbufferList

def Leftbuffer(rawList,bpp):
    LbufferList = []
    for raw in rawList:
        r = raw << bpp  # one pixel left == bpp bits left
        LbufferList.append(r)
    return LbufferList

def Upbuffer(rawList):
    #print(rawList)
    UbufferList =[]
    UbufferList.extend(rawList[1:])  # move the first row to the bottom
    UbufferList.extend(rawList[:1])  # extend needs an iterable, hence the slice
    return UbufferList

def ByteDivToInt(Byte,keep,divnum):
    # Split ONE byte into int(8/divnum) small ints (possible counts: 1,2,4),
    # keeping `keep` binary digits of each.  Returns the byte unchanged when
    # divnum > 4 (nothing to split).
    bits = []
    if divnum > 4:  # int(8/divnum) == 1
        return Byte  # effectively unsplit
    if divnum == 1:  # 8 ints of 1 bit each, MSB first
        for i in range(8):
            bit = (Byte >> (7-i)) & keep
            bits.append(bit)
    elif divnum in [2, 4]:
        flag = 0
        if divnum == 2:  # 4 ints of 2 bits each
            mask = 0x3  # 2bpp: keep 2 bits per field
        else:            # 2 ints of 4 bits each
            mask = 0xF  # 4bpp: keep 4 bits per field
        while Byte:
            bits.insert(0, Byte & mask)  # insert keeps MSB-first order
            Byte = Byte >> divnum
            flag += 1
        if 8 - flag*divnum:  # leading zero fields were skipped: pad them back
            #print(int((8 - flag*bpp)/bpp))
            for i in range(int((8 - flag*divnum)/divnum)):
                bits.insert(0, 0)

    else:
        raise TypeError("无法将Byte平均拆成{}个。".format(divnum))
    return bits
def combinebpp(pixel,bpp):
    # Inverse of ByteDivToInt: recombine a flat list of bpp-wide pixel values
    # into bytes.
    Bytelist = []
    flag = 0
    B = 0
    divnum = int(8/bpp)
    if bpp == 1:  # same logic as the general branch, written differently
        for i in range(len(pixel)):
            if flag == 7:
                B = B | pixel[i]
                Bytelist.append(B)
                B = 0
                flag = 0
            else:
                pixel[i] = pixel[i] << (7 - flag)
                B = B | pixel[i]
                flag += 1
    else:
        for i in range(0, len(pixel)-(divnum-1), divnum):
            B = 0
            for j in range(divnum):
                B = B | pixel[i+j] << ((divnum-1)-j)*bpp  # MSB-first packing
            Bytelist.append(B)
    return Bytelist

def debpp(buffer,width,bpp = 8, dbpp = 4,method = 'gamma'):
    # Downgrade a high-bpp glyph to a lower bpp (normally 8bpp -> dbpp),
    # using gamma / linear / log tone mapping per pixel, then repack.
    Bytes = []
    pixels = []
    def linear_scale(value):
        return int(value/(2**bpp-1)*(2**dbpp-1))
    import math
    def log_scale(value):
        return int((2**dbpp-1) * (math.log(value + 1) / math.log((2**bpp))))
    def gamma_correct(value,gamma=2.2):
        normalized = value / (2**bpp-1)
        corrected = normalized ** (1.0 / gamma)
        return int(corrected * (2**dbpp-1))
    # NOTE(review): the second condition is redundant given bpp == 8.
    if bpp == 8 and bpp % 2 == 0:
        divnum = int(bpp/dbpp)  # how many dbpp pixels fit in one output byte
        #print(divnum)
        for B in buffer:
            if method == 'gamma':
                value = gamma_correct(B)
            elif method == 'linear':
                value = linear_scale(B)
            elif method == "log":
                value = log_scale(B)
            else:
                value = gamma_correct(B)  # unknown method: fall back to gamma

            pixel = value & 0xFF
            Bytes.append(pixel)
        if (width%int(8/divnum))%2:  # odd row length: pad every row by one
            #print(width,width%int(8/divnum))
            NBytes = []
            for i in range(0,len(Bytes),width):
                NBytes.extend(Bytes[i:i+width])
                NBytes.append(0)  # padding pixel
            Bytes = NBytes
        for i in range(0,len(Bytes),divnum):  # pack divnum pixels per byte
            NB = 0
            for j in range(divnum):
                NB = NB | (Bytes[i+(divnum-1-j)] << dbpp*j) & 0xFF
            pixels.append(NB)
    else:
        print("还未有定义8bpp字体之外的降级方法。")
        pixels = []
    return pixels
def trans2bpp(buffer,keep = 1):
    # Expand a 1bpp glyph to 2bpp.  keep == 1 gives no shadow; keep == 3
    # keeps a right-shadow bit.
    converted_data = []  # resulting 2bpp index data
    for value in buffer:
        # split the byte into 8 single-bit pixel indices
        pixel = ByteDivToInt(value, keep,1)

        for i in range(8):
            if pixel[i] == 3:  # palette index 3 is unused by the font
                pixel[i] = 1

        ByteList = combinebpp(pixel,2)

        converted_data.extend(ByteList)
    #print(converted_data)
    return converted_data

def changecolor(rawList,bpp=2):
    # Input: scanline ints from combineBytes.  Output: byte list with every
    # non-zero palette index advanced by one (cyclically), i.e. recoloured.
    buffer = divedeCmb(rawList,bpp)  # back to a byte list
    pixels = []
    mask = 2**bpp-1  # 2bpp: 0x3 keeps 2 bits
    for B in buffer:  # split into bpp-wide pixels
        pixels.extend(ByteDivToInt(B,bpp,bpp))
    for i in range(len(pixels)):  # recolour
        if pixels[i] != 0:
            #print(pixels[i])
            # +1 then &mask cycles the palette index within bpp bits
            pixels[i] = (pixels[i] + 1) & mask
    # repack into bytes
    Newbuffer = combinebpp(pixels,2)
    return Newbuffer

def combShadow2bpp(sbuffer1,sbuffer2):
    # OR two 2bpp shadow glyph byte lists together, e.g. a right shadow and
    # a down shadow combine into a right-down shadow.
    pixels1 = []
    pixels2 = []
    resultpixels = []
    for i in range(len(sbuffer1)):
        pixels1.extend(ByteDivToInt(sbuffer1[i],2,2))
        pixels2.extend(ByteDivToInt(sbuffer2[i],2,2))
    for i in range(len(pixels1)):
        trans = pixels1[i] | pixels2[i]
        resultpixels.append(trans)
    Newbuffer = combinebpp(resultpixels,2)
    return Newbuffer

def fillShadow2bpp(buffer,shadowbuffer):
    # Merge a glyph byte list with its shadow byte list (same length):
    # shadow pixels survive only where the glyph is empty.
    pixels = []
    shadowpixels = []
    resultpixels = []
    for i in range(len(buffer)):
        pixels.extend(ByteDivToInt(buffer[i],2,2))
        shadowpixels.extend(ByteDivToInt(shadowbuffer[i],2,2))
    for i in range(len(pixels)):
        if pixels[i] and shadowpixels[i]:
            trans = (pixels[i] ^ shadowpixels[i]) >> 1  # overlap: keep only the high bit
        else:
            trans = pixels[i] ^ shadowpixels[i]  # XOR keeps whichever is non-zero
        resultpixels.append(trans)
    Newbuffer = combinebpp(resultpixels,2)
    return Newbuffer
def full_Q(glyph, Awidth, bpp , baseline = None):
    # Pad a GlyphEntry (width <= Awidth) into a square Awidth x Awidth glyph
    # byte list, honouring the glyph's left bearing and top (baseline).
    # Used for general 8bpp fonts; Awidth must be >= the glyph's width/height.
    buffer, width, height, top, left = glyph.buffer, glyph.width, glyph.rows, glyph.top, glyph.left
    Width = Awidth
    #print(glyph.width, glyph.rows)
    if baseline:
        baseline = baseline
    else:
        baseline = Awidth - int(Awidth * 0.2)  # default baseline at 80% height
    if bpp < 8:  # bpp was downgraded: rows are packed, multiple pixels/byte
        flag = 0
        if (width % (8 / bpp))%2:  # odd remainder: debpp already padded rows
            width += int(width % (8 / bpp))
        ColoBytesNum = int(width /(8 / bpp))  # bytes per scanline
        Width = int(Awidth/(8 / bpp))
    else:
        flag = 1
        ColoBytesNum = width
    addw = Width - ColoBytesNum  # bytes of padding per row
    AddRow = [0 for i in range(Width)]
    Newbuffer = []
    # Widen: append addw zero bytes to every row.
    for i in range(0,height):
        Newbuffer.extend(buffer[ColoBytesNum*i:ColoBytesNum*i+ColoBytesNum])
        Newbuffer.extend([0 for j in range(addw)])
    # Horizontal placement per the glyph's left bearing.
    SNbuffer = []
    # Split Newbuffer into rows of length Awidth (len(Newbuffer) divides evenly).
    for i in range(Awidth):
        SNbuffer.append(Newbuffer[i*Awidth:i*Awidth+Awidth])
    if flag:  # not downgraded: rotate whole bytes per row
        if left > 0:
            for i in range(len(SNbuffer)):
                SNbuffer[i] = SNbuffer[i][-left:] + SNbuffer[i][:-left]
    else:
        # Downgraded bpp: unpack each row to pixels, rotate, repack.
        def divcolo(colo):  # split one row into bpp-wide pixel values
            bits = []
            for B in colo:
                bits.extend(ByteDivToInt(B,bpp,bpp))
            return bits
        if left > 0:
            for i in range(len(SNbuffer)):
                bits = divcolo(SNbuffer[i])
                bits = bits[-left:] + bits[:-left]
                SNbuffer[i] = combinebpp(bits, bpp)
    Newbuffer = []
    for colo in SNbuffer:
        Newbuffer.extend(colo)
    #print(len(Newbuffer)/12)
    # Vertical placement: pad fnum blank rows above (per top/baseline) and
    # lnum blank rows below.
    #print(top)
    fnum = baseline - top
    lnum = Awidth - (fnum + height)
    #print(fnum,lnum)
    #print(fnum + lnum + height)
    if fnum + lnum + height > Awidth:
        raise ValueError(f"当前处理字符的top为:{top},需要{fnum + lnum + height}的绘制框大小,但实际指定字体大小为{Awidth}。"
                         f"\n给定baseline与指定预生成的字体大小不符合绘制要求,请重新规定baseline操作。")
    #print(fnum, lnum)
    transbuffer = []
    for i in range(fnum):
        transbuffer.extend(AddRow)
    for i in range(lnum):
        Newbuffer.extend(AddRow)
    glyph.buffer = transbuffer + Newbuffer
    #print(len(glyph.buffer))
    return glyph  # glyph mutated in place and returned

def reshape16(buffer,width,height,bpp,blod = False):
    # Crop/pad an unpadded glyph byte list (rendered at the fixed 16-pixel
    # scanline width) to the requested width x height.  Only for fonts
    # expanded from 1bpp.
    if (width * width)*bpp/8 > len(buffer):
        raise KeyError(f"指定的width:{width}与字模大小不匹配,width * height应该小于{len(buffer)}")
    transbuffer = combineBytes(buffer,bpp)
    addh= height-len(transbuffer)
    pixelnum = 16  # rendered scanlines are always 16 pixels wide
    if addh >= 0:
        for i in range(addh):
            transbuffer.append(0)  # pad blank rows at the bottom
    else:
        for i in range(-addh):
            transbuffer.pop(-1)  # drop excess rows
    if blod:  # bolded glyphs: rotate so the glyph sits flush at the top
        ntbuffer = transbuffer[1:]
        ntbuffer.extend(transbuffer[:1])
        transbuffer = ntbuffer
    for i in range(len(transbuffer)):
        # shift out the unused right-hand pixels (default width is 16)
        transbuffer[i] = transbuffer[i] >> (pixelnum - width)*bpp
    transbuffer = divedeCmb(transbuffer,bpp)  # back to a byte list
    bitsbuffer = []
    for i in range(len(transbuffer)):  # split into bpp-wide pixel values
        bitsbuffer.extend(ByteDivToInt(transbuffer[i],keep=bpp,divnum=bpp))
    delnum = pixelnum - width
    if pixelnum - delnum < 1:
        raise ValueError("预裁剪部分超出可裁剪范围!")

    # Crop at pixel granularity: mark the leading delnum pixels of each
    # 16-pixel row, then keep everything unmarked.
    bits = []
    if delnum:
        for i in range(0,len(bitsbuffer),pixelnum):
            for j in range(delnum):
                bitsbuffer[i+j] = "NULL"
        for b in bitsbuffer:
            if b != "NULL":
                bits.append(b)
        #print(len(bits))
        if (len(bits)*bpp) % 8:  # round up to a whole byte with zero pixels
            for i in range(8 - int(((len(bits)*bpp) % 8)/bpp)):
                bits.append(0)
        #print(len(bits))
    # NOTE(review): when delnum == 0 (width == 16) `bits` stays empty and an
    # empty buffer is returned — confirm callers never request width 16, or
    # whether the original indentation placed the filtering at function level.
    Newbuffer = combinebpp(bits,bpp)
    return Newbuffer

def blodShadow(buffer,bpp):
    # Input: glyph byte data (assumed flush left).  Output: byte data with a
    # full outline ("bold edge") built from eight shifted, recoloured copies.
    raw = combineBytes(buffer,bpp)
    rraw = Rightbuffer(raw,bpp)
    rrraw = Rightbuffer(rraw,bpp)
    draw = Downbuffer(raw)
    ddraw = Downbuffer(draw)
    rdraw = Downbuffer(rraw)
    rdlraw = Leftbuffer(rdraw,bpp)
    rdrraw = Rightbuffer(rdraw,bpp)
    rddraw = Downbuffer(rdraw)
    rrdraw = Downbuffer(rrraw)
    rrddraw = Downbuffer(rrdraw)
    rdbuffer = divedeCmb(rdraw,bpp)
    # Recolouring must happen AFTER the shifts.
    cbuffer = changecolor(raw,bpp)
    crbuffer = changecolor(rraw,bpp)
    cddbuffer = changecolor(ddraw,bpp)
    crddbuffer = changecolor(rddraw,bpp)
    crrbuffer = changecolor(rrraw,bpp)
    crrddbuffer = changecolor(rrddraw,bpp)
    crdrbuffer = changecolor(rdrraw,bpp)
    crdlbuffer = changecolor(rdlraw,bpp)

    shalist = [cbuffer,crbuffer,cddbuffer,crddbuffer,crrbuffer,crrddbuffer,crdrbuffer,crdlbuffer]
    blodbuffer = cbuffer
    if bpp == 2:
        for i in range(len(shalist)):
            blodbuffer = combShadow2bpp(blodbuffer,shalist[i])  # all-direction shadow
        # Apply the shadow to the right-down-shifted glyph to form the edge.
        blodbuffer = fillShadow2bpp(rdbuffer,blodbuffer)
    return blodbuffer

def rightdownShadow(buffer,bpp):
    # Like blodShadow but builds only a right-down drop shadow from three
    # shifted, recoloured copies, applied to the unshifted glyph.
    raw = combineBytes(buffer,bpp)
    rraw = Rightbuffer(raw,bpp)
    draw = Downbuffer(raw)
    rdraw = Downbuffer(rraw)
    crbuffer = changecolor(rraw,bpp)  # recolour only after shifting
    cdbuffer = changecolor(draw,bpp)
    crdbuffer = changecolor(rdraw,bpp)
    shalist = [crbuffer,cdbuffer,crdbuffer]
    rdbuffer = crbuffer
    if bpp == 2:
        for i in range(len(shalist)):
            rdbuffer = combShadow2bpp(rdbuffer,shalist[i])
        rdbuffer = fillShadow2bpp(buffer,rdbuffer)
    return rdbuffer



if __name__ == "__main__":  # This is just for code testing
    def bpp1To2test():  # end-to-end test of the 1bpp -> 2bpp SIMSUN2 pipeline
        char = "白"
        FONT = 'SIMSUN2.TTC'
        fontsize = 10
        bitmapbuffer = CharBitmapCreator(char,FONT,fontsize = fontsize).buffer
        data = trans2bpp(bitmapbuffer,1)
        reshapdata = reshape16(data,width=fontsize+1,height=fontsize+3,bpp=2)
        mainbuffer = combineBytes(data,2)
        rraw = Rightbuffer(mainbuffer,2)
        draw = Downbuffer(mainbuffer)
        rdraw = Downbuffer(rraw)
        rdlraw = Leftbuffer(rdraw,2)
        rdluraw = Upbuffer(rdlraw)
        rbuffer = divedeCmb(rraw,2)
        dbuffer = divedeCmb(draw,2)
        rdbuffer = divedeCmb(rdraw,2)
        rdlbuffer = divedeCmb(rdlraw,2)
        rdlubuffer = divedeCmb(rdluraw,2)
        rcolorbuffer = changecolor(rraw,2)  # recolour only after shifting
        dcolorbuffer = changecolor(draw,2)
        rdclorbuffer = changecolor(rdraw,2)
        rdshabuffer = combShadow2bpp(rcolorbuffer,dcolorbuffer)
        rdshabuffer = combShadow2bpp(rdshabuffer,rdclorbuffer)
        rd_buffer = rightdownShadow(data,2)
        bloddata = trans2bpp(CharBitmapCreator(char,FONT,fontsize = fontsize,blod=True).buffer,1)
        blod_buffer = blodShadow(bloddata,2)

        reshapebuffer = reshape16(blod_buffer,width=fontsize+1,height=fontsize+3,bpp=2,blod=True)
        buffer = fillShadow2bpp(data,rdshabuffer)
        datas =
[data,rbuffer,dbuffer,rdlbuffer,rdlubuffer, 435 | rcolorbuffer,dcolorbuffer,rdshabuffer,buffer,rd_buffer,blod_buffer] 436 | with open('testfont.bin','wb')as f: 437 | for data in datas: 438 | for i in data: 439 | f.write(struct.pack('B',i)) 440 | reshapelist = [reshapdata,reshapebuffer] 441 | with open("testfont",'wb') as f: 442 | for d in reshapelist: 443 | for i in d: 444 | f.write(struct.pack('B',i)) 445 | def bpp8To4test_cit():#测试拆bpp的方法ByteDivToInt和combinebpp 446 | char = "塔" 447 | FONT = 'FZKT_GBK.ttf' 448 | fontsize = 24 449 | font = CharBitmapCreator(char,FONT,fontsize = fontsize) 450 | bitmapbuffer = font.buffer 451 | print(font.width,font.rows) 452 | sourcebuffer = debpp(bitmapbuffer,width=font.width, bpp = 8, dbpp = 4,method = 'gamma') 453 | pxi = [] 454 | for B in sourcebuffer: 455 | pxi.extend(ByteDivToInt(B,4,4)) 456 | font.buffer = combinebpp(pxi,4) 457 | with open('testfont_8to4bpp_cit','wb')as f: 458 | for data in font.buffer: 459 | f.write(struct.pack('B',data)) 460 | def bpp8To4test():#8bpp到4bpp的字体降级测试 461 | char = "岸" 462 | FONT = 'FZKT_GBK.ttf' 463 | fontsize = 24 464 | baseline = 21 465 | font = CharBitmapCreator(char,FONT,fontsize = fontsize) 466 | bitmapbuffer = font.buffer 467 | print(font.width,font.rows) 468 | sourcebuffer = debpp(bitmapbuffer,width=font.width, bpp = 8, dbpp = 4,method = 'gamma') 469 | font.buffer = sourcebuffer 470 | qbuffer = full_Q(font,fontsize,4,baseline=baseline).buffer 471 | with open('testfont_8to4bpp','wb')as f: 472 | for data in sourcebuffer: 473 | f.write(struct.pack('B',data)) 474 | with open('testfont_8to4bpp_Q',"wb")as f: 475 | for data in qbuffer: 476 | f.write(struct.pack('B',data)) 477 | 478 | #bpp1To2test() 479 | #bpp8To4test() 480 | #bpp8To4test_cit() 481 | --------------------------------------------------------------------------------