├── .env
├── .gitattributes
├── .github
│   └── workflows
│       ├── dol-beta-lastest.yml
│       └── dol-compile.yml
├── .gitignore
├── LICENSE
├── README.md
├── __init__.py
├── build.py
├── data
│   ├── img
│   │   └── banner.png
│   ├── jsmodule
│   │   ├── acorn
│   │   │   ├── CHANGELOG.md
│   │   │   ├── LICENSE
│   │   │   ├── README.md
│   │   │   ├── dist
│   │   │   │   ├── acorn.d.mts
│   │   │   │   ├── acorn.d.ts
│   │   │   │   └── acorn.js
│   │   │   └── package.json
│   │   └── format.js
│   └── json
│       ├── blacklists.json
│       └── whitelists.json
├── main.py
├── requirements.txt
├── src
│   ├── __init__.py
│   ├── _wip_parse_texts
│   │   ├── __init__.py
│   │   ├── consts.py
│   │   ├── log.py
│   │   └── main.py
│   ├── ast_javascript
│   │   ├── __init__.py
│   │   └── acorn.py
│   ├── consts.py
│   ├── download.py
│   ├── log.py
│   ├── paratranz.py
│   ├── parse_text.py
│   ├── project_dol.py
│   └── tools
│       ├── __init__.py
│       ├── build_release
│       │   ├── __init__.py
│       │   ├── apk-build-tools
│       │   │   ├── cmdline-tools
│       │   │   │   └── latest.zip
│       │   │   └── gradle
│       │   │       └── gradle.zip
│       │   ├── consts.py
│       │   ├── download.py
│       │   ├── log.py
│       │   ├── run_build.py
│       │   └── run_credits.py
│       ├── process_changelog
│       │   ├── __init__.py
│       │   └── main.py
│       └── process_variables
│           ├── __init__.py
│           └── main.py
└── test.py
/.env:
--------------------------------------------------------------------------------
1 | CHINESE_VERSION=
2 | SOURCE_TYPE=common
3 | PARATRANZ_TOKEN=
4 | GITHUB_ACCESS_TOKEN=
5 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NumberSir/vrelnir_localization/526475b4a1e9b23f0708c066ee5d68f955b14b4d/.gitattributes
--------------------------------------------------------------------------------
/.github/workflows/dol-beta-lastest.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: DOL Chinese Compile Beta Latest
5 |
6 | on:
7 | schedule:
8 | - cron: "0 18 * * *"
9 | workflow_dispatch:
10 |
11 | env:
12 | PARATRANZ_TOKEN: ${{secrets.PARATRANZ_TOKEN}}
13 | GIT_EMAIL: ${{secrets.GIT_EMAIL}}
14 | GIT_USERNAME: ${{secrets.GIT_USERNAME}}
15 | GIT_REPO: ${{secrets.GIT_REPO}}
16 | GIT_OWNER: ${{secrets.GIT_OWNER}}
17 |
18 | permissions:
19 | contents: read
20 |
21 | jobs:
22 | build:
23 | runs-on: ubuntu-latest
24 | steps:
25 | - uses: actions/checkout@v3
26 | with:
27 | ref: ${{ github.head_ref }}
28 | fetch-depth: 0
29 | token: ${{ secrets.ACCESS_TOKEN }}
30 | - name: Set up Python 3.10
31 | uses: actions/setup-python@v3
32 | with:
33 | python-version: "3.10"
34 | cache: 'pip'
35 | - name: Install dependencies
36 | run: |
37 | python -m pip install --upgrade pip
38 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
39 | - name: Clone PRIVATE repository
40 | uses: GuillaumeFalourd/clone-github-repo-action@v2
41 | with:
42 | owner: '${{env.GIT_OWNER}}'
43 | repository: '${{env.GIT_REPO}}'
44 | access-token: ${{ secrets.ACCESS_TOKEN }}
45 | - name: Run Python Script
46 | run: |
47 | python -m build -D beta
48 | - name: Commit files
49 | run: |
50 | git config --global user.email "${{env.GIT_EMAIL}}"
51 | git config --global user.name "${{env.GIT_USERNAME}}"
52 | cd dol-chinese
53 | git add .
54 | git commit -m "测试版更新"
55 | - name: Push changes
56 | uses: ad-m/github-push-action@master
57 | with:
58 | github_token: ${{ secrets.ACCESS_TOKEN }}
59 | repository: '${{env.GIT_OWNER}}/${{env.GIT_REPO}}'
60 | directory: ${{env.GIT_REPO}}
61 | force_with_lease: true
62 |
--------------------------------------------------------------------------------
/.github/workflows/dol-compile.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: DOL Chinese Compile
5 |
6 | on:
7 | schedule:
8 | - cron: "0 18 * * 0"
9 | workflow_dispatch:
10 |
11 | env:
12 | PARATRANZ_TOKEN: ${{secrets.PARATRANZ_TOKEN}}
13 | GIT_EMAIL: ${{secrets.GIT_EMAIL}}
14 | GIT_USERNAME: ${{secrets.GIT_USERNAME}}
15 | GIT_REPO: ${{secrets.GIT_REPO}}
16 | GIT_OWNER: ${{secrets.GIT_OWNER}}
17 |
18 | permissions:
19 | contents: read
20 |
21 | jobs:
22 | build:
23 | runs-on: ubuntu-latest
24 | steps:
25 | - uses: actions/checkout@v3
26 | with:
27 | ref: ${{ github.head_ref }}
28 | fetch-depth: 0
29 | token: ${{ secrets.ACCESS_TOKEN }}
30 | - name: Set up Python 3.10
31 | uses: actions/setup-python@v3
32 | with:
33 | python-version: "3.10"
34 | cache: 'pip'
35 | - name: Install dependencies
36 | run: |
37 | python -m pip install --upgrade pip
38 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
39 | - name: Clone PRIVATE repository
40 | uses: GuillaumeFalourd/clone-github-repo-action@v2
41 | with:
42 | owner: '${{env.GIT_OWNER}}'
43 | repository: '${{env.GIT_REPO}}'
44 | access-token: ${{ secrets.ACCESS_TOKEN }}
45 | - name: Run Python Script
46 | run: |
47 | python -m build -D
48 | - name: Commit files
49 | run: |
50 | git config --global user.email "${{env.GIT_EMAIL}}"
51 | git config --global user.name "${{env.GIT_USERNAME}}"
52 | cd dol-chinese
53 | git add .
54 | git commit -m "稳定版更新"
55 | - name: Push changes
56 | uses: ad-m/github-push-action@master
57 | with:
58 | github_token: ${{ secrets.ACCESS_TOKEN }}
59 | repository: '${{env.GIT_OWNER}}/${{env.GIT_REPO}}'
60 | directory: ${{env.GIT_REPO}}
61 | force_with_lease: true
62 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.xml
3 | *.iml
4 | *.zip
5 | *.7z
6 | *.xmind
7 | CREDITS-*.md
8 |
9 | .env
10 | .vscode
11 | .idea
12 |
13 | __pycache__
14 | dol
15 | degrees-of-lewdity-master
16 | degrees-of-lewdity-world-expansion-master
17 | dolandriod
18 | dol-chinese
19 | setto
20 |
21 | paratranz
22 | temp
23 | raw_dicts
24 |
25 | commits.json
26 | i18n.json
27 | 0.4.3.0.json
28 | .env
29 | src/tools/changelog_process/0.4.4.0.json
30 | src/tools/changelog_process/0.4.4.0.txt
31 | src/tools/variables_process/vars/_all_variables.json
32 | src/tools/variables_process/vars/_variables.json
33 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Number_Sir
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Vrelnir Localization ([Degrees of Lewdity](https://gitgud.io/Vrelnir/degrees-of-lewdity))
2 | 
3 | ![banner](data/img/banner.png)
4 | 
5 | ## New Project (WIP)
6 | [Sugarcube2-Localization](https://github.com/NumberSir/Sugarcube2-Localization)
7 | 
8 | ## Overview
9 | [A little translation tool for the game 0v0](https://github.com/NumberSir/vrelnir_localization)
10 | 1. Downloads the latest sources from the upstream repository
11 | 2. Generates the dictionaries for that version and stores them in the `raw_dicts` folder
12 | 3. Downloads the latest localization package from `paratranz` (you may need to put your `token` in `src/consts.py`; find it in your profile settings)
13 | 4. Replaces the auto-extracted localization package with the latest one, preserving stale entries
14 | 5. Writes the localization back over the game source files (checking for simple translation mistakes such as a full-width comma: `",`, or unbalanced angle brackets: `<< >`; see the sketch after this list)
15 | 6. Updates the version number to `chs-x.y.z`
16 | 7. Generates the localization dictionary package for the `i18n` mod to load (by default at `data/json/i18n.json`)
17 | 8. Compiles the game to `html` and opens it in the default browser (by default in `degrees-of-lewdity-master`)
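 18 | 
 19 | As an illustration of the checks in step 5, here is a minimal sketch (the patterns and function name are assumptions for illustration, not this repo's actual checker):
 20 | 
 21 | ```python
 22 | import re
 23 | 
 24 | FULLWIDTH_COMMA = re.compile(r'",')                # full-width comma right after a quote
 25 | UNBALANCED_WIDGET = re.compile(r'<<[^<>]*>(?!>)')  # naive: `<<...` closed by a single `>`
 26 | 
 27 | def check_line(line: str) -> list[str]:
 28 |     """Return the names of the simple mistakes found in one line."""
 29 |     problems = []
 30 |     if FULLWIDTH_COMMA.search(line):
 31 |         problems.append("full-width comma after quote")
 32 |     if UNBALANCED_WIDGET.search(line):
 33 |         problems.append("unbalanced << >")
 34 |     return problems
 35 | 
 36 | assert check_line('<<link "继续",>') == ["full-width comma after quote", "unbalanced << >"]
 37 | ```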
 38 | 
 39 | ## Usage
 40 | 1. Requires Python 3.10+
 41 | 2. From the project root, run `pip install -r requirements.txt` in `cmd` or a `shell` to install the dependencies
 42 | 3. Put your `token` (`PARATRANZ_TOKEN`) in `.env`; find it in the settings at `https://paratranz.cn/users/my`
 43 | 4. Set the localization version number (`CHINESE_VERSION`) in `.env`
 44 | 5. Run `main.py` (`python -m main` from `cmd` or a `shell`)
 45 | 
 46 | ## About version numbers
 47 | A localization version number has the form `chs-x.y.z`, e.g. `chs-alpha1.7.1`.
 48 | 
 49 | A full game version number has the form `{game version}-chs-{localization version}`, e.g. `0.4.1.7-chs-alpha1.7.1`.
 50 | 
 51 | The localization version number changes according to the following rules (a parsing sketch follows this list):
 52 | 1. `alpha` / `beta` / `release` mean:
 53 |    - `alpha`: translation coverage is 100%, but some text may have been missed during extraction, and polishing is incomplete
 54 |    - `beta`: translation coverage is 100%, no text has been missed, but polishing is incomplete
 55 |    - `release`: translation coverage is 100%, no text has been missed, and polishing is complete
 56 | 2. When the game version has a breaking update, e.g. `0.4.1` => `0.4.2` or `0.4` => `0.5`, the localization version is reset, e.g.:
 57 |    - `0.4.1.7-chs-alpha1.7.1` => `0.4.2.4-chs-alpha1.0.0`
 58 | 3. When the game version has a patch update, e.g. `0.4.1.6` => `0.4.1.7` or `0.4.2.0` => `0.4.2.5`, the first number of the localization version is bumped, e.g.:
 59 |    - `0.4.2.4-chs-alpha1.0.0` => `0.4.2.5-chs-alpha2.0.0`
 60 | 4. For the regular update every Friday at 9 p.m., the second number is bumped, e.g.:
 61 |    - `0.4.1.7-chs-alpha1.6.0` => `0.4.1.7-chs-alpha1.7.0`
 62 | 5. For an emergency update fixing an issue that makes the game unplayable, the last number is bumped, e.g.:
 63 |    - `0.4.1.7-chs-alpha1.7.0` => `0.4.1.7-chs-alpha1.7.1`
 64 | 6. Preview builds packaged for personal or in-group use get a `-pre` suffix, e.g.:
 65 |    - `0.4.1.7-chs-alpha1.7.1` => `0.4.1.7-chs-alpha1.8.0-pre`
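 66 | 
 67 | As a minimal sketch of this scheme (the names are illustrative, not part of this repo), the combined version string can be split with a regular expression:
 68 | 
 69 | ```python
 70 | import re
 71 | 
 72 | # "{game version}-chs-{localization version}", e.g. "0.4.1.7-chs-alpha1.7.1"
 73 | VERSION_RE = re.compile(
 74 |     r"^(?P<game>\d+(?:\.\d+)*)-chs-(?P<stage>alpha|beta|release)"
 75 |     r"(?P<chs>\d+\.\d+\.\d+)(?P<pre>-pre)?$"
 76 | )
 77 | 
 78 | m = VERSION_RE.match("0.4.1.7-chs-alpha1.8.0-pre")
 79 | assert m and m["stage"] == "alpha" and m["chs"] == "1.8.0" and m["pre"] == "-pre"
 80 | ```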
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NumberSir/vrelnir_localization/526475b4a1e9b23f0708c066ee5d68f955b14b4d/__init__.py
--------------------------------------------------------------------------------
/build.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from pathlib import Path
3 |
4 | from src import (
5 | logger,
6 | Paratranz,
7 | ProjectDOL,
8 | PARATRANZ_TOKEN,
9 | CHINESE_VERSION,
10 | SOURCE_TYPE
11 | )
12 | from src.tools.process_variables import VariablesProcess as VP
13 |
14 | async def process_common(dol_common: ProjectDOL, pt: Paratranz, chs_version: str):
15 |     """
16 |     Vanilla-game pipeline:
17 |     1. Download the sources
18 |     2. Create the raw dictionaries
19 |     3. Download the localization dictionaries
20 |     4. Update the raw dictionaries
21 |     5. Replace the game's original text
22 |     """
23 |     """ Wipe everything left over from the previous run """
24 | await dol_common.drop_all_dirs()
25 |
26 |     """ Download the sources """
27 | await dol_common.download_from_gitgud()
28 |
29 |     """ Pre-process all the <> widgets """
30 | var = VP()
31 | var.fetch_all_file_paths()
32 | var.fetch_all_set_content()
33 |
34 |     """ Create the raw dictionaries """
35 | await dol_common.create_dicts()
36 |
37 |     """ Download the localization dictionaries; the results land in the `raw_dicts` folder """
38 |     download_flag = await pt.download_from_paratranz()  # downloading requires an admin token in consts; find it in the site's profile settings
39 | if not download_flag:
40 | return
41 |
42 |     """ Update the raw dictionaries """
43 | await dol_common.update_dicts()
44 |
45 |
46 | async def main():
47 | logger.info(f"filepath: {Path(__file__)}")
48 |     dol_common = ProjectDOL(type_=SOURCE_TYPE)  # "common" is the vanilla game; change to "dev" to download the latest dev-branch content
49 |
50 | pt_common = Paratranz(type_=SOURCE_TYPE)
51 | if not PARATRANZ_TOKEN:
52 |         logger.error("PARATRANZ_TOKEN is not set; downloading the localization package may fail. Find your token in the settings at https://paratranz.cn/users/my and fill it in .env\n")
53 | return
54 |
55 | await process_common(dol_common, pt_common, chs_version=CHINESE_VERSION)
56 |
57 |
58 | if __name__ == '__main__':
59 | asyncio.run(main())
60 |
--------------------------------------------------------------------------------
/data/img/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NumberSir/vrelnir_localization/526475b4a1e9b23f0708c066ee5d68f955b14b4d/data/img/banner.png
--------------------------------------------------------------------------------
/data/jsmodule/acorn/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## 8.10.0 (2023-07-05)
2 |
3 | ### New features
4 |
5 | Add a `checkPrivateFields` option that disables strict checking of private property use.
6 |
7 | ## 8.9.0 (2023-06-16)
8 |
9 | ### Bug fixes
10 |
11 | Forbid dynamic import after `new`, even when part of a member expression.
12 |
13 | ### New features
14 |
15 | Add Unicode properties for ES2023.
16 |
17 | Add support for the `v` flag to regular expressions.
18 |
19 | ## 8.8.2 (2023-01-23)
20 |
21 | ### Bug fixes
22 |
23 | Fix a bug that caused `allowHashBang` to be set to false when not provided, even with `ecmaVersion >= 14`.
24 |
25 | Fix an exception when passing no option object to `parse` or `new Parser`.
26 |
27 | Fix incorrect parse error on `if (0) let\n[astral identifier char]`.
28 |
29 | ## 8.8.1 (2022-10-24)
30 |
31 | ### Bug fixes
32 |
33 | Make type for `Comment` compatible with estree types.
34 |
35 | ## 8.8.0 (2022-07-21)
36 |
37 | ### Bug fixes
38 |
39 | Allow parentheses around spread args in destructuring object assignment.
40 |
41 | Fix an issue where the tree contained `directive` properties when parsing with a language version that doesn't support them.
42 |
43 | ### New features
44 |
45 | Support hashbang comments by default in ECMAScript 2023 and later.
46 |
47 | ## 8.7.1 (2022-04-26)
48 |
49 | ### Bug fixes
50 |
51 | Stop handling `"use strict"` directives in ECMAScript versions before 5.
52 |
53 | Fix an issue where duplicate quoted export names in `export *` syntax were incorrectly checked.
54 |
55 | Add missing type for `tokTypes`.
56 |
57 | ## 8.7.0 (2021-12-27)
58 |
59 | ### New features
60 |
61 | Support quoted export names.
62 |
63 | Upgrade to Unicode 14.
64 |
65 | Add support for Unicode 13 properties in regular expressions.
66 |
67 | ### Bug fixes
68 |
69 | Use a loop to find line breaks, because the existing regexp search would overrun the end of the searched range and waste a lot of time in minified code.
70 |
71 | ## 8.6.0 (2021-11-18)
72 |
73 | ### Bug fixes
74 |
75 | Fix a bug where an object literal with multiple `__proto__` properties would incorrectly be accepted if a later property value held an assignment.
76 |
77 | ### New features
78 |
79 | Support class private fields with the `in` operator.
80 |
81 | ## 8.5.0 (2021-09-06)
82 |
83 | ### Bug fixes
84 |
85 | Improve context-dependent tokenization in a number of corner cases.
86 |
87 | Fix location tracking after a 0x2028 or 0x2029 character in a string literal (which before did not increase the line number).
88 |
89 | Fix an issue where arrow function bodies in for loop context would inappropriately consume `in` operators.
90 |
91 | Fix wrong end locations stored on SequenceExpression nodes.
92 |
93 | Implement restriction that `for`/`of` loop LHS can't start with `let`.
94 |
95 | ### New features
96 |
97 | Add support for ES2022 class static blocks.
98 |
99 | Allow multiple input files to be passed to the CLI tool.
100 |
101 | ## 8.4.1 (2021-06-24)
102 |
103 | ### Bug fixes
104 |
105 | Fix a bug where `allowAwaitOutsideFunction` would allow `await` in class field initializers, and setting `ecmaVersion` to 13 or higher would allow top-level await in non-module sources.
106 |
107 | ## 8.4.0 (2021-06-11)
108 |
109 | ### New features
110 |
111 | A new option, `allowSuperOutsideMethod`, can be used to suppress the error when `super` is used in the wrong context.
112 |
113 | ## 8.3.0 (2021-05-31)
114 |
115 | ### New features
116 |
117 | Default `allowAwaitOutsideFunction` to true for ECMAScript 2022 and higher.
118 |
119 | Add support for the `d` ([indices](https://github.com/tc39/proposal-regexp-match-indices)) regexp flag.
120 |
121 | ## 8.2.4 (2021-05-04)
122 |
123 | ### Bug fixes
124 |
125 | Fix spec conformity in corner case 'for await (async of ...)'.
126 |
127 | ## 8.2.3 (2021-05-04)
128 |
129 | ### Bug fixes
130 |
131 | Fix an issue where the library couldn't parse 'for (async of ...)'.
132 |
133 | Fix a bug in UTF-16 decoding that would read characters incorrectly in some circumstances.
134 |
135 | ## 8.2.2 (2021-04-29)
136 |
137 | ### Bug fixes
138 |
139 | Fix a bug where a class field initialized to an async arrow function wouldn't allow await inside it. Same issue existed for generator arrow functions with yield.
140 |
141 | ## 8.2.1 (2021-04-24)
142 |
143 | ### Bug fixes
144 |
145 | Fix a regression introduced in 8.2.0 where static or async class methods with keyword names fail to parse.
146 |
147 | ## 8.2.0 (2021-04-24)
148 |
149 | ### New features
150 |
151 | Add support for ES2022 class fields and private methods.
152 |
153 | ## 8.1.1 (2021-04-12)
154 |
155 | ### Various
156 |
157 | Stop shipping source maps in the NPM package.
158 |
159 | ## 8.1.0 (2021-03-09)
160 |
161 | ### Bug fixes
162 |
163 | Fix a spurious error in nested destructuring arrays.
164 |
165 | ### New features
166 |
167 | Expose `allowAwaitOutsideFunction` in CLI interface.
168 |
169 | Make `allowImportExportAnywhere` also apply to `import.meta`.
170 |
171 | ## 8.0.5 (2021-01-25)
172 |
173 | ### Bug fixes
174 |
175 | Adjust package.json to work with Node 12.16.0 and 13.0-13.6.
176 |
177 | ## 8.0.4 (2020-10-05)
178 |
179 | ### Bug fixes
180 |
181 | Make `await x ** y` an error, following the spec.
182 |
183 | Fix potentially exponential regular expression.
184 |
185 | ## 8.0.3 (2020-10-02)
186 |
187 | ### Bug fixes
188 |
189 | Fix a wasteful loop during `Parser` creation when setting `ecmaVersion` to `"latest"`.
190 |
191 | ## 8.0.2 (2020-09-30)
192 |
193 | ### Bug fixes
194 |
195 | Make the TypeScript types reflect the current allowed values for `ecmaVersion`.
196 |
197 | Fix another regexp/division tokenizer issue.
198 |
199 | ## 8.0.1 (2020-08-12)
200 |
201 | ### Bug fixes
202 |
203 | Provide the correct value in the `version` export.
204 |
205 | ## 8.0.0 (2020-08-12)
206 |
207 | ### Bug fixes
208 |
209 | Disallow expressions like `(a = b) = c`.
210 |
211 | Make non-octal escape sequences a syntax error in strict mode.
212 |
213 | ### New features
214 |
215 | The package can now be loaded directly as an ECMAScript module in node 13+.
216 |
217 | Update to the set of Unicode properties from ES2021.
218 |
219 | ### Breaking changes
220 |
221 | The `ecmaVersion` option is now required. For the moment, omitting it will still work with a warning, but that will change in a future release.
222 |
223 | Some changes to method signatures that may be used by plugins.
224 |
225 | ## 7.4.0 (2020-08-03)
226 |
227 | ### New features
228 |
229 | Add support for logical assignment operators.
230 |
231 | Add support for numeric separators.
232 |
233 | ## 7.3.1 (2020-06-11)
234 |
235 | ### Bug fixes
236 |
237 | Make the string in the `version` export match the actual library version.
238 |
239 | ## 7.3.0 (2020-06-11)
240 |
241 | ### Bug fixes
242 |
243 | Fix a bug that caused parsing of object patterns with a property named `set` that had a default value to fail.
244 |
245 | ### New features
246 |
247 | Add support for optional chaining (`?.`).
248 |
249 | ## 7.2.0 (2020-05-09)
250 |
251 | ### Bug fixes
252 |
253 | Fix precedence issue in parsing of async arrow functions.
254 |
255 | ### New features
256 |
257 | Add support for nullish coalescing.
258 |
259 | Add support for `import.meta`.
260 |
261 | Support `export * as ...` syntax.
262 |
263 | Upgrade to Unicode 13.
264 |
265 | ## 6.4.1 (2020-03-09)
266 |
267 | ### Bug fixes
268 |
269 | More carefully check for valid UTF16 surrogate pairs in regexp validator.
270 |
271 | ## 7.1.1 (2020-03-01)
272 |
273 | ### Bug fixes
274 |
275 | Treat `\8` and `\9` as invalid escapes in template strings.
276 |
277 | Allow unicode escapes in property names that are keywords.
278 |
279 | Don't error on an exponential operator expression as argument to `await`.
280 |
281 | More carefully check for valid UTF16 surrogate pairs in regexp validator.
282 |
283 | ## 7.1.0 (2019-09-24)
284 |
285 | ### Bug fixes
286 |
287 | Disallow trailing object literal commas when ecmaVersion is less than 5.
288 |
289 | ### New features
290 |
291 | Add a static `acorn` property to the `Parser` class that contains the entire module interface, to allow plugins to access the instance of the library that they are acting on.
292 |
293 | ## 7.0.0 (2019-08-13)
294 |
295 | ### Breaking changes
296 |
297 | Changes the node format for dynamic imports to use the `ImportExpression` node type, as defined in [ESTree](https://github.com/estree/estree/blob/master/es2020.md#importexpression).
298 |
299 | Makes 10 (ES2019) the default value for the `ecmaVersion` option.
300 |
301 | ## 6.3.0 (2019-08-12)
302 |
303 | ### New features
304 |
305 | `sourceType: "module"` can now be used even when `ecmaVersion` is less than 6, to parse module-style code that otherwise conforms to an older standard.
306 |
307 | ## 6.2.1 (2019-07-21)
308 |
309 | ### Bug fixes
310 |
311 | Fix bug causing Acorn to treat some characters as identifier characters that shouldn't be treated as such.
312 |
313 | Fix issue where setting the `allowReserved` option to `"never"` allowed reserved words in some circumstances.
314 |
315 | ## 6.2.0 (2019-07-04)
316 |
317 | ### Bug fixes
318 |
319 | Improve valid assignment checking in `for`/`in` and `for`/`of` loops.
320 |
321 | Disallow binding `let` in patterns.
322 |
323 | ### New features
324 |
325 | Support bigint syntax with `ecmaVersion` >= 11.
326 |
327 | Support dynamic `import` syntax with `ecmaVersion` >= 11.
328 |
329 | Upgrade to Unicode version 12.
330 |
331 | ## 6.1.1 (2019-02-27)
332 |
333 | ### Bug fixes
334 |
335 | Fix bug that caused parsing of default exports with names to fail.
336 |
337 | ## 6.1.0 (2019-02-08)
338 |
339 | ### Bug fixes
340 |
341 | Fix scope checking when redefining a `var` as a lexical binding.
342 |
343 | ### New features
344 |
345 | Split up `parseSubscripts` to use an internal `parseSubscript` method to make it easier to extend with plugins.
346 |
347 | ## 6.0.7 (2019-02-04)
348 |
349 | ### Bug fixes
350 |
351 | Check that exported bindings are defined.
352 |
353 | Don't treat `\u180e` as a whitespace character.
354 |
355 | Check for duplicate parameter names in methods.
356 |
357 | Don't allow shorthand properties when they are generators or async methods.
358 |
359 | Forbid binding `await` in async arrow function's parameter list.
360 |
361 | ## 6.0.6 (2019-01-30)
362 |
363 | ### Bug fixes
364 |
365 | The content of class declarations and expressions is now always parsed in strict mode.
366 |
367 | Don't allow `let` or `const` to bind the variable name `let`.
368 |
369 | Treat class declarations as lexical.
370 |
371 | Don't allow a generator function declaration as the sole body of an `if` or `else`.
372 |
373 | Ignore `"use strict"` when after an empty statement.
374 |
375 | Allow string line continuations with special line terminator characters.
376 |
377 | Treat `for` bodies as part of the `for` scope when checking for conflicting bindings.
378 |
379 | Fix bug with parsing `yield` in a `for` loop initializer.
380 |
381 | Implement special cases around scope checking for functions.
382 |
383 | ## 6.0.5 (2019-01-02)
384 |
385 | ### Bug fixes
386 |
387 | Fix TypeScript type for `Parser.extend` and add `allowAwaitOutsideFunction` to options type.
388 |
389 | Don't treat `let` as a keyword when the next token is `{` on the next line.
390 |
391 | Fix bug that broke checking for parentheses around an object pattern in a destructuring assignment when `preserveParens` was on.
392 |
393 | ## 6.0.4 (2018-11-05)
394 |
395 | ### Bug fixes
396 |
397 | Further improvements to tokenizing regular expressions in corner cases.
398 |
399 | ## 6.0.3 (2018-11-04)
400 |
401 | ### Bug fixes
402 |
403 | Fix bug in tokenizing an expression-less return followed by a function followed by a regular expression.
404 |
405 | Remove stray symlink in the package tarball.
406 |
407 | ## 6.0.2 (2018-09-26)
408 |
409 | ### Bug fixes
410 |
411 | Fix bug where default expressions could fail to parse inside an object destructuring assignment expression.
412 |
413 | ## 6.0.1 (2018-09-14)
414 |
415 | ### Bug fixes
416 |
417 | Fix wrong value in `version` export.
418 |
419 | ## 6.0.0 (2018-09-14)
420 |
421 | ### Bug fixes
422 |
423 | Better handle variable-redefinition checks for catch bindings and functions directly under if statements.
424 |
425 | Forbid `new.target` in top-level arrow functions.
426 |
427 | Fix issue with parsing a regexp after `yield` in some contexts.
428 |
429 | ### New features
430 |
431 | The package now comes with TypeScript definitions.
432 |
433 | ### Breaking changes
434 |
435 | The default value of the `ecmaVersion` option is now 9 (2018).
436 |
437 | Plugins work differently, and will have to be rewritten to work with this version.
438 |
439 | The loose parser and walker have been moved into separate packages (`acorn-loose` and `acorn-walk`).
440 |
441 | ## 5.7.3 (2018-09-10)
442 |
443 | ### Bug fixes
444 |
445 | Fix failure to tokenize regexps after expressions like `x.of`.
446 |
447 | Better error message for unterminated template literals.
448 |
449 | ## 5.7.2 (2018-08-24)
450 |
451 | ### Bug fixes
452 |
453 | Properly handle `allowAwaitOutsideFunction` in for statements.
454 |
455 | Treat function declarations at the top level of modules like let bindings.
456 |
457 | Don't allow async function declarations as the only statement under a label.
458 |
459 | ## 5.7.0 (2018-06-15)
460 |
461 | ### New features
462 |
463 | Upgraded to Unicode 11.
464 |
465 | ## 5.6.0 (2018-05-31)
466 |
467 | ### New features
468 |
469 | Allow U+2028 and U+2029 in strings when `ecmaVersion` >= 10.
470 |
471 | Allow binding-less catch statements when `ecmaVersion` >= 10.
472 |
473 | Add `allowAwaitOutsideFunction` option for parsing top-level `await`.
474 |
475 | ## 5.5.3 (2018-03-08)
476 |
477 | ### Bug fixes
478 |
479 | A _second_ republish of the code in 5.5.1, this time with yarn, to hopefully get valid timestamps.
480 |
481 | ## 5.5.2 (2018-03-08)
482 |
483 | ### Bug fixes
484 |
485 | A republish of the code in 5.5.1 in an attempt to solve an issue with the file timestamps in the npm package being 0.
486 |
487 | ## 5.5.1 (2018-03-06)
488 |
489 | ### Bug fixes
490 |
491 | Fix misleading error message for octal escapes in template strings.
492 |
493 | ## 5.5.0 (2018-02-27)
494 |
495 | ### New features
496 |
497 | The identifier character categorization is now based on Unicode version 10.
498 |
499 | Acorn will now validate the content of regular expressions, including new ES9 features.
500 |
501 | ## 5.4.0 (2018-02-01)
502 |
503 | ### Bug fixes
504 |
505 | Disallow duplicate or escaped flags on regular expressions.
506 |
507 | Disallow octal escapes in strings in strict mode.
508 |
509 | ### New features
510 |
511 | Add support for async iteration.
512 |
513 | Add support for object spread and rest.
514 |
515 | ## 5.3.0 (2017-12-28)
516 |
517 | ### Bug fixes
518 |
519 | Fix parsing of floating point literals with leading zeroes in loose mode.
520 |
521 | Allow duplicate property names in object patterns.
522 |
523 | Don't allow static class methods named `prototype`.
524 |
525 | Disallow async functions directly under `if` or `else`.
526 |
527 | Parse right-hand-side of `for`/`of` as an assignment expression.
528 |
529 | Stricter parsing of `for`/`in`.
530 |
531 | Don't allow unicode escapes in contextual keywords.
532 |
533 | ### New features
534 |
535 | Parsing class members was factored into smaller methods to allow plugins to hook into it.
536 |
537 | ## 5.2.1 (2017-10-30)
538 |
539 | ### Bug fixes
540 |
541 | Fix a token context corruption bug.
542 |
543 | ## 5.2.0 (2017-10-30)
544 |
545 | ### Bug fixes
546 |
547 | Fix token context tracking for `class` and `function` in property-name position.
548 |
549 | Make sure `%*` isn't parsed as a valid operator.
550 |
551 | Allow shorthand properties `get` and `set` to be followed by default values.
552 |
553 | Disallow `super` when not in callee or object position.
554 |
555 | ### New features
556 |
557 | Support [`directive` property](https://github.com/estree/estree/compare/b3de58c9997504d6fba04b72f76e6dd1619ee4eb...1da8e603237144f44710360f8feb7a9977e905e0) on directive expression statements.
558 |
559 | ## 5.1.2 (2017-09-04)
560 |
561 | ### Bug fixes
562 |
563 | Disable parsing of legacy HTML-style comments in modules.
564 |
565 | Fix parsing of async methods whose names are keywords.
566 |
567 | ## 5.1.1 (2017-07-06)
568 |
569 | ### Bug fixes
570 |
571 | Fix problem with disambiguating regexp and division after a class.
572 |
573 | ## 5.1.0 (2017-07-05)
574 |
575 | ### Bug fixes
576 |
577 | Fix tokenizing of regexps in an object-destructuring `for`/`of` loop and after `yield`.
578 |
579 | Parse zero-prefixed numbers with non-octal digits as decimal.
580 |
581 | Allow object/array patterns in rest parameters.
582 |
583 | Don't error when `yield` is used as a property name.
584 |
585 | Allow `async` as a shorthand object property.
586 |
587 | ### New features
588 |
589 | Implement the [template literal revision proposal](https://github.com/tc39/proposal-template-literal-revision) for ES9.
590 |
591 | ## 5.0.3 (2017-04-01)
592 |
593 | ### Bug fixes
594 |
595 | Fix spurious duplicate variable definition errors for named functions.
596 |
597 | ## 5.0.2 (2017-03-30)
598 |
599 | ### Bug fixes
600 |
601 | A binary operator after a parenthesized arrow expression is no longer incorrectly treated as an error.
602 |
603 | ## 5.0.0 (2017-03-28)
604 |
605 | ### Bug fixes
606 |
607 | Raise an error for duplicated lexical bindings.
608 |
609 | Fix spurious error when an assignment expression occurred after a spread expression.
610 |
611 | Accept regular expressions after `of` (in `for`/`of`), `yield` (in a generator), and braced arrow functions.
612 |
613 | Allow labels in front of `var` declarations, even in strict mode.
614 |
615 | ### Breaking changes
616 |
617 | Parse declarations following `export default` as declaration nodes, not expressions. This means that class and function declarations nodes can now have `null` as their `id`.
618 |
619 | ## 4.0.11 (2017-02-07)
620 |
621 | ### Bug fixes
622 |
623 | Allow all forms of member expressions to be parenthesized as lvalue.
624 |
625 | ## 4.0.10 (2017-02-07)
626 |
627 | ### Bug fixes
628 |
629 | Don't expect semicolons after default-exported functions or classes, even when they are expressions.
630 |
631 | Check for use of `'use strict'` directives in non-simple parameter functions, even when already in strict mode.
632 |
633 | ## 4.0.9 (2017-02-06)
634 |
635 | ### Bug fixes
636 |
637 | Fix incorrect error raised for parenthesized simple assignment targets, so that `(x) = 1` parses again.
638 |
639 | ## 4.0.8 (2017-02-03)
640 |
641 | ### Bug fixes
642 |
643 | Solve spurious parenthesized pattern errors by temporarily erring on the side of accepting programs that our delayed errors don't handle correctly yet.
644 |
645 | ## 4.0.7 (2017-02-02)
646 |
647 | ### Bug fixes
648 |
649 | Accept invalidly rejected code like `(x).y = 2` again.
650 |
651 | Don't raise an error when a function _inside_ strict code has a non-simple parameter list.
652 |
653 | ## 4.0.6 (2017-02-02)
654 |
655 | ### Bug fixes
656 |
657 | Fix exponential behavior (manifesting itself as a complete hang for even relatively small source files) introduced by the new 'use strict' check.
658 |
659 | ## 4.0.5 (2017-02-02)
660 |
661 | ### Bug fixes
662 |
663 | Disallow parenthesized pattern expressions.
664 |
665 | Allow keywords as export names.
666 |
667 | Don't allow the `async` keyword to be parenthesized.
668 |
669 | Properly raise an error when a keyword contains a character escape.
670 |
671 | Allow `"use strict"` to appear after other string literal expressions.
672 |
673 | Disallow labeled declarations.
674 |
675 | ## 4.0.4 (2016-12-19)
676 |
677 | ### Bug fixes
678 |
679 | Fix crash when `export` was followed by a keyword that can't be
680 | exported.
681 |
682 | ## 4.0.3 (2016-08-16)
683 |
684 | ### Bug fixes
685 |
686 | Allow regular function declarations inside single-statement `if` branches in loose mode. Forbid them entirely in strict mode.
687 |
688 | Properly parse properties named `async` in ES2017 mode.
689 |
690 | Fix bug where reserved words were broken in ES2017 mode.
691 |
692 | ## 4.0.2 (2016-08-11)
693 |
694 | ### Bug fixes
695 |
696 | Don't ignore period or 'e' characters after octal numbers.
697 |
698 | Fix broken parsing for call expressions in default parameter values of arrow functions.
699 |
700 | ## 4.0.1 (2016-08-08)
701 |
702 | ### Bug fixes
703 |
704 | Fix false positives in duplicated export name errors.
705 |
706 | ## 4.0.0 (2016-08-07)
707 |
708 | ### Breaking changes
709 |
710 | The default `ecmaVersion` option value is now 7.
711 |
712 | A number of internal method signatures changed, so plugins might need to be updated.
713 |
714 | ### Bug fixes
715 |
716 | The parser now raises errors on duplicated export names.
717 |
718 | `arguments` and `eval` can now be used in shorthand properties.
719 |
720 | Duplicate parameter names in non-simple argument lists now always produce an error.
721 |
722 | ### New features
723 |
724 | The `ecmaVersion` option now also accepts year-style version numbers
725 | (2015, etc).
726 |
727 | Support for `async`/`await` syntax when `ecmaVersion` is >= 8.
728 |
729 | Support for trailing commas in call expressions when `ecmaVersion` is >= 8.
730 |
731 | ## 3.3.0 (2016-07-25)
732 |
733 | ### Bug fixes
734 |
735 | Fix bug in tokenizing of regexp operator after a function declaration.
736 |
737 | Fix parser crash when parsing an array pattern with a hole.
738 |
739 | ### New features
740 |
741 | Implement check against complex argument lists in functions that enable strict mode in ES7.
742 |
743 | ## 3.2.0 (2016-06-07)
744 |
745 | ### Bug fixes
746 |
747 | Improve handling of lack of unicode regexp support in host
748 | environment.
749 |
750 | Properly reject shorthand properties whose name is a keyword.
751 |
752 | ### New features
753 |
754 | Visitors created with `visit.make` now have their base as _prototype_, rather than copying properties into a fresh object.
755 |
756 | ## 3.1.0 (2016-04-18)
757 |
758 | ### Bug fixes
759 |
760 | Properly tokenize the division operator directly after a function expression.
761 |
762 | Allow trailing comma in destructuring arrays.
763 |
764 | ## 3.0.4 (2016-02-25)
765 |
766 | ### Fixes
767 |
768 | Allow update expressions as left-hand-side of the ES7 exponential operator.
769 |
770 | ## 3.0.2 (2016-02-10)
771 |
772 | ### Fixes
773 |
774 | Fix bug that accidentally made `undefined` a reserved word when parsing ES7.
775 |
776 | ## 3.0.0 (2016-02-10)
777 |
778 | ### Breaking changes
779 |
780 | The default value of the `ecmaVersion` option is now 6 (used to be 5).
781 |
782 | Support for comprehension syntax (which was dropped from the draft spec) has been removed.
783 |
784 | ### Fixes
785 |
786 | `let` and `yield` are now “contextual keywords”, meaning you can mostly use them as identifiers in ES5 non-strict code.
787 |
788 | A parenthesized class or function expression after `export default` is now parsed correctly.
789 |
790 | ### New features
791 |
792 | When `ecmaVersion` is set to 7, Acorn will parse the exponentiation operator (`**`).
793 |
794 | The identifier character ranges are now based on Unicode 8.0.0.
795 |
796 | Plugins can now override the `raiseRecoverable` method to override the way non-critical errors are handled.
797 |
798 | ## 2.7.0 (2016-01-04)
799 |
800 | ### Fixes
801 |
802 | Stop allowing rest parameters in setters.
803 |
804 | Disallow `y` regexp flag in ES5.
805 |
806 | Disallow `\00` and `\000` escapes in strict mode.
807 |
808 | Raise an error when an import name is a reserved word.
809 |
810 | ## 2.6.2 (2015-11-10)
811 |
812 | ### Fixes
813 |
814 | Don't crash when no options object is passed.
815 |
816 | ## 2.6.0 (2015-11-09)
817 |
818 | ### Fixes
819 |
820 | Add `await` as a reserved word in module sources.
821 |
822 | Disallow `yield` in a parameter default value for a generator.
823 |
824 | Forbid using a comma after a rest pattern in an array destructuring.
825 |
826 | ### New features
827 |
828 | Support parsing stdin in command-line tool.
829 |
830 | ## 2.5.0 (2015-10-27)
831 |
832 | ### Fixes
833 |
834 | Fix tokenizer support in the command-line tool.
835 |
836 | Stop allowing `new.target` outside of functions.
837 |
838 | Remove legacy `guard` and `guardedHandler` properties from try nodes.
839 |
840 | Stop allowing multiple `__proto__` properties on an object literal in strict mode.
841 |
842 | Don't allow rest parameters to be non-identifier patterns.
843 |
844 | Check for duplicate parameter names in arrow functions.
845 |
--------------------------------------------------------------------------------
/data/jsmodule/acorn/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (C) 2012-2022 by various contributors (see AUTHORS)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/data/jsmodule/acorn/README.md:
--------------------------------------------------------------------------------
1 | # Acorn
2 |
3 | A tiny, fast JavaScript parser written in JavaScript.
4 |
5 | ## Community
6 |
7 | Acorn is open source software released under an
8 | [MIT license](https://github.com/acornjs/acorn/blob/master/acorn/LICENSE).
9 |
10 | You are welcome to
11 | [report bugs](https://github.com/acornjs/acorn/issues) or create pull
12 | requests on [github](https://github.com/acornjs/acorn). For questions
13 | and discussion, please use the
14 | [Tern discussion forum](https://discuss.ternjs.net).
15 |
16 | ## Installation
17 |
18 | The easiest way to install acorn is from [`npm`](https://www.npmjs.com/):
19 |
20 | ```sh
21 | npm install acorn
22 | ```
23 |
24 | Alternately, you can download the source and build acorn yourself:
25 |
26 | ```sh
27 | git clone https://github.com/acornjs/acorn.git
28 | cd acorn
29 | npm install
30 | ```
31 |
32 | ## Interface
33 |
34 | **parse**`(input, options)` is the main interface to the library. The
35 | `input` parameter is a string, `options` must be an object setting
36 | some of the options listed below. The return value will be an abstract
37 | syntax tree object as specified by the [ESTree
38 | spec](https://github.com/estree/estree).
39 |
40 | ```javascript
41 | let acorn = require("acorn");
42 | console.log(acorn.parse("1 + 1", {ecmaVersion: 2020}));
43 | ```
44 |
45 | When encountering a syntax error, the parser will raise a
46 | `SyntaxError` object with a meaningful message. The error object will
47 | have a `pos` property that indicates the string offset at which the
48 | error occurred, and a `loc` object that contains a `{line, column}`
49 | object referring to that same position.
50 |
51 | Options are provided in a second argument, which should be an
52 | object containing any of these fields (only `ecmaVersion` is
53 | required):
54 |
55 | - **ecmaVersion**: Indicates the ECMAScript version to parse. Must be
56 | either 3, 5, 6 (or 2015), 7 (2016), 8 (2017), 9 (2018), 10 (2019),
57 | 11 (2020), 12 (2021), 13 (2022), 14 (2023), or `"latest"` (the
58 | latest the library supports). This influences support for strict
59 | mode, the set of reserved words, and support for new syntax
60 | features.
61 |
62 | **NOTE**: Only 'stage 4' (finalized) ECMAScript features are being
63 | implemented by Acorn. Other proposed new features must be
64 | implemented through plugins.
65 |
66 | - **sourceType**: Indicate the mode the code should be parsed in. Can be
67 | either `"script"` or `"module"`. This influences global strict mode
68 | and parsing of `import` and `export` declarations.
69 |
70 | **NOTE**: If set to `"module"`, then static `import` / `export` syntax
71 | will be valid, even if `ecmaVersion` is less than 6.
72 |
73 | - **onInsertedSemicolon**: If given a callback, that callback will be
74 | called whenever a missing semicolon is inserted by the parser. The
75 | callback will be given the character offset of the point where the
76 | semicolon is inserted as argument, and if `locations` is on, also a
77 | `{line, column}` object representing this position.
78 |
79 | - **onTrailingComma**: Like `onInsertedSemicolon`, but for trailing
80 | commas.
81 |
82 | - **allowReserved**: If `false`, using a reserved word will generate
83 | an error. Defaults to `true` for `ecmaVersion` 3, `false` for higher
84 | versions. When given the value `"never"`, reserved words and
85 | keywords can also not be used as property names (as in Internet
86 | Explorer's old parser).
87 |
88 | - **allowReturnOutsideFunction**: By default, a return statement at
89 | the top level raises an error. Set this to `true` to accept such
90 | code.
91 |
92 | - **allowImportExportEverywhere**: By default, `import` and `export`
93 | declarations can only appear at a program's top level. Setting this
94 | option to `true` allows them anywhere where a statement is allowed,
95 | and also allows `import.meta` expressions to appear in scripts
96 | (when `sourceType` is not `"module"`).
97 |
98 | - **allowAwaitOutsideFunction**: If `false`, `await` expressions can
99 | only appear inside `async` functions. Defaults to `true` in modules
100 | for `ecmaVersion` 2022 and later, `false` for lower versions.
101 | Setting this option to `true` allows to have top-level `await`
102 | expressions. They are still not allowed in non-`async` functions,
103 | though.
104 |
105 | - **allowSuperOutsideMethod**: By default, `super` outside a method
106 | raises an error. Set this to `true` to accept such code.
107 |
108 | - **allowHashBang**: When this is enabled, if the code starts with the
109 | characters `#!` (as in a shellscript), the first line will be
110 | treated as a comment. Defaults to true when `ecmaVersion` >= 2023.
111 |
112 | - **checkPrivateFields**: By default, the parser will verify that
113 | private properties are only used in places where they are valid and
114 | have been declared. Set this to false to turn such checks off.
115 |
116 | - **locations**: When `true`, each node has a `loc` object attached
117 | with `start` and `end` subobjects, each of which contains the
118 | one-based line and zero-based column numbers in `{line, column}`
119 | form. Default is `false`.
120 |
121 | - **onToken**: If a function is passed for this option, each found
122 | token will be passed in the same format as tokens returned from
123 | `tokenizer().getToken()`.
124 |
125 | If an array is passed, each found token is pushed to it.
126 |
127 | Note that you are not allowed to call the parser from the
128 | callback—that will corrupt its internal state.
129 |
130 | - **onComment**: If a function is passed for this option, whenever a
131 | comment is encountered the function will be called with the
132 | following parameters:
133 |
134 | - `block`: `true` if the comment is a block comment, false if it
135 | is a line comment.
136 | - `text`: The content of the comment.
137 | - `start`: Character offset of the start of the comment.
138 | - `end`: Character offset of the end of the comment.
139 |
140 | When the `locations` option is on, the `{line, column}` locations
141 | of the comment’s start and end are passed as two additional
142 | parameters.
143 |
144 | If an array is passed for this option, each found comment is pushed
145 | to it as an object in Esprima format:
146 |
147 | ```javascript
148 | {
149 | "type": "Line" | "Block",
150 | "value": "comment text",
151 | "start": Number,
152 | "end": Number,
153 | // If `locations` option is on:
154 | "loc": {
155 | "start": {line: Number, column: Number}
156 | "end": {line: Number, column: Number}
157 | },
158 | // If `ranges` option is on:
159 | "range": [Number, Number]
160 | }
161 | ```
162 |
163 | Note that you are not allowed to call the parser from the
164 | callback—that will corrupt its internal state.
165 |
166 | - **ranges**: Nodes have their start and end character offsets
167 | recorded in `start` and `end` properties (directly on the node,
168 | rather than the `loc` object, which holds line/column data). To also
169 | add a
170 | [semi-standardized](https://bugzilla.mozilla.org/show_bug.cgi?id=745678)
171 | `range` property holding a `[start, end]` array with the same
172 | numbers, set the `ranges` option to `true`.
173 |
174 | - **program**: It is possible to parse multiple files into a single
175 | AST by passing the tree produced by parsing the first file as the
176 | `program` option in subsequent parses. This will add the toplevel
177 | forms of the parsed file to the "Program" (top) node of an existing
178 | parse tree.
179 |
180 | - **sourceFile**: When the `locations` option is `true`, you can pass
181 | this option to add a `source` attribute in every node’s `loc`
182 | object. Note that the contents of this option are not examined or
183 | processed in any way; you are free to use whatever format you
184 | choose.
185 |
186 | - **directSourceFile**: Like `sourceFile`, but a `sourceFile` property
187 | will be added (regardless of the `location` option) directly to the
188 | nodes, rather than the `loc` object.
189 |
190 | - **preserveParens**: If this option is `true`, parenthesized expressions
191 | are represented by (non-standard) `ParenthesizedExpression` nodes
192 | that have a single `expression` property containing the expression
193 | inside parentheses.
194 |
195 | **parseExpressionAt**`(input, offset, options)` will parse a single
196 | expression in a string, and return its AST. It will not complain if
197 | there is more of the string left after the expression.
198 |
199 | **tokenizer**`(input, options)` returns an object with a `getToken`
200 | method that can be called repeatedly to get the next token, a `{start,
201 | end, type, value}` object (with added `loc` property when the
202 | `locations` option is enabled and `range` property when the `ranges`
203 | option is enabled). When the token's type is `tokTypes.eof`, you
204 | should stop calling the method, since it will keep returning that same
205 | token forever.
206 |
207 | In an ES6 environment, the returned result can be used as any other
208 | protocol-compliant iterable:
209 |
210 | ```javascript
211 | for (let token of acorn.tokenizer(str)) {
212 | // iterate over the tokens
213 | }
214 |
215 | // transform code to array of tokens:
216 | var tokens = [...acorn.tokenizer(str)];
217 | ```
218 |
219 | **tokTypes** holds an object mapping names to the token type objects
220 | that end up in the `type` properties of tokens.
221 |
222 | **getLineInfo**`(input, offset)` can be used to get a `{line,
223 | column}` object for a given program string and offset.
224 |
225 | ### The `Parser` class
226 |
227 | Instances of the **`Parser`** class contain all the state and logic
228 | that drives a parse. It has static methods `parse`,
229 | `parseExpressionAt`, and `tokenizer` that match the top-level
230 | functions by the same name.
231 |
232 | When extending the parser with plugins, you need to call these methods
233 | on the extended version of the class. To extend a parser with plugins,
234 | you can use its static `extend` method.
235 |
236 | ```javascript
237 | var acorn = require("acorn");
238 | var jsx = require("acorn-jsx");
239 | var JSXParser = acorn.Parser.extend(jsx());
240 | JSXParser.parse("foo()", {ecmaVersion: 2020});
241 | ```
242 |
243 | The `extend` method takes any number of plugin values, and returns a
244 | new `Parser` class that includes the extra parser logic provided by
245 | the plugins.
246 |
247 | ## Command line interface
248 |
249 | The `bin/acorn` utility can be used to parse a file from the command
250 | line. It accepts as arguments its input file and the following
251 | options:
252 |
253 | - `--ecma3|--ecma5|--ecma6|--ecma7|--ecma8|--ecma9|--ecma10`: Sets the ECMAScript version
254 | to parse. Default is version 9.
255 |
256 | - `--module`: Sets the parsing mode to `"module"`. Is set to `"script"` otherwise.
257 |
258 | - `--locations`: Attaches a "loc" object to each node with "start" and
259 | "end" subobjects, each of which contains the one-based line and
260 | zero-based column numbers in `{line, column}` form.
261 |
262 | - `--allow-hash-bang`: If the code starts with the characters #! (as
263 | in a shellscript), the first line will be treated as a comment.
264 |
265 | - `--allow-await-outside-function`: Allows top-level `await` expressions.
266 | See the `allowAwaitOutsideFunction` option for more information.
267 |
268 | - `--compact`: No whitespace is used in the AST output.
269 |
270 | - `--silent`: Do not output the AST, just return the exit status.
271 |
272 | - `--help`: Print the usage information and quit.
273 |
274 | The utility spits out the syntax tree as JSON data.
275 |
276 | ## Existing plugins
277 |
278 | - [`acorn-jsx`](https://github.com/RReverser/acorn-jsx): Parse [Facebook JSX syntax extensions](https://github.com/facebook/jsx)
279 |
--------------------------------------------------------------------------------
/data/jsmodule/acorn/dist/acorn.d.mts:
--------------------------------------------------------------------------------
1 | export {
2 | Node,
3 | Parser,
4 | Position,
5 | SourceLocation,
6 | TokContext,
7 | Token,
8 | TokenType,
9 | defaultOptions,
10 | getLineInfo,
11 | isIdentifierChar,
12 | isIdentifierStart,
13 | isNewLine,
14 | lineBreak,
15 | lineBreakG,
16 | parse,
17 | parseExpressionAt,
18 | tokContexts,
19 | tokTypes,
20 | tokenizer,
21 | version,
22 | AbstractToken,
23 | Comment,
24 | Options,
25 | ecmaVersion,
26 | } from "./acorn.js";
27 |
--------------------------------------------------------------------------------
/data/jsmodule/acorn/dist/acorn.d.ts:
--------------------------------------------------------------------------------
1 | export as namespace acorn
2 | export = acorn
3 |
4 | declare namespace acorn {
5 | function parse(input: string, options: Options): Node
6 |
7 | function parseExpressionAt(input: string, pos: number, options: Options): Node
8 |
9 | function tokenizer(input: string, options: Options): {
10 | getToken(): Token
11 | [Symbol.iterator](): Iterator
12 | }
13 |
14 | type ecmaVersion = 3 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 2015 | 2016 | 2017 | 2018 | 2019 | 2020 | 2021 | 2022 | 2023 | 2024 | 'latest'
15 |
16 | interface Options {
17 | ecmaVersion: ecmaVersion
18 | sourceType?: 'script' | 'module'
19 | onInsertedSemicolon?: (lastTokEnd: number, lastTokEndLoc?: Position) => void
20 | onTrailingComma?: (lastTokEnd: number, lastTokEndLoc?: Position) => void
21 | allowReserved?: boolean | 'never'
22 | allowReturnOutsideFunction?: boolean
23 | allowImportExportEverywhere?: boolean
24 | allowAwaitOutsideFunction?: boolean
25 | allowSuperOutsideMethod?: boolean
26 | allowHashBang?: boolean
27 | locations?: boolean
28 | onToken?: ((token: Token) => any) | Token[]
29 | onComment?: ((
30 | isBlock: boolean, text: string, start: number, end: number, startLoc?: Position,
31 | endLoc?: Position
32 | ) => void) | Comment[]
33 | ranges?: boolean
34 | program?: Node
35 | sourceFile?: string
36 | directSourceFile?: string
37 | preserveParens?: boolean
38 | }
39 |
40 | class Parser {
41 | // state.js
42 | lineStart: number;
43 | options: Options;
44 | curLine: number;
45 | start: number;
46 | end: number;
47 | input: string;
48 | type: TokenType;
49 |
50 | // state.js
51 | constructor(options: Options, input: string, startPos?: number)
52 | parse(this: Parser): Node
53 |
54 | // tokenize.js
55 | next(): void;
56 | nextToken(): void;
57 |
58 | // statement.js
59 | parseTopLevel(node: Node): Node;
60 |
61 | // node.js
62 | finishNode(node: Node, type: string): Node;
63 | finishNodeAt(node: Node, type: string, pos: number, loc: Position): Node;
64 |
65 | // location.js
66 | raise(pos: number, message: string) : void;
67 | raiseRecoverable?(pos: number, message: string) : void;
68 |
69 | // parseutils.js
70 | unexpected(pos: number) : void;
71 |
72 | // index.js
73 | static acorn: typeof acorn;
74 |
75 | // state.js
76 | static parse(this: typeof Parser, input: string, options: Options): Node
77 | static parseExpressionAt(this: typeof Parser, input: string, pos: number, options: Options): Node
78 | static tokenizer(this: typeof Parser, input: string, options: Options): {
79 | getToken(): Token
80 |       [Symbol.iterator](): Iterator<Token>
81 | }
82 | static extend(this: typeof Parser, ...plugins: ((BaseParser: typeof Parser) => typeof Parser)[]): typeof Parser
83 | }
84 |
85 | interface Position { line: number; column: number; offset: number }
86 |
87 | const defaultOptions: Options
88 |
89 | function getLineInfo(input: string, offset: number): Position
90 |
91 | class SourceLocation {
92 | start: Position
93 | end: Position
94 | source?: string | null
95 | constructor(p: Parser, start: Position, end: Position)
96 | }
97 |
98 | class Node {
99 | type: string
100 | start: number
101 | end: number
102 | loc?: SourceLocation
103 | sourceFile?: string
104 | range?: [number, number]
105 | constructor(parser: Parser, pos: number, loc?: SourceLocation)
106 | }
107 |
108 | class TokenType {
109 | label: string
110 | keyword: string
111 | beforeExpr: boolean
112 | startsExpr: boolean
113 | isLoop: boolean
114 | isAssign: boolean
115 | prefix: boolean
116 | postfix: boolean
117 | binop: number
118 | updateContext?: (prevType: TokenType) => void
119 | constructor(label: string, conf?: any)
120 | }
121 |
122 | const tokTypes: {
123 | num: TokenType
124 | regexp: TokenType
125 | string: TokenType
126 | name: TokenType
127 | privateId: TokenType
128 | eof: TokenType
129 | bracketL: TokenType
130 | bracketR: TokenType
131 | braceL: TokenType
132 | braceR: TokenType
133 | parenL: TokenType
134 | parenR: TokenType
135 | comma: TokenType
136 | semi: TokenType
137 | colon: TokenType
138 | dot: TokenType
139 | question: TokenType
140 | questionDot: TokenType
141 | arrow: TokenType
142 | template: TokenType
143 | invalidTemplate: TokenType
144 | ellipsis: TokenType
145 | backQuote: TokenType
146 | dollarBraceL: TokenType
147 | eq: TokenType
148 | assign: TokenType
149 | incDec: TokenType
150 | prefix: TokenType
151 | logicalOR: TokenType
152 | logicalAND: TokenType
153 | bitwiseOR: TokenType
154 | bitwiseXOR: TokenType
155 | bitwiseAND: TokenType
156 | equality: TokenType
157 | relational: TokenType
158 | bitShift: TokenType
159 | plusMin: TokenType
160 | modulo: TokenType
161 | star: TokenType
162 | slash: TokenType
163 | starstar: TokenType
164 | coalesce: TokenType
165 | _break: TokenType
166 | _case: TokenType
167 | _catch: TokenType
168 | _continue: TokenType
169 | _debugger: TokenType
170 | _default: TokenType
171 | _do: TokenType
172 | _else: TokenType
173 | _finally: TokenType
174 | _for: TokenType
175 | _function: TokenType
176 | _if: TokenType
177 | _return: TokenType
178 | _switch: TokenType
179 | _throw: TokenType
180 | _try: TokenType
181 | _var: TokenType
182 | _const: TokenType
183 | _while: TokenType
184 | _with: TokenType
185 | _new: TokenType
186 | _this: TokenType
187 | _super: TokenType
188 | _class: TokenType
189 | _extends: TokenType
190 | _export: TokenType
191 | _import: TokenType
192 | _null: TokenType
193 | _true: TokenType
194 | _false: TokenType
195 | _in: TokenType
196 | _instanceof: TokenType
197 | _typeof: TokenType
198 | _void: TokenType
199 | _delete: TokenType
200 | }
201 |
202 | class TokContext {
203 | constructor(token: string, isExpr: boolean, preserveSpace: boolean, override?: (p: Parser) => void)
204 | }
205 |
206 | const tokContexts: {
207 | b_stat: TokContext
208 | b_expr: TokContext
209 | b_tmpl: TokContext
210 | p_stat: TokContext
211 | p_expr: TokContext
212 | q_tmpl: TokContext
213 | f_expr: TokContext
214 | f_stat: TokContext
215 | f_expr_gen: TokContext
216 | f_gen: TokContext
217 | }
218 |
219 | function isIdentifierStart(code: number, astral?: boolean): boolean
220 |
221 | function isIdentifierChar(code: number, astral?: boolean): boolean
222 |
223 | interface AbstractToken {
224 | }
225 |
226 | interface Comment extends AbstractToken {
227 | type: 'Line' | 'Block'
228 | value: string
229 | start: number
230 | end: number
231 | loc?: SourceLocation
232 | range?: [number, number]
233 | }
234 |
235 | class Token {
236 | type: TokenType
237 | value: any
238 | start: number
239 | end: number
240 | loc?: SourceLocation
241 | range?: [number, number]
242 | constructor(p: Parser)
243 | }
244 |
245 | function isNewLine(code: number): boolean
246 |
247 | const lineBreak: RegExp
248 |
249 | const lineBreakG: RegExp
250 |
251 | const version: string
252 |
253 | const nonASCIIwhitespace: RegExp
254 |
255 | const keywordTypes: {
256 | _break: TokenType
257 | _case: TokenType
258 | _catch: TokenType
259 | _continue: TokenType
260 | _debugger: TokenType
261 | _default: TokenType
262 | _do: TokenType
263 | _else: TokenType
264 | _finally: TokenType
265 | _for: TokenType
266 | _function: TokenType
267 | _if: TokenType
268 | _return: TokenType
269 | _switch: TokenType
270 | _throw: TokenType
271 | _try: TokenType
272 | _var: TokenType
273 | _const: TokenType
274 | _while: TokenType
275 | _with: TokenType
276 | _new: TokenType
277 | _this: TokenType
278 | _super: TokenType
279 | _class: TokenType
280 | _extends: TokenType
281 | _export: TokenType
282 | _import: TokenType
283 | _null: TokenType
284 | _true: TokenType
285 | _false: TokenType
286 | _in: TokenType
287 | _instanceof: TokenType
288 | _typeof: TokenType
289 | _void: TokenType
290 | _delete: TokenType
291 | }
292 | }
293 |
--------------------------------------------------------------------------------
/data/jsmodule/acorn/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "acorn",
3 | "description": "ECMAScript parser",
4 | "homepage": "https://github.com/acornjs/acorn",
5 | "main": "dist/acorn.js",
6 | "types": "dist/acorn.d.ts",
7 | "module": "dist/acorn.mjs",
8 | "exports": {
9 | ".": [
10 | {
11 | "import": "./dist/acorn.mjs",
12 | "require": "./dist/acorn.js",
13 | "default": "./dist/acorn.js"
14 | },
15 | "./dist/acorn.js"
16 | ],
17 | "./package.json": "./package.json"
18 | },
19 | "version": "8.10.0",
20 | "engines": {
21 | "node": ">=0.4.0"
22 | },
23 | "maintainers": [
24 | {
25 | "name": "Marijn Haverbeke",
26 | "email": "marijnh@gmail.com",
27 | "web": "https://marijnhaverbeke.nl"
28 | },
29 | {
30 | "name": "Ingvar Stepanyan",
31 | "email": "me@rreverser.com",
32 | "web": "https://rreverser.com/"
33 | },
34 | {
35 | "name": "Adrian Heine",
36 | "web": "http://adrianheine.de"
37 | }
38 | ],
39 | "repository": {
40 | "type": "git",
41 | "url": "https://github.com/acornjs/acorn.git"
42 | },
43 | "license": "MIT",
44 | "scripts": {
45 | "prepare": "cd ..; npm run build:main"
46 | },
47 | "bin": {
48 | "acorn": "./bin/acorn"
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/data/json/blacklists.json:
--------------------------------------------------------------------------------
1 | {
2 | "04-Variables": [
3 | "npcList.twee",
4 | "npcNamed.twee",
5 | "variables-passageHeader.twee",
6 | "variables-start.twee",
7 | "variables-start2.twee"
8 | ],
9 |
10 | "base-clothing": [
11 | "canvas.twee",
12 | "canvasmodel.twee",
13 | "canvasmodel-img.twee",
14 | "lighting.twee",
15 | "update.twee"
16 | ],
17 | "base-combat": [
18 | "beast-images.twee",
19 | "close-images.twee",
20 | "consensual.twee",
21 | "doggy-images.twee",
22 | "missionary-images.twee"
23 | ],
24 | "machine": [
25 | "state.twee"
26 | ],
27 | "tentacles": [
28 | "tentacle-images.twee"
29 | ],
30 | "base-system": [
31 | "physicalAdjustments.twee",
32 | "bdsm.twee",
33 | "clamp.twee",
34 | "disable.twee",
35 | "location.twee",
37 | "pubic-hair.twee"
38 | ],
39 | "tools": [
40 | "dynamicRendering.twee"
41 | ],
42 | "base-debug": [
43 | "debug.twee",
44 | "debug-events.twee",
45 | "scoped-var-test.twee",
46 | "test demon.twee",
47 | "test encounters.twee",
48 | "testing-encountersUI.twee",
49 | "testing-renderer.twee",
50 | "testing-room.twee",
51 | "testing-skinColor.twee"
52 | ]
53 | }
54 |
--------------------------------------------------------------------------------
/data/json/whitelists.json:
--------------------------------------------------------------------------------
1 | {
2 | "01-setup": [
3 | "weather-descriptions.js"
4 | ],
5 | "02-Helpers": [
6 | "macros.js"
7 | ],
8 | "03-JavaScript": [
9 | "base.js",
10 | "bedroom-pills.js",
11 | "clothing-shop-v2.js",
12 | "colour-namer.js",
13 | "debug-menu.js",
14 | "eyes-related.js",
15 | "furniture.js",
16 | "ingame.js",
17 | "npc-compressor.js",
18 | "sexShopMenu.js",
19 | "sexToysInventory.js",
20 | "ui.js",
21 | "time-macros.js",
22 | "time.js",
23 | "save.js",
24 | "skin.js"
25 | ],
26 | "04-Variables": [
27 | "colours.js",
28 | "feats.js",
29 | "shop.js",
30 | "plant-setup.js"
31 | ],
32 | "special-masturbation": [
33 | "actions.js",
34 | "effects.js",
35 | "macros-masturbation.js",
36 | "slime-control.js",
37 | "audience.js"
38 | ],
39 | "04-Pregnancy": [
40 | "children-story-functions.js",
41 | "pregnancy.js",
42 | "pregnancy-types.js",
43 | "story-functions.js"
44 | ],
45 | "03-Templates": [
46 | "t-actions.js",
47 | "t-bodyparts.js",
48 | "t-misc.js"
49 | ],
50 | "base-system": [
51 | "effect.js",
52 | "images.js",
53 | "text.js",
54 | "widgets.js",
55 | "stat-changes.js",
56 | "skin-color.js"
57 | ],
58 | "external": [
59 | "color-namer.js"
60 | ],
61 | "base-clothing": [
62 | "update-clothes.js",
63 | "clothing-face.js",
64 | "clothing-feet.js",
65 | "clothing-genitals.js",
66 | "clothing-handheld.js",
67 | "clothing-hands.js",
68 | "clothing-head.js",
69 | "clothing-legs.js",
70 | "clothing-lower.js",
71 | "clothing-neck.js",
72 | "clothing-over-head.js",
73 | "clothing-over-lower.js",
74 | "clothing-over-upper.js",
75 | "clothing-under-lower.js",
76 | "clothing-under-upper.js",
79 | "clothing-upper.js"
80 | ],
81 | "01-main": [
82 | "02-tooltips.js"
83 | ],
84 | "05-renderer": [
85 | "30-canvasmodel-editor.js"
86 | ],
87 | "loc-museum": [
88 | "paintings.js"
89 | ]
90 | }
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | """
2 | 基本种类:
3 | :: EVENT [WIDGET] | 一般在文件开头
4 |
5 | <>ANY<> | 有闭合的
6 | <> | 无闭合的
7 | <> | 特殊名称
8 | <> | 条件,如 if / link 等
9 |
10 | 特殊:
11 | <>ANY<>ANY<>ANY<>
12 |
13 | <>ANY<>ANY<>
14 |
15 | <>>
16 | <>>
17 |
18 | <>
19 | <>
20 | <>
21 |
22 | $VAR | 变量
23 | $VAR.FUNC(PARAM) | 调用函数
24 | $VAR.PROP | 属性
25 |
26 | STRING | 文本
27 | ANY | 有闭合的
28 | | 无闭合的
29 |
30 | TEXT
31 |
32 | //COMMENT | 注释
33 | /*COMMENT*/ | 注释(可跨行)
34 | /* COMMENT
35 | * COMMENT
36 | * COMMENT */
37 | | 注释(可跨行)
38 |
40 |
41 | 要翻译的:
42 | TEXT, STRING
43 | """
44 |
45 | import asyncio
46 | import time
47 |
48 | from src import (
49 | logger,
50 | Paratranz,
51 | ProjectDOL,
52 | PARATRANZ_TOKEN,
53 | CHINESE_VERSION,
54 | SOURCE_TYPE,
55 | )
56 |
57 | from src.tools.process_variables import VariablesProcess as VP
58 |
59 |
60 | async def process_common(dol_common: ProjectDOL, pt: Paratranz, chs_version: str):
61 |     """
62 |     Vanilla-version pipeline:
63 |     1. download the source code
64 |     2. build the raw (untranslated) dictionaries
65 |     3. download the translated dictionaries
66 |     4. update the raw dictionaries
67 |     5. overwrite the original game text
68 |     """
69 |     """ Drop everything left over from previous runs """
70 | await dol_common.drop_all_dirs()
71 |
72 | """ 下载源码 """
73 | await dol_common.download_from_gitgud()
74 | # await dol_common.patch_format_js()
75 |
76 | """ 预处理所有的 <> """
77 | var = VP()
78 | var.fetch_all_file_paths()
79 | var.fetch_all_set_content()
80 |
81 | """ 创建生肉词典 """
82 | await dol_common.create_dicts()
83 |
84 | """ 下载汉化词典 成品在 `raw_dicts` 文件夹里 """
85 | download_flag = (
86 | await pt.download_from_paratranz()
87 | ) # 如果下载,需要在 consts 里填上管理员的 token, 在网站个人设置里找
88 | if not download_flag:
89 | return
90 |
91 | """ 替换生肉词典 """
92 | await dol_common.update_dicts()
93 |
94 | """ 替换游戏原文 用的是 `paratranz` 文件夹里的内容覆写 """
95 | blacklist_dirs = [
96 | # "00-framework-tools",
97 | # "01-config",
98 | # "03-JavaScript",
99 | # "04-Variables",
100 | # "base-clothing",
101 | # "base-combat",
102 | # "base-debug",
103 | # "base-system",
104 | # "flavour-text-generators",
105 | # "fonts",
106 | # "overworld-forest",
107 | # "overworld-plains",
108 | # "overworld-town",
109 | # "overworld-underground",
110 | # "special-dance",
111 | # "special-exhibition",
112 | # "special-masturbation",
113 | # "special-templates"
114 | ]
115 | blacklist_files = []
116 | await dol_common.apply_dicts(blacklist_dirs, blacklist_files, debug_flag=False)
117 |
118 | # """ 有些额外需要更改的 """
119 | dol_common.change_css() # 更换一些样式和硬编码文本
120 | dol_common.replace_banner() # 更换游戏头图
121 | dol_common.change_version(chs_version) # 更换游戏版本号
122 |
123 | """ 编译成游戏 """
124 | dol_common.compile(chs_version)
125 | dol_common.package_zip(chs_version) # 自动打包成 zip
126 | dol_common.run() # 运行
127 |
128 |
129 | async def main():
130 | start = time.time()
131 | # =====
132 |     dol_common = ProjectDOL(
133 |         type_=SOURCE_TYPE
134 |     )  # set to "dev" to fetch the latest development branch; "common" is the vanilla release
135 |
136 | pt_common = Paratranz(type_=SOURCE_TYPE)
137 | if not PARATRANZ_TOKEN:
138 |         logger.error("PARATRANZ_TOKEN is not set, so downloading the translation package may fail. Find your token under the settings at https://paratranz.cn/users/my and fill it in .env\n")
139 | return
140 |
141 | await process_common(dol_common, pt_common, chs_version=CHINESE_VERSION)
142 |
143 | end = time.time()
144 | return end - start
145 |
146 |
147 | if __name__ == "__main__":
148 | last = asyncio.run(main())
149 |     logger.info(f"===== total time: {last or -1:.2f}s =====")
150 | try:
151 | from win10toast import ToastNotifier
152 | except ImportError:
153 | pass
154 | else:
155 |         ToastNotifier().show_toast(title="DOL script finished", msg=f"total time: {last or -1:.2f}s")
156 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | loguru~=0.7.0
2 | httpx~=0.24.0
3 | numpy==1.25.1
4 | pandas==2.0.3
5 | aiofiles~=23.1.0
6 | python-dotenv~=1.0.0
7 | beautifulsoup4~=4.12.2
8 | orjson~=3.9.2
9 | dukpy~=0.3.0
10 | lxml~=4.9.3
11 | typing_extensions~=4.12.2
12 | PyGithub~=2.4.0
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
1 | from .consts import *
2 | from .log import *
3 |
4 | from .paratranz import *
5 | from .parse_text import *
6 | from .project_dol import *
7 | from .download import *
8 |
--------------------------------------------------------------------------------
/src/_wip_parse_texts/__init__.py:
--------------------------------------------------------------------------------
1 | from .consts import *
2 | from .log import *
3 | from .main import *
--------------------------------------------------------------------------------
/src/_wip_parse_texts/consts.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from pathlib import Path
4 |
5 | """REGEX"""
6 | PTN_COMMENT = re.compile(r"""(?:/\*[\s\S]*?\*/)|(?:<!--[\s\S]*?-->)""")  # approximate: /* ... */ and <!-- ... --> comments
7 | PTN_HEAD = re.compile(r"""::[\s\S]*?\n""")
8 | PTN_MACRO = re.compile(r"""<</?([\w=\-]+)(?:\s+((?:(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)|(?://.*\n)|(?:`(?:\\.|[^`\\\n])*?`)|(?:"(?:\\.|[^"\\\n])*?")|(?:'(?:\\.|[^'\\\n])*?')|(?:\[(?:[<>]?[Ii][Mm][Gg])?\[[^\r\n]*?]]+)|[^>]|(?:>(?!>)))*?))?>>""")  # <<macro ...>> and <</macro>>
9 | PTN_TAG = re.compile(r"""</?\w+[^<>]*?>(?!>)""")  # approximate: <tag ...> and </tag>
10 |
11 | """STRING"""
12 | GAME_TEXTS_NAME = "degrees-of-lewdity-master"
13 |
14 | """NUMBER"""
15 | GENERAL_LIMIT = 1000
16 |
17 | """PATH"""
18 | ROOT = Path(__file__).parent.parent.parent
19 | DIR_GAME = ROOT / GAME_TEXTS_NAME
20 | DIR_GAME_TEXTS = DIR_GAME / "game"
21 | DIR_TEST = ROOT / "src" / "parse_texts"
22 | DIR_PASSAGE = DIR_TEST / "passage"
23 | DIR_TARGET = DIR_TEST / "target"
24 |
25 | DIR_PARATRANZ_EXPORT = ROOT / "data" / "paratranz" / "common" / "raw"
26 |
27 |
28 | __all__ = [
29 | "PTN_COMMENT",
30 | "PTN_HEAD",
31 | "PTN_MACRO",
32 | "PTN_TAG",
33 |
34 | "GAME_TEXTS_NAME",
35 |
36 | "GENERAL_LIMIT",
37 |
38 | "ROOT",
39 | "DIR_GAME",
40 | "DIR_GAME_TEXTS",
41 | "DIR_PASSAGE",
42 | "DIR_TARGET",
43 | "DIR_TEST",
44 | "DIR_PARATRANZ_EXPORT"
45 | ]
46 |
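47 | # Minimal usage sketch (illustrative only; the sample passage text below is made up):
48 | # how the patterns above are meant to run over a twee passage body.
49 | if __name__ == "__main__":
50 |     _sample = '<<if $arousal gte 100>>You tremble.<</if>> <span class="pink">You blush.</span>'
51 |     print([m.group() for m in PTN_MACRO.finditer(_sample)])  # the <<if ...>> and <</if>> macros
52 |     print([m.group() for m in PTN_TAG.finditer(_sample)])    # the <span ...> and </span> tags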
--------------------------------------------------------------------------------
/src/_wip_parse_texts/log.py:
--------------------------------------------------------------------------------
1 | from loguru import logger as logger_
2 | import sys
3 |
4 | logger_.remove()
5 | logger_.add(sys.stdout, format="{time:HH:mm:ss} | [{level}] | {message}", colorize=True)
6 |
7 | logger = logger_
8 |
9 | __all__ = [
10 | "logger"
11 | ]
12 |
--------------------------------------------------------------------------------
/src/_wip_parse_texts/main.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import re
4 |
5 | from pathlib import Path
6 |
7 | from src._wip_parse_texts.log import logger
8 | from src._wip_parse_texts.consts import (
9 | DIR_PASSAGE,
10 | DIR_TARGET,
11 | DIR_GAME_TEXTS,
12 | DIR_PARATRANZ_EXPORT,
13 | GAME_TEXTS_NAME,
14 | PTN_COMMENT,
15 | PTN_MACRO,
16 | PTN_TAG,
17 | GENERAL_LIMIT,
18 | )
19 |
20 |
21 | class ParseTwine:
22 | def __init__(self) -> None:
23 |         # absolute paths of all twine files
24 |         self._twine_filepaths: list[Path] = []
25 | 
26 |         # names of all passages
27 |         self._twine_passage_names: list[str] = []
28 |         # detailed info about every passage, for further processing
29 |         self._twine_passage_data: dict[str, dict] = {}
30 |         # flattened to a single level
31 |         self._twine_passage_data_flat: list[dict] = []
32 | 
33 |         # detailed info about every element, for further processing
34 |         self._twine_elements_data: dict[str, dict] = {}
35 |         # flattened to a single level
36 |         self._twine_elements_data_flat: list[dict] = []
37 | 
38 |         # detailed info about the combined elements, for further processing
39 |         self._twine_combined_elements_data: dict[str, dict] = {}
40 |         # flattened to a single level
41 |         self._twine_combined_elements_data_flat: list[dict] = []
42 | 
43 |         # entries from the old translation workflow, enriched with details, for further processing
44 |         self._paratranz_detailed_raw_data: dict[str, list[dict]] = {}
45 | 
46 |         # converted into a paratranz-importable format, for further processing
47 |         self._paratranz_elements_data: dict[str, dict] = {}
48 |         # flattened to a single level
49 |         self._paratranz_elements_data_flat: list[dict] = []
50 |
51 | @staticmethod
52 | def init_dirs() -> None:
53 | """创建目录"""
54 | for dir_ in {DIR_PASSAGE, DIR_TARGET}:
55 | os.makedirs(dir_, exist_ok=True)
56 |
57 |     # entry point
58 |     def parse(self) -> None:
59 |         """Entry point"""
60 | self.get_all_twine_filepaths()
61 | self.get_all_twine_passages()
62 |
63 | self.get_all_basic_elements()
64 | self.get_all_plaintext_elements()
65 |
66 | self.combine_twine_element_pairs()
67 | self.combine_twine_element_full()
68 |
69 | self.build_paratranz_detailed_raw_data()
70 | self.build_paratranz_format()
71 |
72 |     # file paths
73 |     def get_all_twine_filepaths(self) -> None:
74 |         """Collect the absolute path of every file, ready for the next extraction step"""
75 |         logger.info("Collecting the absolute paths of all twine files...")
76 | self._twine_filepaths = [
77 | Path(root) / file
78 | for root, _, files in os.walk(DIR_GAME_TEXTS)
79 | for file in files
80 | if file.endswith(".twee")
81 | ]
82 |
83 |         logger.info("All twine file paths collected!")
84 |
85 |     # passage info
86 |     def get_all_twine_passages(self):
87 |         """Collect all passages, ready for extraction and for recording element positions"""
88 |         logger.info("Collecting all twine passages...")
89 | for filepath in self._twine_filepaths:
90 | with open(filepath, "r", encoding="utf-8") as fp:
91 | content = fp.read()
92 |
93 |             if not content:  # some files are empty
94 | continue
95 |
96 |             content = f"\n{content}"  # makes splitting into passages easier
97 |             content_slices = content.split("\n::")[1:]  # split on the passage-title marker
98 |
99 |             # cut down to a path relative to the "game" folder of the game source
100 | relative_filepath = Path().joinpath(*filepath.parts[filepath.parts.index(GAME_TEXTS_NAME) + 2:]).__str__()
101 | for slice_ in content_slices:
102 |                 # passage name; the content inside [] and {} is stripped in the processing below
103 |                 passage_name = slice_.split("\n")[0].strip()
104 |                 # passage body: everything except the title line
105 | passage_body = "\n".join(slice_.split("\n")[1:-1])
106 | passage_full = f":: {passage_name}\n{passage_body}"
107 |
108 | if passage_name.endswith("]"):
109 |                     # keep only the plain-text part of the passage name
110 | passage_name = passage_name.split("[")[0].strip()
111 |
112 | passage_data = {
113 | "filepath": relative_filepath,
114 | "passage_name": passage_name,
115 | "passage_body": passage_body,
116 | "passage_full": passage_full,
117 | }
118 |
119 | self._twine_passage_names.append(passage_name)
120 | self._twine_passage_data_flat.append(passage_data)
121 | if relative_filepath not in self._twine_passage_data:
122 | self._twine_passage_data[relative_filepath] = {passage_name: passage_data}
123 | elif passage_name not in self._twine_passage_data[relative_filepath]:
124 | self._twine_passage_data[relative_filepath][passage_name] = passage_data
125 |                 else:
126 |                     raise ValueError(f"duplicate passage {passage_name} in {relative_filepath}")
127 |
128 |         logger.info("All twine passages collected!")
129 |
130 |     # basic elements
131 |     def get_all_basic_elements(self):
132 |         """Collect every comment, head, macro and tag; whatever remains is plain text"""
133 |         logger.info("Collecting all basic twine elements...")
134 | 
135 |         def _add_element(fp: str, pg: str, match: re.Match, type_: str):
136 |             """Shared helper for the repeated append logic"""
137 | element = {
138 | "filepath": fp,
139 | "passage": pg,
140 | "type": type_,
141 | "element": match.group(),
142 | "pos_start": match.start(),
143 | "pos_end": match.end(),
144 | }
145 | self._twine_elements_data_flat.append(element)
146 | if fp not in self._twine_elements_data:
147 | self._twine_elements_data[fp] = {pg: [element]}
148 | elif pg not in self._twine_elements_data[fp]:
149 | self._twine_elements_data[fp][pg] = [element]
150 | else:
151 | self._twine_elements_data[fp][pg].append(element)
152 |
153 | for passage in self._twine_passage_data_flat:
154 | filepath = passage["filepath"]
155 | content = passage["passage_body"]
156 | passage_name = passage["passage_name"]
157 |
158 | comments = re.finditer(PTN_COMMENT, content)
159 | for comment in comments:
160 | _add_element(filepath, passage_name, comment, "comment")
161 |
162 | macros = re.finditer(PTN_MACRO, content)
163 | for macro in macros:
164 | _add_element(filepath, passage_name, macro, "macro")
165 |
166 | tags = re.finditer(PTN_TAG, content)
167 | for tag in tags:
168 | _add_element(filepath, passage_name, tag, "tag")
169 |
170 | self.sort_elements_data()
171 | self.filter_comment_inside()
172 |         logger.info("All basic twine elements collected!")
173 |
174 |     # drop elements that sit inside comments
175 |     def filter_comment_inside(self):
176 |         """The regex pass also captures content that lives inside comments; drop it"""
177 | for filepath, elements_data in self._twine_elements_data.items():
178 | for passage, elements in elements_data.items():
179 | elements_copy = elements.copy()
180 | for idx, element in enumerate(elements_copy):
181 | if element["type"] != "comment":
182 | continue
183 |
184 |                     for i in range(len(elements_copy) - idx):
185 |                         # i starts at 0, which is the element itself, so skip it
186 |                         if i == 0:
187 |                             continue
188 | 
189 |                         # elements are sorted by pos_start, so anything wrapped by a comment must come after it
190 |                         # hence when a later element starts before an earlier one ends, the former is a comment and the latter is wrapped by it
191 |                         if elements_copy[idx + i]["pos_start"] < element["pos_end"]:
192 | elements[idx + i] = None
193 |
194 | self._twine_elements_data[filepath][passage] = [element for element in elements if element is not None]
195 | self.sort_elements_data()
196 |
197 |     # plain text
198 |     def get_all_plaintext_elements(self):
199 |         # sourcery skip: hoist-statement-from-if
200 |         """Whatever sits between the other elements is plain text"""
201 |         logger.info("Collecting all plain-text twine elements...")
202 |
203 | for filepath, elements_data in self._twine_elements_data.items():
204 | for passage, elements in elements_data.items():
205 | content = self._twine_passage_data[filepath][passage]["passage_body"]
206 | elements_copy = elements.copy()
207 |
208 | for idx, element in enumerate(elements_copy):
209 |                     # there may be plain text before the start of an extracted element
210 |                     # this case is special: it needs both a backward check and a forward check
211 |                     # leading plain text exists only when the first element does not start at the passage start
212 |                     if idx <= 0 < element["pos_start"]:
213 | text = content[: element["pos_start"]]
214 | pos_start = 0
215 | pos_end = element["pos_start"]
216 | elements.append({
217 | "filepath": filepath,
218 | "passage": passage,
219 | "type": "text",
220 | "element": text,
221 | "pos_start": pos_start,
222 | "pos_end": pos_end,
223 | })
224 |
225 |                     # not the last element; the forward check for the first element is folded in here
226 |                     if idx < len(elements_copy) - 1:
227 |                         # nothing between the two adjacent elements, hence no plain text
228 |                         if element["pos_end"] == elements_copy[idx + 1]["pos_start"]:
229 |                             continue
230 |
231 | text = content[element["pos_end"]: elements_copy[idx + 1]["pos_start"]]
232 | pos_start = element["pos_end"]
233 | pos_end = elements_copy[idx + 1]["pos_start"]
234 |
235 |                     # there may be plain text after the end of the last extracted element
236 |                     else:
237 |                         # the last element ends exactly at the passage end, hence no plain text
238 |                         if element["pos_end"] >= len(content):
239 | continue
240 |
241 | text = content[element["pos_end"] :]
242 | pos_start = element["pos_end"]
243 | pos_end = len(content)
244 |
245 | text_element = {
246 | "filepath": filepath,
247 | "passage": passage,
248 | "type": "text",
249 | "element": text,
250 | "pos_start": pos_start,
251 | "pos_end": pos_end,
252 | }
253 | elements.append(text_element)
254 |
255 | self._twine_elements_data[filepath][passage] = elements
256 | self.sort_elements_data()
257 |         logger.info("All plain-text twine elements collected!")
258 |
259 |     # sorting
260 |     def sort_elements_data(self):
261 |         """Sort the elements by position, ready for the next step"""
262 | for filepath, elements_data in self._twine_elements_data.items():
263 | for passage, elements in elements_data.items():
264 | self._twine_elements_data[filepath][passage] = sorted(elements, key=lambda elem: elem["pos_start"])
265 |
266 |     # first pass: combine by open/close pairs
267 |     def combine_twine_element_pairs(self):
268 |         """First combination pass: group the elements by open/close pairs and by passage"""
269 |         logger.info("Starting the first combination pass over all elements...")
270 | 
271 |         def _add_element(fp: str, pg: str, elem: str, start: int, end: int):
272 |             """Shared helper for the repeated append logic"""
273 | combined = {
274 | "filepath": fp,
275 | "passage": pg,
276 | "element": elem,
277 | "pos_start": start,
278 | "pos_end": end,
279 | "length": end - start,
280 | }
281 | if fp not in self._twine_combined_elements_data:
282 | self._twine_combined_elements_data[fp] = {pg: [combined]}
283 | elif pg not in self._twine_combined_elements_data[fp]:
284 | self._twine_combined_elements_data[fp][pg] = [combined]
285 | else:
286 | self._twine_combined_elements_data[fp][pg].append(combined)
287 |
288 | for filepath, elements_data in self._twine_elements_data.items():
289 | for passage, elements in elements_data.items():
290 | content = self._twine_passage_data[filepath][passage]["passage_body"]
291 |                 # if the whole passage fits within 1000 characters, pack it up as-is
292 | if len(content) <= GENERAL_LIMIT:
293 | _add_element(filepath, passage, content, 0, len(content))
294 | continue
295 |
296 |                 jumped_idx = None  # marks whether the current element was merged away and must be skipped
297 | elements_copy = elements.copy()
298 | for idx, element in enumerate(elements_copy):
299 |                     # skip the parts that were swallowed while merging elements
300 |                     if jumped_idx is not None and idx != jumped_idx:
301 | continue
302 |
303 | jumped_idx = None
304 |
305 | head_elem = element["element"]
306 | head_type = element["type"]
307 | head_start = element["pos_start"]
308 | head_end = element["pos_end"]
309 |
310 |                     # comments are added directly; they get further treatment later
311 |                     if head_type == "comment":
312 | _add_element(filepath, passage, content[head_start:head_end], head_start, head_end)
313 | continue
314 |
315 |                     # find the matching closer of the macro
316 |                     if head_type == "macro":
317 |                         # this is a closer; normally a closer never comes before its opener, so this is an unmerged single element: add it directly
318 |                         if head_elem.startswith("<</"):
319 |                             _add_element(filepath, passage, content[head_start:head_end], head_start, head_end)
320 |                             continue
321 | 
322 |                         macro_name = re.findall(r"^<<([\w=\-]+)", head_elem)[0]  # used to locate the closer
323 |                         layer = 0  # an if can nest inside an if, so track the depth to know whether we are at the outermost layer
324 |                         block_start = head_start
325 | 
326 |                         # given the macro opener, find the matching macro closer
327 |                         # if the block fits the character limit, combine it; otherwise skip it and look at the next one
328 |                         # if no closer is found by the end, the macro needs no closing: add it and carry on as normal
329 | flag = False
330 |                         for i in range(len(elements_copy) - idx):
331 |                             """
332 |                             e.g. the elements are 012345
333 |                             at element 2: idx=2, len=6, i=0..3
334 |                             i=0 points at element 2, i=3 points at element 5
335 |                             so i=0 must be skipped
336 |                             """
337 |                             if i == 0:  # this is the current element itself, skip
338 |                                 continue
339 |
340 | tail_elem = elements_copy[idx + i]["element"]
341 | tail_type = elements_copy[idx + i]["type"]
342 | tail_end = elements_copy[idx + i]["pos_end"]
343 |                             if tail_type != "macro":  # we want a closer; this is not even a macro, skip
344 | continue
345 |
346 |                             if tail_elem.startswith(f"<<{macro_name}"):  # one layer deeper
347 |                                 layer += 1
348 |                                 continue
349 | 
350 |                             if not tail_elem.startswith(f"<</{macro_name}"):  # not a closer, skip
351 |                                 continue
352 | 
353 |                             if layer > 0:  # a closer, but not at the outermost layer
354 |                                 layer -= 1
355 |                                 continue
356 |
357 |                             # this is now the outermost closer
358 | block_end = tail_end
359 | length = block_end - block_start
360 | flag = True
361 |
362 |                             if length > GENERAL_LIMIT:  # too long; skip this combination, add only the single macro element, then break out
363 |                                 _add_element(filepath, passage, content[head_start:head_end], head_start, head_end)
364 | break
365 |
366 |                             # after adding, skip the merged middle part and resume the outer loop at the element after the closer
367 | jumped_idx = idx + i + 1
368 | _add_element(filepath, passage, content[block_start:block_end], block_start, block_end)
369 | break
370 |
371 |                         # normally flag = True whether or not the length fit the limit
372 |                         # so only an unclosed macro leaves flag = False: add the single element and loop on as normal
373 |                         if not flag:
374 | _add_element(filepath, passage, content[head_start:head_end], head_start, head_end)
375 |
376 |                     # find the matching closing tag; same idea as above
377 |                     elif head_type == "tag":
378 |                         # this is a closer; normally a closer never comes before its opener, so this is an unmerged single element: add it directly
379 |                         if head_elem.startswith("</"):
380 |                             _add_element(filepath, passage, content[head_start:head_end], head_start, head_end)
381 |                             continue
382 | 
383 |                         tag_name = re.findall(r"^<(\w+)", head_elem)[0]  # used to locate the closer
384 |                         layer = 0  # a span can nest inside a span, so track the depth to know whether we are at the outermost layer
385 |                         block_start = head_start
386 | 
387 |                         # given the tag opener, find the matching tag closer
388 |                         # if the block fits the character limit, combine it; otherwise skip it and look at the next one
389 |                         # if no closer is found by the end, the tag needs no closing: add it and carry on as normal
390 | flag = False
391 |                         for i in range(len(elements_copy) - idx):
392 |                             """
393 |                             e.g. the elements are 012345
394 |                             at element 2: idx=2, len=6, i=0..3
395 |                             i=0 points at element 2, i=3 points at element 5
396 |                             so i=0 must be skipped
397 |                             """
398 |                             if i == 0:  # this is the current element itself, skip
399 |                                 continue
400 |
401 | tail_elem = elements_copy[idx + i]["element"]
402 | tail_type = elements_copy[idx + i]["type"]
403 | tail_end = elements_copy[idx + i]["pos_end"]
404 |                             if tail_type != "tag":  # we want a closer; this is not even a tag, skip
405 | continue
406 |
407 |                             if tail_elem.startswith(f"<{tag_name}"):  # one layer deeper
408 |                                 layer += 1
409 |                                 continue
410 | 
411 |                             if not tail_elem.startswith(f"</{tag_name}"):  # not a closer, skip
412 |                                 continue
413 | 
414 |                             if layer > 0:  # a closer, but not at the outermost layer
415 |                                 layer -= 1
416 |                                 continue
417 |
418 |                             # this is now the outermost closer
419 | block_end = tail_end
420 | length = block_end - block_start
421 | flag = True
422 |
423 |                             if length > GENERAL_LIMIT:  # too long; skip this combination, add only the single tag element, then break out
424 | _add_element(filepath, passage, content[head_start:head_end], head_start, head_end)
425 | break
426 |
427 |                             # after adding, skip the merged middle part and resume the outer loop at the element after the closer
428 | jumped_idx = idx + i + 1
429 | _add_element(filepath, passage, content[block_start:block_end], block_start, block_end)
430 | break
431 |
432 |                         # normally flag = True whether or not the length fit the limit
433 |                         # so only an unclosed tag leaves flag = False: add the single element and loop on as normal
434 |                         if not flag:
435 | _add_element(filepath, passage, content[head_start:head_end], head_start, head_end)
436 |
437 |                     # only plain text is left; nothing special, just add it
438 |                     else:
439 | _add_element(filepath, passage, content[head_start:head_end], head_start, head_end)
440 |
441 |         logger.info("First combination pass over all elements done!")
442 |
443 |     # second pass: combine by character count
444 |     def combine_twine_element_full(self):
445 |         """Second combination pass: merge the elements up to the character limit"""
446 |         logger.info("Starting the second combination pass over all elements...")
447 | for filepath, elements_data in self._twine_combined_elements_data.items():
448 | for passage, elements in elements_data.items():
449 |                 if len(elements) == 1:  # the whole passage was merged into one piece: add it and move on
450 | self._twine_combined_elements_data_flat.extend(elements)
451 | continue
452 |
453 | elements_copy = elements.copy()
454 | temp_elements = []
455 |                 jumped_idx = None  # marks whether the current element was merged away and must be skipped
456 | for idx, element in enumerate(elements_copy):
457 |                     # skip the parts that were swallowed while merging elements
458 |                     if jumped_idx is not None and idx != jumped_idx:
459 | continue
460 |
461 | jumped_idx = None
462 |
463 | head_start = element["pos_start"]
464 | head_end = element["pos_end"]
465 |
466 |                     # the running accumulator
467 |                     full_elem = element["element"]
468 |                     full_length = element["length"]
469 |                     pos_start = head_start
470 |                     pos_end = head_end
471 |                     if full_length > GENERAL_LIMIT:  # this one alone is already over the limit; no further combining needed
472 | temp_elements.append(element)
473 | self._twine_combined_elements_data_flat.append(element)
474 | continue
475 |
476 | flag = False
477 |                     for i in range(len(elements_copy) - idx):
478 |                         """
479 |                         e.g. the elements are 012345
480 |                         at element 2: idx=2, len=6, i=0..3
481 |                         i=0 points at element 2, i=3 points at element 5
482 |                         so i=0 must be skipped
483 |                         """
484 |                         if i == 0:  # i == 0 is the element itself, so skip it
485 | continue
486 |
487 | tail_elem = elements_copy[idx + i]["element"]
488 | tail_end = elements_copy[idx + i]["pos_end"]
489 | tail_length = elements_copy[idx + i]["length"]
490 | full_length += tail_length
491 |                         if full_length > GENERAL_LIMIT:  # over the limit; merge only up to the previous element
492 | combined = {
493 | "filepath": filepath,
494 | "passage": passage,
495 | "element": full_elem,
496 | "pos_start": pos_start,
497 | "pos_end": pos_end,
498 | "length": full_length - tail_length,
499 | }
500 | temp_elements.append(combined)
501 | self._twine_combined_elements_data_flat.append(combined)
502 |                             # flag records whether the accumulator was flushed when leaving the loop
503 |                             flag = True
504 |                             # the merged parts must be skipped here as well
505 |                             jumped_idx = idx + i
506 | break
507 |
508 |                         # append only once we know the limit is not exceeded
509 | full_elem = f"{full_elem}{tail_elem}"
510 | pos_end = tail_end
511 |
512 |                     # the current accumulator was not flushed when the loop ended
513 | if not flag:
514 | combined = {
515 | "filepath": filepath,
516 | "passage": passage,
517 | "element": full_elem,
518 | "pos_start": pos_start,
519 | "pos_end": pos_end,
520 | "length": full_length,
521 | }
522 | temp_elements.append(combined)
523 | self._twine_combined_elements_data_flat.append(combined)
524 |                         break  # reached the end, so leave the loop
525 |
526 |                 # replace the previous elements with the newly combined ones
527 | self._twine_combined_elements_data[filepath][passage] = temp_elements
528 |
529 |         logger.info("Second combination pass over all elements done!")
530 |
531 |     # one-off helper: extract the translations and entry states of the old workflow into detailed records
532 |     def build_paratranz_detailed_raw_data(self):
533 |         """One-off helper: extract the translations and entry states of the old workflow into detailed records"""
534 |         logger.info("Building detailed records from the old translation workflow...")
535 |
536 | def _add_element(fp_: str, orig: str, trns: str, stg: int, info: list):
537 | data = {
538 | "filepath": fp_,
539 | "original": orig,
540 | "translation": trns,
541 | "stage": stg,
542 | "pos_info": info,
543 | }
544 | if fp_ not in self._paratranz_detailed_raw_data:
545 | self._paratranz_detailed_raw_data[fp_] = [data]
546 | else:
547 | self._paratranz_detailed_raw_data[fp_].append(data)
548 |
549 | for root, dirs, files in os.walk(DIR_PARATRANZ_EXPORT):
550 |             if "失效" in root or "日志" in root or "测试" in root:  # skip the "invalid", "logs" and "tests" folders
551 | continue
552 |
553 | for file in files:
554 |                 if ".js." in file:
555 |                     # TODO handling for JS files
556 | continue
557 | else:
558 | filename = file.replace(".csv.json", ".twee")
559 | filepath = Path(root) / filename
560 | filepath = Path().joinpath(*filepath.parts[filepath.parts.index("raw") + 1:]).__str__()
561 |
562 |                     # the file is gone in the new version; probably deleted or renamed
563 |                     if filepath not in self._twine_passage_data:
564 |                         logger.warning(f"{filepath} does not exist! It was probably deleted or renamed!")
565 | continue
566 |
567 | with open(Path(root) / file, "r", encoding="utf-8") as fp:
568 | paratranz_raws: list[dict] = json.load(fp)
569 |
570 | for pz_raw in paratranz_raws:
571 | original = pz_raw["original"]
572 | translation = pz_raw["translation"]
573 | stage = pz_raw["stage"]
574 | pos_info = []
575 |
576 | flag = False
577 | for passage_name, passage_data in self._twine_passage_data[filepath].items():
578 | passage_body: str = passage_data["passage_body"]
579 |
580 |                         # the entry is not in this passage
581 |                         if original not in passage_body:
582 | continue
583 |
584 |                         # it may occur more than once; find every occurrence
585 | original_re = re.escape(original)
586 | matches = re.finditer(original_re, passage_body)
587 | pos_info.extend([
588 | {
589 | "passage": passage_name,
590 | "pos_start": match.start(),
591 | "pos_end": match.end(),
592 | }
593 | for match in matches
594 | ])
595 |                         # the entry may also be missing from every passage; that case is reported as an error below
596 |                         flag = True
597 |
598 | if not flag:
599 |                         logger.error(f"! Entry not found - {original} | {filepath}")
600 |
601 | _add_element(filepath, original, translation, stage, pos_info)
602 |         logger.info("Detailed records from the old translation workflow built!")
603 |
604 |     # one-off helper: migrate the translations and entry states of the old workflow to the new one
605 |     def build_paratranz_combined_raw_data(self):
606 | for filepath, paratranz_detailed_datas in self._paratranz_detailed_raw_data.items():
607 | for paratranz_data in paratranz_detailed_datas:
608 | original = paratranz_data["original"]
609 | translation = paratranz_data["translation"]
610 | stage = paratranz_data["stage"]
611 | for pos_info in paratranz_data["pos_info"]:
612 | passage = pos_info["passage"]
613 | paratranz_pos_start = pos_info["pos_start"]
614 | paratranz_pos_end = pos_info["pos_end"]
615 |
616 | for element in self._twine_combined_elements_data[filepath][passage]:
617 | element_pos_start = element["pos_start"]
618 | element_pos_end = element["pos_end"]
619 |                     # case 1: the paratranz entry is wrapped inside the element
620 | if (
621 | element_pos_start < paratranz_pos_start
622 | and element_pos_end > paratranz_pos_end
623 | ):
624 | ...
625 |
626 |                 # case 2:
627 |
628 |     # convert the format into a file that can be imported into paratranz
629 |     def build_paratranz_format(self):
630 |         """Convert the results into a format paratranz can recognise"""
631 |         logger.info("Converting to the paratranz format...")
632 |
633 | def _add_element(fp: str, pg: str, k: str, orig: str, trns: str, ctx: str, stg: int):
634 | """
635 | 这是添加元素的,重复的部分提出来封装
636 | :param fp: 文件路径
637 | :param pg: 段落名
638 | :param k: 键,唯一标识
639 | :param orig: 原文
640 | :param trns: 译文
641 | :param ctx: 前后文,可不写
642 | :param stg: 未翻译 0/已翻译 1/有疑问 2/已检查 3/已审核 5/已锁定 9/已隐藏 -1
643 | """
644 | paratranz = {
645 | "key": k,
646 | "original": orig,
647 | "translation": trns,
648 | "context": ctx,
649 | "stage": stg,
650 | }
651 | if fp not in self._paratranz_elements_data:
652 | self._paratranz_elements_data[fp] = {pg: [paratranz]}
653 | elif pg not in self._paratranz_elements_data[fp]:
654 | self._paratranz_elements_data[fp][pg] = [paratranz]
655 | else:
656 | self._paratranz_elements_data[fp][pg].append(paratranz)
657 |
658 | for filepath, elements_data in self._twine_combined_elements_data.items():
659 | for passage, elements in elements_data.items():
660 | for idx, element in enumerate(elements):
661 | key = f"{filepath.replace('.twee', '')}|{passage}|{idx}"
662 | _add_element(filepath, passage, key, element["element"], "", "", 0)
663 |         logger.info("Converted to the paratranz format!")
664 |
665 |     # export to files
666 |     def export_data(self):
667 |         """Export all the data"""
668 |         logger.info("Exporting all files...")
669 | with open(DIR_TARGET / "twine_passage_names.json", "w", encoding="utf-8") as fp:
670 | json.dump(self._twine_passage_names, fp, ensure_ascii=False, indent=2)
671 |
672 | with open(DIR_TARGET / "twine_passage_data.json", "w", encoding="utf-8") as fp:
673 | json.dump(self._twine_passage_data, fp, ensure_ascii=False, indent=2)
674 |
675 | with open(DIR_TARGET / "twine_elements_data.json", "w", encoding="utf-8") as fp:
676 | json.dump(self._twine_elements_data, fp, ensure_ascii=False, indent=2)
677 |
678 | with open(DIR_TARGET / "twine_combined_elements_data.json", "w", encoding="utf-8") as fp:
679 | json.dump(self._twine_combined_elements_data, fp, ensure_ascii=False, indent=2)
680 |
681 | with open(DIR_TARGET / "twine_combined_elements_data_flat.json", "w", encoding="utf-8") as fp:
682 |             json.dump(self._twine_combined_elements_data_flat, fp, ensure_ascii=False, indent=2)
683 |
684 | with open(DIR_TARGET / "paratranz_detailed_raw_data.json", "w", encoding="utf-8") as fp:
685 | json.dump(self._paratranz_detailed_raw_data, fp, ensure_ascii=False, indent=2)
686 |
687 | with open(DIR_TARGET / "paratranz_elements_data.json", "w", encoding="utf-8") as fp:
688 | json.dump(self._paratranz_elements_data, fp, ensure_ascii=False, indent=2)
689 |         logger.info("All files exported!")
690 |
691 |
692 | def main():
693 | parser = ParseTwine()
694 | parser.init_dirs()
695 | parser.parse()
696 | parser.export_data()
697 |
698 |
699 | if __name__ == "__main__":
700 | main()
701 |
702 | __all__ = ["ParseTwine"]
703 |
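704 | # Worked example of the two combination passes (illustrative; the passage text is made up).
705 | # With GENERAL_LIMIT = 1000, inside a passage longer than the limit, a fragment such as
706 | #     <<if $day>>Some text.<</if>> More text. <<npc>>
707 | # is handled like this: pass 1 (combine_twine_element_pairs) pairs the <<if>> opener with
708 | # its <</if>> closer and, since the block fits the limit, emits "<<if $day>>Some text.<</if>>"
709 | # as one element; the plain text and the unclosed <<npc>> macro are emitted on their own.
710 | # Pass 2 (combine_twine_element_full) then glues adjacent elements together until appending
711 | # the next one would push the total length past GENERAL_LIMIT.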
--------------------------------------------------------------------------------
/src/ast_javascript/__init__.py:
--------------------------------------------------------------------------------
1 | from .acorn import *
2 | import re
3 | from dataclasses import dataclass
4 | from pathlib import Path
5 | from pprint import pprint
6 | from re import Pattern
7 | from typing import Union, Any
8 | from enum import Enum
9 | from loguru import logger
10 |
11 |
12 | class Patterns(Enum):
13 | SPACE: str = " "
14 | RETURN: str = "\n"
15 | NUMBERS: str = "0123456789"
16 |
17 | STORY_START: str = ":" # :: STORY
18 |
19 | VAR_NAME_HEAD: str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
20 | VAR_NAME_BODY: str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
21 |
22 | WIDGET_START: str = "<"
23 | TAG_START: str = "<"
24 | TAG_SLASH: str = "/"
25 |
26 | WIDGET_END: str = ">"
27 | TAG_END: str = ">"
28 |
29 |
30 | @dataclass
31 | class Token:
32 | type: str
33 | value: Any
34 |
35 | def __repr__(self):
36 | return self.__str__()
37 |
38 | def __str__(self):
39 | return fr"({self.type}) || {self.value}"
40 |
41 |
42 | class Tokenizer:
43 | """词法分析器"""
44 | def __init__(self, raw_code: Union[str, bytes]):
45 | self._raw_code = raw_code
46 |
47 | self._tokens: list = []
48 |
49 | def tokenize(self):
50 | """str->tokens"""
51 | index: int = 0
52 |         while index < len(self._raw_code):
53 | current_char = self._raw_code[index]
54 |
55 |             # space
56 |             if current_char == Patterns.SPACE.value:
57 |                 index += 1
58 |             # newline
59 |             elif current_char == Patterns.RETURN.value:
60 |                 index += 1
61 |             # digits
62 | elif current_char in Patterns.NUMBERS.value:
63 | value = ""
64 | while current_char in Patterns.NUMBERS.value:
65 | value += current_char
66 | index += 1
67 | current_char = self._raw_code[index]
68 | self._tokens.append(Token(type="number", value=value))
69 |             # story header (:: Story)
70 |             elif current_char == Patterns.STORY_START.value and (index == 0 or self._raw_code[index-1] == Patterns.RETURN.value) and self._raw_code[index+1] == Patterns.STORY_START.value:
71 |                 value = ""
72 |                 index += 2  # skip the "::"
73 |                 if self._raw_code[index] == Patterns.SPACE.value:
74 |                     index += 1  # skip the single space after "::"
75 |                 current_char = self._raw_code[index]
76 |                 while current_char != Patterns.RETURN.value:
77 |                     value += current_char
78 |                     index += 1
79 |                     current_char = self._raw_code[index]
80 |                 self._tokens.append(Token(type="story", value=value))
81 |             elif current_char == Patterns.TAG_START.value:
82 |                 # 1. opening tag, e.g. <span ...>
83 |                 if self._raw_code[index+1] in Patterns.VAR_NAME_HEAD.value:
84 |                     value = ""
85 |                     index += 1
86 |                     current_char = self._raw_code[index]
87 |                     while current_char != Patterns.TAG_END.value:
88 |                         value += current_char
89 |                         index += 1
90 |                         current_char = self._raw_code[index]
91 |                     self._tokens.append(Token(type="tag", value=value))
92 |                 # 2. closing tag, e.g. </span>
93 |                 elif self._raw_code[index+1] == Patterns.TAG_SLASH.value:
94 |                     value = ""
95 |                     index += 2
96 |                     current_char = self._raw_code[index]
97 |                     while current_char not in {Patterns.TAG_END.value, Patterns.SPACE.value}:
98 |                         value += current_char
99 |                         index += 1
100 |                         current_char = self._raw_code[index]
101 |                     self._tokens.append(Token(type="tag_close", value=value))
102 |                 # 3. widget macro, e.g. <<name ...>>
103 |                 elif self._raw_code[index+1] == Patterns.WIDGET_START.value:
104 |                     value = ""
105 |                     type_ = "widget"
106 |                     index += 2
107 |                     current_char = self._raw_code[index]
108 |                     # 4. closing widget, e.g. <</name>>
109 |                     if current_char == Patterns.TAG_SLASH.value:
110 |                         type_ = "widget_close"
111 |                         index += 1
112 |                         current_char = self._raw_code[index]
113 |                     while current_char not in {Patterns.TAG_END.value, Patterns.SPACE.value}:
114 |                         value += current_char
115 |                         index += 1
116 |                         current_char = self._raw_code[index]
117 |                     self._tokens.append(Token(type=type_, value=value))
118 |                 # 5. a bare less-than sign
119 |                 # 6. an HTML comment such as <!--
120 | else:
121 | index += 1
122 | continue
123 | else:
124 | index += 1
125 | continue
126 | return self._tokens
127 |
128 |
129 | class Parser:
130 |     """Syntax analyser"""
131 | 
132 | 
133 | class Traverser:
134 |     """AST traverser"""
135 |
136 |
137 | def parse(raw: str):
138 | return Tokenizer(raw).tokenize()
139 |
140 |
141 | if __name__ == '__main__':
142 | with open(Path(r"D:\GitHub\vrelnir_localization\degrees-of-lewdity-master\game\00-framework-tools\02-version\waiting-room.twee"), "r", encoding="utf-8") as fp:
143 | code = fp.read()
144 | result = parse(code)
145 | pprint(result)
146 |
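147 | # Expected token stream (illustrative) for a snippet such as
148 | #     ":: Start\n<<if $a>>hi<</if>><span>x</span>"
149 | # tokenize() yields:
150 | #     (story) || Start
151 | #     (widget) || if          <- reading stops at the first space, so only the name is captured
152 | #     (widget_close) || if
153 | #     (tag) || span
154 | #     (tag_close) || span
155 | # plain text such as "hi" and "x" is skipped by this tokenizer.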
--------------------------------------------------------------------------------
/src/ast_javascript/acorn.py:
--------------------------------------------------------------------------------
1 | import dukpy
2 | from src import *
3 | from dataclasses import dataclass, asdict, field
4 | from typing import Literal, Union, TypedDict, Callable, Any
5 | from typing_extensions import Self
6 | from types import FunctionType
7 |
8 | DIR_JS_MODULE_ROOT = DIR_DATA_ROOT / "jsmodule"
9 | DIR_ARCON_ROOT = DIR_JS_MODULE_ROOT / "acorn"
10 | SOURCE_TYPE = Literal['script', 'module']
11 | ECMA_VERSION = Literal[
12 | 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 'latest']
13 | REGISTER_FUNC = """
14 | function registerFunc(key) {
15 | return function () { var param = Array.prototype.slice.call(arguments); return call_python.call.apply(call_python, [null, key].concat(param)); };
16 | }
17 | """
18 |
19 |
20 | class Position(TypedDict):
21 | line: int
22 | column: int
23 | offset: int
24 |
25 |
26 | class TokenType(TypedDict, total=False):
27 | label: str
28 | keyword: str
29 | beforeExpr: bool
30 | startsExpr: bool
31 | isLoop: bool
32 | isAssign: bool
33 | prefix: bool
34 | postfix: bool
35 | binop: int
36 |     updateContext: Callable[[Self], None] | None  # Optional[...] is the pre-PEP 604 spelling of "| None"
37 |
38 |
39 | class SourceLocation(TypedDict):
40 | start: Position
41 | end: Position
42 | source: str | None
43 |
44 |
45 | class Token(TypedDict):
46 | type: TokenType
47 | value: Any
48 | start: int
49 | end: int
50 | range: list[int] | None
51 | loc: SourceLocation | None
52 |
53 |
54 | class Comment(TypedDict):
55 | type: Literal['Line', 'Block']
56 | value: str
57 | start: int
58 | end: int
59 | range: list[int] | None
60 | loc: SourceLocation | None
61 |
62 |
63 | class AcornOptionParam(TypedDict, total=False):
64 | ecmaVersion: ECMA_VERSION
65 | sourceType: SOURCE_TYPE
66 | ranges: bool
67 | preserveParens: bool
68 | locations: bool
69 | checkPrivateFields: bool
70 | allowHashBang: bool
71 | allowReturnOutsideFunction: bool
72 | allowImportExportEverywhere: bool
73 | allowReserved: bool | None
74 | allowAwaitOutsideFunction: bool | None
75 | allowSuperOutsideMethod: bool | None
76 | onInsertedSemicolon: Callable[[int, Position | None], None]
77 | onTrailingComma: Callable[[int, Position | None], None]
78 | onToken: Union[Callable[[Token], None], list[Token]]
79 | onComment: Union[Callable[[bool, str, int, int, Position | None, int, Position | None], None], list[Token]]
80 |
81 |
82 | @dataclass
83 | class AcornOption:
84 | ecmaVersion: ECMA_VERSION = 2020
85 | sourceType: SOURCE_TYPE = "script"
86 | ranges: bool = False
87 | preserveParens: bool = False
88 | locations: bool = False
89 | checkPrivateFields: bool = True
90 | allowHashBang: bool = False
91 | allowReturnOutsideFunction: bool = False
92 | allowImportExportEverywhere: bool = False
93 | allowReserved: bool | None = None
94 | allowAwaitOutsideFunction: bool | None = None
95 | allowSuperOutsideMethod: bool | None = None
96 | onToken = None
97 | onComment = None
98 |
99 | def to_dict(self):
100 | return {
101 | k: str(v) if v is not None else None
102 | for k, v in asdict(self).items()
103 | }
104 |
105 | @staticmethod
106 | def parse_option(option: AcornOptionParam = None):
107 | if option is None:
108 | option = {}
109 | _option = {}
110 | func = {}
111 | for key, value in option.items():
112 | if hasattr(AcornOption, key) and not isinstance(value, FunctionType):
113 | _option[key] = value
114 | if isinstance(value, FunctionType):
115 | func[key] = value
116 | return AcornOption(**_option).to_dict(), func
117 |
118 |
119 | @dataclass
120 | class JSSyntaxError(Exception):
121 | pos: int = -1
122 |     loc: dict[str, int] = field(default_factory=dict)
123 | raisedAt: int = -1
124 | name: str = ""
125 | message: str = ""
126 |
127 | def to_string(self):
128 |         return f"{self.name}: {self.message} (line {self.line}, col {self.column})"
129 |
130 | def __str__(self):
131 | return self.to_string()
132 |
133 | @property
134 | def line(self):
135 | return self.loc["line"] if "line" in self.loc else -1
136 |
137 | @property
138 | def column(self):
139 | return self.loc["column"] if "column" in self.loc else -1
140 |
141 | def err_code(self, code: list[str]):
142 | res = code[self.line - 1]
143 | column = self.column
144 | err_str = ""
145 | if column < len(res):
146 | line_str_len = len(str(self.line))
147 | line_space = (" " * line_str_len)
148 | column_space = (" " * (column + 1))
149 | err_str = f"{line_space}{column_space}~"
150 |
151 | msg = f"{self.to_string()}\n{self.line} {res}"
152 | if err_str != "":
153 | msg += "\n" + err_str
154 | return msg
155 |
156 |
157 | class Acorn:
158 | def __init__(self):
159 | self._jsi = dukpy.JSInterpreter()
160 | self._jsi.loader.register_path(DIR_JS_MODULE_ROOT / "acorn" / "dist")
161 |
162 | @property
163 | def jsi(self):
164 | return self._jsi
165 |
166 | def parse(self, code_text: str, option: AcornOptionParam = None):
167 | self.install_dep()
168 | arcon_option, func = AcornOption.parse_option(option)
169 | # print(arcon_option)
170 | code = [REGISTER_FUNC, "var acorn = require('acorn')",
171 | "function parseArcon() {var option = Object.assign({},dukpy['option']); try{"]
172 | for key, value in func.items():
173 | code.append(f"option['{key}'] = registerFunc('{key}')")
174 | self._jsi.export_function(key, value)
175 | code.extend((
176 | "var result =acorn.parse(dukpy['code_text'], option);return result}catch (e){if (!(e instanceof SyntaxError)) throw e;var err = Object.assign({}, e); err.name = e.name;err.message = e.message;return err;}}",
177 | "parseArcon()"))
178 | result = self._jsi.evaljs(code, code_text=code_text, option=arcon_option)
179 | if "name" in result and result["name"] == 'SyntaxError':
180 | raise JSSyntaxError(**result)
181 | return result
182 |
183 | @staticmethod
184 | def install_dep():
185 | if not DIR_ARCON_ROOT.exists():
186 | dukpy.install_jspackage("acorn", None, DIR_ARCON_ROOT)
187 |
188 |
189 | __all__ = [
190 | "DIR_JS_MODULE_ROOT",
191 | "DIR_ARCON_ROOT",
192 |
193 | "AcornOptionParam",
194 | "AcornOption",
195 | "Acorn",
196 |
197 | "Position",
198 | "TokenType",
199 | "SourceLocation",
200 | "Token",
201 | "Comment",
202 | "JSSyntaxError"
203 |
204 | ]
205 |
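206 | 
207 | # Minimal usage sketch (illustrative; note that install_dep() downloads the acorn
208 | # package via dukpy on first use, so this needs network access the first time):
209 | if __name__ == "__main__":
210 |     _code = "var answer = 42;"
211 |     try:
212 |         tree = Acorn().parse(_code, {"ecmaVersion": 2020})
213 |         print(tree["type"])  # "Program" on a successful parse
214 |     except JSSyntaxError as err:
215 |         print(err.err_code(_code.splitlines()))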
--------------------------------------------------------------------------------
/src/consts.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from enum import Enum
3 | from dotenv import load_dotenv
4 |
5 | import os
6 | import platform
7 | import sys
8 |
9 | load_dotenv()
10 | """当前系统"""
11 | PLATFORM_SYSTEM = platform.system()
12 | PLATFORM_ARCHITECTURE = platform.architecture()[0]
13 | SYSTEM_ARGV = sys.argv
14 | GITHUB_ACTION_DEV = len(SYSTEM_ARGV) > 1 and SYSTEM_ARGV[1] == "-D"
15 | GITHUB_ACTION_ISBETA = len(SYSTEM_ARGV) > 2 and SYSTEM_ARGV[2] == "beta"
16 |
17 | """汉化仓库"""
18 | PARATRANZ_TOKEN = os.getenv("PARATRANZ_TOKEN") or "" # 必填,在个人设置里
19 | PARATRANZ_BASE_URL = "https://paratranz.cn/api"
20 | PARATRANZ_HEADERS = {"Authorization": PARATRANZ_TOKEN}
21 | PARATRANZ_PROJECT_DOL_ID = 4780  # DOL project ID
22 | CHINESE_VERSION = os.getenv("CHINESE_VERSION") or ""  # required; see the README
23 | SOURCE_TYPE = os.getenv("SOURCE_TYPE") or "common"  # required; "common" or "dev"
24 |
25 | """Modloader"""
26 | REPOSITORY_MODLOADER_ARTIFACTS = "https://api.github.com/repos/Lyoko-Jeremie/DoLModLoaderBuild/actions/artifacts"
27 | GITHUB_ACCESS_TOKEN = os.getenv("GITHUB_ACCESS_TOKEN") or ""
28 |
29 | """源代码仓库"""
30 | REPOSITORY_URL_COMMON = "https://gitgud.io/Vrelnir/degrees-of-lewdity"
31 | REPOSITORY_ZIP_URL_COMMON = "https://gitgud.io/Vrelnir/degrees-of-lewdity/-/archive/master/degrees-of-lewdity-master.zip"
32 | REPOSITORY_COMMITS_URL_COMMON = "https://gitgud.io/api/v4/projects/8430/repository/commits"
33 | REPOSITORY_URL_DEV = "https://gitgud.io/Vrelnir/degrees-of-lewdity"
34 | REPOSITORY_ZIP_URL_DEV = "https://gitgud.io/Vrelnir/degrees-of-lewdity/-/archive/dev/degrees-of-lewdity-dev.zip"
35 |
36 | """本地目录"""
37 | DIR_ROOT = Path(__file__).parent.parent
38 | DIR_DATA_ROOT = DIR_ROOT / "data"
39 | DIR_JSON_ROOT = DIR_DATA_ROOT / "json"
40 | DIR_TEMP_ROOT = DIR_DATA_ROOT / "temp"
41 | DIR_MODS_ROOT = DIR_DATA_ROOT / "mods"
42 |
43 | DIR_GAME_ROOT_COMMON_NAME = "degrees-of-lewdity-master"
44 | DIR_GAME_ROOT_COMMON = DIR_ROOT / DIR_GAME_ROOT_COMMON_NAME
45 | DIR_GAME_TEXTS_COMMON = DIR_GAME_ROOT_COMMON / "game"
46 | DIR_GAME_CSS_COMMON = DIR_GAME_ROOT_COMMON / "modules" / "css"
47 | DIR_GAME_ANDROID_ROOT_COMMON = DIR_GAME_ROOT_COMMON / "devTools" / "androidsdk" / "image" / "cordova"
48 |
49 | DIR_GAME_ROOT_DEV_NAME = "degrees-of-lewdity-dev"
50 | DIR_GAME_ROOT_DEV = DIR_ROOT / DIR_GAME_ROOT_DEV_NAME
51 | DIR_GAME_TEXTS_DEV = DIR_GAME_ROOT_DEV / "game"
52 | DIR_GAME_CSS_DEV = DIR_GAME_ROOT_DEV / "modules" / "css"
53 | DIR_GAME_ANDROID_ROOT_DEV = DIR_GAME_ROOT_DEV / "devTools" / "androidsdk" / "image" / "cordova"
54 |
55 | DIR_RAW_DICTS = DIR_DATA_ROOT / "raw_dicts"
56 |
57 | DIR_PARATRANZ = DIR_DATA_ROOT / "paratranz"
58 |
59 | """文件"""
60 | FILE_REPOSITORY_ZIP = DIR_TEMP_ROOT / "dol.zip"
61 | FILE_PARATRANZ_ZIP = DIR_TEMP_ROOT / "paratranz_export.zip"
62 | FILE_COMMITS = DIR_JSON_ROOT / "commits.json"
63 | FILE_MODS = DIR_JSON_ROOT / "mod.json"
64 | FILE_VERSION_EDIT_COMMON = DIR_GAME_TEXTS_COMMON / "01-config" / "sugarcubeConfig.js"
65 |
66 | SUFFIX_TWEE = ".twee"
67 | SUFFIX_JS = ".js"
68 |
69 |
70 | class DirNamesTwee(Enum):
71 | """特殊的目录名"""
72 |
73 | FRAMEWORK = "00-framework-tools"
74 | CONFIG = "01-config"
75 | JAVASCRIPT = "03-JavaScript"
76 | VARIABLES = "04-Variables"
77 | BASE_CLOTHING = "base-clothing"
78 | BASE_COMBAT = "base-combat"
79 | BASE_DEBUG = "base-debug"
80 | BASE_HAIR = "base-hair"
81 | BASE_SYSTEM = "base-system"
82 | FLAVOUR_TEXT_GENERATORS = "flavour-text-generators"
83 |
84 | OVERWORLD = "overworld-"
85 |
86 | LOCATION = "loc-"
87 | SPECIAL = "special-"
88 |     NORMAL = OVERWORLD or LOCATION or SPECIAL  # note: "or" returns its first truthy operand, so NORMAL is simply an alias of OVERWORLD
89 |
90 |
91 | class FileNamesTwee(Enum):
92 | """特殊的文件名"""
93 |
94 | """ 00-framework-tools """
95 |     WAITING_ROOM_FULL = "waiting-room.twee"  # FULL means this is the exact file name
96 |
97 | """ 01-config """
98 | START_FULL = "start.twee"
99 | VERSION_INFO_FULL = "versionInfo.twee"
100 |
101 | """ 04-Variables """
102 | CANVASMODEL_FULL = "canvasmodel-example.twee"
103 | PASSAGE_FOOTER_FULL = "variables-passageFooter.twee"
104 | VERSION_UPDATE_FULL = "variables-versionUpdate.twee"
105 | PREGNANCY_VAR_FULL = "pregnancyVar.twee"
106 | VARIABLES_STATIC_FULL = "variables-static.twee"
107 |
108 | """ base-clothing """
109 | CAPTIONTEXT_FULL = "captiontext.twee"
110 |     CLOTHING = "clothing-"  # without FULL, the file name merely contains this text
111 | CLOTHING_SETS_FULL = "clothing-sets.twee"
112 | CLOTHING_IMAGES_FULL = "images.twee"
113 | INIT_FULL = "init.twee"
114 | WARDROBES_FULL = "wardrobes.twee"
115 |
116 | """ base-combat """
117 | ACTIONS_FULL = "actions.twee"
118 | ACTIONS = "actions"
119 | STALK_FULL = "stalk.twee"
120 | GENERATION = "generation.twee"
121 | TENTACLE_ADV_FULL = "tentacle-adv.twee"
122 | TENTACLES_FULL = "tentacles.twee"
123 | COMBAT_EFFECTS_FULL = "effects.twee"
124 | NPC_DAMAGE_FULL = "npc-damage.twee"
125 | NPC_GENERATION_FULL = "npc-generation.twee"
126 | SPEECH_SYDNEY_FULL = "speech-sydney.twee"
127 | SPEECH_FULL = "speech.twee"
128 | STRUGGLE_FULL = "struggle.twee"
129 | SWARMS_FULL = "swarms.twee"
130 | SWARM_EFFECTS_FULL = "swarm-effects.twee"
131 | COMBAT_WIDGETS_FULL = "widgets.twee"
132 | COMBAT_IMAGES_FULL = "images.twee"
133 |
134 | """ base-hair """
135 | HAIR_STYLES_FULL = "hair-styles.twee"
136 |
137 | """ base-system """
138 | CHARACTERISTICS_FULL = "characteristics.twee"
139 | SOCIAL_FULL = "social.twee"
140 | TRAITS_FULL = "traits.twee"
141 | BODYWRITING_FULL = "bodywriting.twee"
142 | BODYWRITING_OBJECTS_FULL = "bodywriting-objects.twee"
143 | CAPTION_FULL = "caption.twee"
144 | DEVIANCY_FULL = "deviancy.twee"
145 | SYSTEM_EXHIBITIONISM_FULL = "exhibitionism.twee"
146 | FAME_FULL = "fame.twee"
147 | FEATS_FULL = "feats.twee"
148 | MOBILE_STATS_FULL = "mobileStats.twee"
149 | NAME_LIST_FULL = "name-list.twee"
150 | NAMED_NPCS_FULL = "named-npcs.twee"
151 | NICKNAMES_FULL = "nicknames.twee"
152 | PLANT_OBJECTS_FULL = "plant-objects.twee"
153 | PROMISCUITY_FULL = "promiscuity.twee"
154 | RADIO_FULL = "radio.twee"
155 | SETTINGS_FULL = "settings.twee"
156 | SKILL_DIFFICULTIES_FULL = "skill-difficulties.twee"
157 | SLEEP_FULL = "sleep.twee"
158 | STAT_CHANGES_FULL = "stat-changes.twee"
159 | TENDING_FULL = "tending.twee"
160 | TEXT_FULL = "text.twee"
161 | TIME_FULL = "time.twee"
162 | TIPS_FULL = "tips.twee"
163 | TRANSFORMATIONS_FULL = "transformations.twee"
164 | SYSTEM_WIDGETS_FULL = "widgets.twee"
165 | PERSISTENT_NPCS_FULL = "persistent-npcs.twee"
166 | JOURNAL_FULL = "journal.twee"
167 |
168 | """ flavour-text-generators """
169 | BODY_COMMENTS_FULL = "body-comments.twee"
170 | EXHIBITIONISM_FULL = "exhibitionism.twee"
171 | EZ_THESAURUS_FULL = "ez-thesaurus.twee"
172 |
173 |
174 | class DirNamesJS(Enum):
175 | """要抓的 JS 目录"""
176 |
177 | SETUP = "01-setup"
178 | HELPERS = "02-Helpers"
179 | JAVASCRIPT = "03-JavaScript"
180 | VARIABLES = "04-Variables"
181 | SPECIAL_MASTURBATION = "special-masturbation"
182 | PREGNANCY = "04-Pregnancy"
183 | TIME = "time"
184 | TEMPLATES = "03-Templates"
185 | EXTERNAL = "external"
186 | BASE_SYSTEM = "base-system"
187 | BASE_CLOTHING = "base-clothing"
188 | MAIN = "01-main"
189 | RENDERER = "05-renderer"
190 |
191 |
192 | class FileNamesJS(Enum):
193 | """要抓的 JS 文件"""
194 | """01-setup"""
195 | WEATHER_DESCRIPTION_FULL = "weather-descriptions.js"
196 |
197 | """02-Helpers"""
198 | MACROS_FULL = "macros.js"
199 |
200 | """ 03-JavaScript """
201 | BASE_FULL = "base.js"
202 | BEDROOM_PILLS_FULL = "bedroom-pills.js"
203 | DEBUG_MENU_FULL = "debug-menu.js"
204 | EYES_RELATED = "eyes-related.js"
205 | FURNITURE_FULL = "furniture.js"
206 | INGAME_FULL = "ingame.js"
207 | SEXSHOP_MENU_FULL = "sexShopMenu.js"
208 | SEXTOY_INVENTORY_FULL = "sexToysInventory.js"
209 | UI_FULL = "ui.js"
210 | NPC_COMPRESSOR_FULL = "npc-compressor.js"
211 | COLOUR_NAMER_FULL = "colour-namer.js"
212 | CLOTHING_SHOP_V2_FULL = "clothing-shop-v2.js"
213 | TIME_FULL = "time.js"
214 | TIME_MACROS_FULL = "time-macros.js"
215 | SAVE_FULL = "save.js"
216 |
217 | """ 04-variables """
218 | COLOURS_FULL = "colours.js"
219 | FEATS_FULL = "feats.js"
220 | SHOP_FULL = "shop.js"
221 | PLANT_SETUP_FULL = "plant-setup.js"
222 |
223 | """ special-masturbation """
224 | ACTIONS_FULL = "actions.js"
225 | EFFECTS_FULL = "effects.js"
226 | MACROS_MASTURBATION_FULL = "macros-masturbation.js"
227 |
228 | """ 04-Pregnancy """
229 | CHILDREN_STORY_FUNCTIONS_FULL = "children-story-functions.js"
230 | PREGNANCY_FULL = "pregnancy.js"
231 | STORY_FUNCTIONS_FULL = "story-functions.js"
232 | PREGNANCY_TYPES_FULL = "pregnancy-types.js"
233 |
234 | """ 03-Templates """
235 | T_MISC_FULL = "t-misc.js"
236 | T_ACTIONS_FULL = "t-actions.js"
237 | T_BODYPARTS_FULL = "t-bodyparts.js"
238 |
239 | """ external """
240 | COLOR_NAMER_FULL = "color-namer.js"
241 |
242 | """ base-system """
243 | EFFECT_FULL = "effect.js"
244 | TEXT_FULL = "text.js"
245 | WIDGETS_FULL = "widgets.js"
246 | STAT_CHANGES_FULL = "stat-changes.js"
247 |
248 | """ base-clothing """
249 | UDPATE_CLOTHES_FULL = "update-clothes.js"
250 | CLOTHING = "clothing-"
251 |
252 | """ 01-main """
253 | TOOLTIPS = "02-tooltips.js"
254 |
255 | """ 05-renderer """
256 | CANVASMODEL_EDITOR_FULL = "30-canvasmodel-editor.js"
257 |
258 |
259 | __all__ = [
260 | "SYSTEM_ARGV",
261 | "GITHUB_ACTION_DEV",
262 | "GITHUB_ACTION_ISBETA",
263 | "PLATFORM_SYSTEM",
264 | "PLATFORM_ARCHITECTURE",
265 | "PARATRANZ_BASE_URL",
266 | "PARATRANZ_HEADERS",
267 | "PARATRANZ_TOKEN",
268 | "PARATRANZ_PROJECT_DOL_ID",
269 | "CHINESE_VERSION",
270 | "SOURCE_TYPE",
271 | "REPOSITORY_URL_COMMON",
272 | "REPOSITORY_ZIP_URL_COMMON",
273 | "REPOSITORY_COMMITS_URL_COMMON",
274 | "REPOSITORY_URL_DEV",
275 | "REPOSITORY_ZIP_URL_DEV",
276 | "DIR_ROOT",
277 | "DIR_DATA_ROOT",
278 | "DIR_JSON_ROOT",
279 | "DIR_TEMP_ROOT",
280 | "DIR_MODS_ROOT",
281 | "DIR_GAME_ROOT_COMMON_NAME",
282 | "DIR_GAME_ROOT_COMMON",
283 | "DIR_GAME_TEXTS_COMMON",
284 | "DIR_GAME_CSS_COMMON",
285 | "DIR_GAME_ANDROID_ROOT_COMMON",
286 | "DIR_GAME_ROOT_DEV_NAME",
287 | "DIR_GAME_ROOT_DEV",
288 | "DIR_GAME_TEXTS_DEV",
289 | "DIR_GAME_CSS_DEV",
290 | "DIR_GAME_ANDROID_ROOT_DEV",
291 | "DIR_RAW_DICTS",
292 | "DIR_PARATRANZ",
293 | "FILE_REPOSITORY_ZIP",
294 | "FILE_PARATRANZ_ZIP",
295 | "FILE_COMMITS",
296 | "FILE_MODS",
297 | "FILE_VERSION_EDIT_COMMON",
298 | "SUFFIX_TWEE",
299 | "SUFFIX_JS",
300 | "DirNamesTwee",
301 | "FileNamesTwee",
302 | "DirNamesJS",
303 | "FileNamesJS",
304 |
305 | "REPOSITORY_MODLOADER_ARTIFACTS",
306 | "GITHUB_ACCESS_TOKEN"
307 | ]
308 |
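The two enums above are consumed by the extraction code as plain string values. A minimal sketch of how a walked file can be matched against them (illustrative only: the real filtering lives in src/project_dol.py together with blacklists.json/whitelists.json, and members such as FileNamesJS.CLOTHING hold a prefix rather than a full filename, so exact matching here is a simplification):

    from pathlib import Path

    def is_target_js(path: Path) -> bool:
        # Compare the parent directory and filename against the enum values.
        dir_names = {member.value for member in DirNamesJS}
        file_names = {member.value for member in FileNamesJS}
        return path.parent.name in dir_names and path.name in file_names

    # is_target_js(Path("game/03-JavaScript/base.js")) -> True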
--------------------------------------------------------------------------------
/src/download.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from aiofiles import open as aopen
3 | import httpx
4 |
5 | from .log import logger
6 |
7 |
8 | async def chunk_split(filesize: int, chunk: int = 2) -> list[list[int]]:
9 |     """Split a large file into byte ranges for chunked download."""
10 | step = filesize // chunk
11 | arr = range(0, filesize, step)
12 | result = [[arr[i], arr[i + 1] - 1] for i in range(len(arr) - 1)]
13 | result[-1][-1] = filesize - 1
14 | # logger.info(f"chunks: {result}")
15 | return result
16 |
17 |
18 | async def chunk_download(
19 | url: str,
20 | client: httpx.AsyncClient,
21 | start: int,
22 | end: int,
23 | idx: int,
24 | full: int,
25 | save_path: Path,
26 | headers_: dict = None,
27 | ):
28 |     """Download one byte range and write it at its offset in the target file."""
29 | if not save_path.exists():
30 | with open(save_path, "wb") as _:
31 | ...
32 | headers = (
33 | {"Range": f"bytes={start}-{end}"} | headers_
34 | if headers_
35 | else {"Range": f"bytes={start}-{end}"}
36 | )
37 | response = await client.get(url, headers=headers, follow_redirects=True, timeout=60)
38 | async with aopen(save_path, "rb+") as fp:
39 | await fp.seek(start)
40 | await fp.write(response.content)
41 | logger.info(f"\t- 切片 {idx + 1} / {full} 已下载")
42 |
43 |
44 | __all__ = ["chunk_split", "chunk_download"]
45 |
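Together these two helpers form a small parallel range-request downloader: chunk_split computes byte ranges, and each chunk_download call writes its range at the correct offset of a shared file. A minimal usage sketch (assuming the server honours HTTP Range headers; it mirrors how fetch_latest_repository in src/project_dol.py drives them):

    import asyncio
    from pathlib import Path

    import httpx

    from src.download import chunk_split, chunk_download

    async def fetch(url: str, save_path: Path) -> None:
        async with httpx.AsyncClient() as client:
            # HEAD first to learn the total size, then download ranges in parallel.
            head = await client.head(url, follow_redirects=True, timeout=60)
            filesize = int(head.headers["Content-Length"])
            chunks = await chunk_split(filesize, 8)
            await asyncio.gather(*(
                chunk_download(url, client, start, end, idx, len(chunks), save_path)
                for idx, (start, end) in enumerate(chunks)
            ))

    # asyncio.run(fetch("https://example.com/big.zip", Path("big.zip")))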
--------------------------------------------------------------------------------
/src/log.py:
--------------------------------------------------------------------------------
1 | from loguru import logger as logger_
2 | import sys
3 |
4 | logger_.remove()
5 | logger_.add(sys.stdout, format="{time:HH:mm:ss} | [{level}] | {message}", colorize=True)
6 |
7 | logger = logger_
8 |
9 | __all__ = ["logger"]
10 |
--------------------------------------------------------------------------------
/src/paratranz.py:
--------------------------------------------------------------------------------
1 | from zipfile import ZipFile, BadZipfile
2 |
3 | import os
4 | import contextlib
5 | import httpx
6 |
7 | from .consts import *
8 | from .log import logger
9 |
10 |
11 | class Paratranz:
12 |     """Handles downloading the localization package."""
13 | def __init__(self, type_: str = "common"):
14 | self._type = type_
15 | self._project_id = PARATRANZ_PROJECT_DOL_ID
16 | self._mention_name = "" if self._type == "common" else "dev"
17 |
18 | async def download_from_paratranz(self) -> bool:
19 |         """Download the localization package from Paratranz."""
20 | os.makedirs(DIR_PARATRANZ, exist_ok=True)
21 | with contextlib.suppress(httpx.TimeoutException):
22 | await self.trigger_export()
23 |
24 | async with httpx.AsyncClient(verify=False) as client:
25 | flag = False
26 | for _ in range(3):
27 | try:
28 | await self.download_export(client)
29 | await self.unzip_export()
30 |                     except (httpx.ConnectError, httpx.TimeoutException, BadZipfile):
31 | continue
32 | else:
33 | flag = True
34 | break
35 | if not flag:
36 | logger.error(f"***** 无法正常下载 Paratranz {self._mention_name}汉化包!请检查网络连接情况,以及是否填写了正确的 TOKEN!\n")
37 | return False
38 | return True
39 |
40 | async def trigger_export(self):
41 |         """Trigger an export."""
42 | logger.info(f"===== 开始导出{self._mention_name}汉化文件 ...")
43 | url = f"{PARATRANZ_BASE_URL}/projects/{self._project_id}/artifacts"
44 | httpx.post(url, headers=PARATRANZ_HEADERS, verify=False)
45 | logger.info(f"##### {self._mention_name}汉化文件已导出 !\n")
46 |
47 | async def download_export(self, client: httpx.AsyncClient):
48 |         """Download the exported file."""
49 | logger.info(f"===== 开始下载{self._mention_name}汉化文件 ...")
50 | url = f"{PARATRANZ_BASE_URL}/projects/{self._project_id}/artifacts/download"
51 | headers = PARATRANZ_HEADERS
52 | content = (await client.get(url, headers=headers, follow_redirects=True)).content
53 | with open(DIR_TEMP_ROOT / f"paratranz_export{self._mention_name}.zip", "wb") as fp:
54 | fp.write(content)
55 | logger.info(f"##### {self._mention_name}汉化文件已下载 !\n")
56 |
57 | async def unzip_export(self):
58 |         """Unzip the export."""
59 | logger.info(f"===== 开始解压{self._mention_name}汉化文件 ...")
60 | with ZipFile(DIR_TEMP_ROOT / f"paratranz_export{self._mention_name}.zip") as zfp:
61 | zfp.extractall(DIR_PARATRANZ / self._type)
62 | logger.info(f"##### {self._mention_name}汉化文件已解压 !\n")
63 |
64 |
65 | __all__ = [
66 | "Paratranz"
67 | ]
68 |
69 |
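A minimal sketch of driving this class standalone (assumes PARATRANZ_TOKEN is filled in .env so that PARATRANZ_HEADERS authenticates, as configured in src/consts.py):

    import asyncio

    from src.paratranz import Paratranz

    async def main() -> None:
        # Trigger an export, then download and unzip it, with up to 3 retries.
        ok = await Paratranz(type_="common").download_from_paratranz()
        if not ok:
            raise SystemExit("Paratranz download failed")

    asyncio.run(main())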
--------------------------------------------------------------------------------
/src/project_dol.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import csv
3 | import datetime
4 | import re
5 | import os
6 | import platform
7 | from .ast_javascript import Acorn, JSSyntaxError
8 | from typing import Any
9 | from urllib.parse import quote
10 | from zipfile import ZipFile as zf, ZIP_DEFLATED
11 | from aiofiles import open as aopen
12 | from pathlib import Path
13 |
14 | import asyncio
15 | import json
16 | import httpx
17 | import shutil
18 | import subprocess
19 | import time
20 | import webbrowser
21 | import stat
22 |
23 | from .consts import *
24 | from .log import logger
25 | from .parse_text import *
26 | from .download import *
27 |
28 | LOGGER_COLOR = logger.opt(colors=True)
29 |
30 |
31 | class ProjectDOL:
32 |     """Main localization class."""
33 | def __init__(self, type_: str = "common"):
34 | with open(DIR_JSON_ROOT / "blacklists.json", "r", encoding="utf-8") as fp:
35 | self._blacklists: dict[str, list] = json.load(fp)
36 |
37 | with open(DIR_JSON_ROOT / "whitelists.json", "r", encoding="utf-8") as fp:
38 | self._whitelists: dict[str, list] = json.load(fp)
39 |
40 | self._type: str = type_
41 |         self._version: str | None = None
42 | self._mention_name = "" if self._type == "common" else "dev"
43 |         self._commit: dict[str, Any] | None = None
44 | self._acorn = Acorn()
45 | if FILE_COMMITS.exists():
46 | with open(FILE_COMMITS, "r", encoding="utf-8") as fp:
47 | self._commit: dict[str, Any] = json.load(fp)
48 |
49 | self._is_latest = False
50 |         self._paratranz_file_lists: list[Path] | None = None
51 |         self._raw_dicts_file_lists: list[Path] | None = None
52 |         self._game_texts_file_lists: list[Path] | None = None
53 |
54 | def _init_dirs(self, version: str):
55 |         """Create the target directories."""
56 | os.makedirs(DIR_TEMP_ROOT, exist_ok=True)
57 | os.makedirs(DIR_RAW_DICTS / self._type / version / "csv", exist_ok=True)
58 |
59 |     """ Fetch the latest version """
60 | async def fetch_latest_version(self, is_quiet: bool = True):
61 | async with httpx.AsyncClient(verify=False) as client:
62 | if self._type == "common":
63 | url = f"{REPOSITORY_URL_COMMON}/-/raw/master/version"
64 | else:
65 | url = f"{REPOSITORY_URL_DEV}/-/raw/dev/version"
66 | response = await client.get(url)
67 | if not is_quiet:
68 | logger.info(f"当前{self._mention_name}仓库最新版本: {response.text}")
69 | self._version = response.text
70 | self._init_dirs(self._version)
71 |
72 |     """ Download the source code """
73 | async def download_from_gitgud(self):
74 |         """Download the source repository files from gitgud."""
75 | if not self._version:
76 | await self.fetch_latest_version()
77 |         if self._is_latest:  # downloads are slow; if already on the latest version, don't re-download
78 | dol_path_zip = DIR_ROOT / f"dol{self._mention_name}.zip"
79 | if dol_path_zip.exists():
80 | with contextlib.suppress(shutil.Error, FileNotFoundError):
81 | shutil.move(dol_path_zip, DIR_TEMP_ROOT)
82 | await self.unzip_latest_repository()
83 | return
84 | await self.fetch_latest_repository()
85 | await self.unzip_latest_repository()
86 |
87 | async def fetch_latest_repository(self):
88 |         """Fetch the latest repository content."""
89 | logger.info(f"===== 开始获取最新{self._mention_name}仓库内容 ...")
90 | async with httpx.AsyncClient(verify=False) as client:
91 | if self._type == "common":
92 | zip_url = REPOSITORY_ZIP_URL_COMMON
93 | else:
94 | zip_url = REPOSITORY_ZIP_URL_DEV
95 | flag = False
96 | for _ in range(3):
97 | try:
98 | response = await client.head(zip_url, timeout=60, follow_redirects=True)
99 | filesize = int(response.headers["Content-Length"])
100 | chunks = await chunk_split(filesize, 64)
101 |                 except (httpx.ConnectError, KeyError):
102 | continue
103 | else:
104 | flag = True
105 | break
106 |
107 | if not flag:
108 |                 return logger.error("***** 无法正常下载最新仓库源码!请检查你的网络连接是否正常!")  # bail out: `chunks` is undefined when every retry failed
109 | tasks = [
110 | chunk_download(zip_url, client, start, end, idx, len(chunks), DIR_TEMP_ROOT / f"dol{self._mention_name}.zip")
111 | for idx, (start, end) in enumerate(chunks)
112 | ]
113 | await asyncio.gather(*tasks)
114 | logger.info(f"##### 最新{self._mention_name}仓库内容已获取! \n")
115 |
116 | async def unzip_latest_repository(self):
117 |         """Unzip locally."""
118 | logger.info(f"===== 开始解压{self._mention_name}最新仓库内容 ...")
119 | with zf(DIR_TEMP_ROOT / f"dol{self._mention_name}.zip") as zfp:
120 | zfp.extractall(DIR_ROOT)
121 | logger.info(f"##### 最新{self._mention_name}仓库内容已解压! \n")
122 |
123 | async def patch_format_js(self):
124 |         """Localize format.js."""
125 | logger.info(f"===== 开始替换 format.js ...")
126 | shutil.copyfile(
127 | DIR_DATA_ROOT / "jsmodule" / "format.js",
128 | DIR_GAME_ROOT_COMMON / "devTools" / "tweego" / "storyFormats" / "sugarcube-2" / "format.js"
129 | )
130 | logger.info(f"##### format.js 已替换!\n")
131 |
132 |     """ Create the raw dictionaries """
133 | async def create_dicts(self):
134 |         """Create the dictionaries."""
135 | await self._fetch_all_text_files()
136 | await self._create_all_text_files_dir()
137 | await self._process_texts()
138 |
139 | async def _fetch_all_text_files(self):
140 |         """Collect all text files."""
141 | logger.info(f"===== 开始获取{self._mention_name}所有文本文件位置 ...")
142 | self._game_texts_file_lists = []
143 | if self._type == "common":
144 | texts_dir = DIR_GAME_TEXTS_COMMON
145 | else:
146 | texts_dir = DIR_GAME_TEXTS_DEV
147 | for root, dir_list, file_list in os.walk(texts_dir):
148 | dir_name = Path(root).absolute().name
149 | for file in file_list:
150 | if not file.endswith(SUFFIX_TWEE):
151 | if not file.endswith(SUFFIX_JS):
152 | continue
153 |
154 | if dir_name in self._whitelists and file in self._whitelists[dir_name]:
155 | self._game_texts_file_lists.append(Path(root).absolute() / file)
156 | continue
157 |
158 | if dir_name not in self._blacklists:
159 | self._game_texts_file_lists.append(Path(root).absolute() / file)
160 | elif (
161 | not self._blacklists[dir_name]
162 | or file in self._blacklists[dir_name]
163 | ):
164 | continue
165 | else:
166 | self._game_texts_file_lists.append(Path(root).absolute() / file)
167 |
168 | logger.info(f"##### {self._mention_name}所有文本文件位置已获取 !\n")
169 |
170 | async def _create_all_text_files_dir(self):
171 |         """Create the directories up front to avoid errors."""
172 | if not self._version:
173 | await self.fetch_latest_version()
174 | if self._type == "common":
175 | dir_name = DIR_GAME_ROOT_COMMON_NAME
176 | else:
177 | dir_name = DIR_GAME_ROOT_DEV_NAME
178 | for file in self._game_texts_file_lists:
179 | target_dir = file.parent.parts[file.parts.index(dir_name)+1:]
180 | target_dir_csv = (DIR_RAW_DICTS / self._type / self._version / "csv").joinpath(*target_dir)
181 | if not target_dir_csv.exists():
182 | os.makedirs(target_dir_csv, exist_ok=True)
183 | target_dir_json = (DIR_RAW_DICTS / self._type / self._version / "json").joinpath(*target_dir)
184 | if not target_dir_json.exists():
185 | os.makedirs(target_dir_json, exist_ok=True)
186 |
187 | async def _process_texts(self):
188 |         """Process the translatable text into key-value pairs."""
189 | logger.info(f"===== 开始处理{self._mention_name}翻译文本为键值对 ...")
190 | tasks = [
191 | self._process_for_gather(idx, file)
192 | for idx, file in enumerate(self._game_texts_file_lists)
193 | ]
194 | await asyncio.gather(*tasks)
195 | logger.info(f"##### {self._mention_name}翻译文本已处理为键值对 ! \n")
196 |
197 | async def _process_for_gather(self, idx: int, file: Path):
198 | target_file = Path().joinpath(*file.parts[file.parts.index("game")+1:]).with_suffix("")
199 |
200 | with open(file, "r", encoding="utf-8") as fp:
201 | lines = fp.readlines()
202 | with open(file, "r", encoding="utf-8") as fp:
203 | content = fp.read()
204 | if file.name.endswith(SUFFIX_TWEE):
205 | pt = ParseTextTwee(lines, file)
206 | pre_bool_list = pt.pre_parse_set_run()
207 | elif file.name.endswith(SUFFIX_JS):
208 | pt = ParseTextJS(lines, file)
209 | target_file = f"{target_file}.js"
210 | else:
211 | return
212 | able_lines = pt.parse()
213 | if file.name.endswith(SUFFIX_TWEE) and pt.pre_bool_list:
214 | able_lines = [
215 | True if pre_bool_list[idx] or line else False
216 | for idx, line in enumerate(able_lines)
217 | ]
218 |
219 | if not any(able_lines):
220 | logger.warning(f"\t- ***** 文件 {file} 无有效翻译行 !")
221 | return
222 | try:
223 | results_lines_csv = [
224 | (f"{idx_ + 1}_{'_'.join(self._version[2:].split('.'))}|", _.strip())
225 | for idx_, _ in enumerate(lines)
226 | if able_lines[idx_]
227 | ]
228 | results_lines_json = await self._build_json_results_with_passage(lines, able_lines, content, file.__str__().split("\\game\\")[-1].split("/game/")[-1])
229 | except IndexError:
230 | logger.error(f"lines: {len(lines)} - parsed: {len(able_lines)}| {file}")
231 | results_lines_csv = None
232 | results_lines_json = None
233 | if results_lines_csv:
234 | with open(DIR_RAW_DICTS / self._type / self._version / "csv" / "game" / f"{target_file}.csv", "w", encoding="utf-8-sig", newline="") as fp:
235 | csv.writer(fp).writerows(results_lines_csv)
236 | if results_lines_json:
237 | with open(DIR_RAW_DICTS / self._type / self._version / "json" / "game" / f"{target_file}.json", "w", encoding="utf-8", newline="") as fp:
238 | json.dump(results_lines_json, fp, ensure_ascii=False, indent=2)
239 | # logger.info(f"\t- ({idx + 1} / {len(self._game_texts_file_lists)}) {target_file} 处理完毕")
240 |
241 | async def _build_json_results_with_passage(self, lines: list[str], able_lines: list[bool], content: str, file: str) -> list[dict]:
242 |         """Export the lines annotated with their passage names."""
243 | results_lines_json = []
244 | passage_name = None
245 | pos_relative = None
246 | pos_global = 0
247 | for idx, line in enumerate(lines):
248 | if line.startswith("::"):
249 | pos_relative = 0
250 | tmp_ = line.lstrip(":: ")
251 | if "[" not in line:
252 | passage_name = tmp_.strip()
253 | else:
254 | for idx_, char in enumerate(tmp_):
255 | if char != "[":
256 | continue
257 | passage_name = tmp_[:idx_].strip()
258 | break
259 | else:
260 |                         raise ValueError(f"Malformed passage header: {line!r}")
261 |
262 | if able_lines[idx]:
263 | pos_start = 0
264 |                 if line != line.lstrip():  # leading \t and spaces must be counted too
265 | for char in line:
266 | if char == line.strip()[0]:
267 | break
268 | pos_start += 1
269 | results_lines_json.append({
270 |                     "passage": passage_name,  # null for non-twee files
271 | "filepath": file,
272 | "key": f"{idx + 1}_{'_'.join(self._version[2:].split('.'))}|",
273 | "original": line.strip(),
274 | "translation": "",
275 |                     "pos": pos_relative + pos_start if pos_relative is not None else pos_global + pos_start  # falls back to the global position for non-twee files
276 | })
277 | if content[pos_global + pos_start] != line.lstrip()[0]:
278 | logger.error(f"pos可能不对!{file} | {passage_name} | {line}".replace("\t", "\\t").replace("\n", "\\n"))
279 | if pos_relative is not None and not line.startswith("::"):
280 | pos_relative += len(line)
281 | pos_global += len(line)
282 | return results_lines_json
283 |
284 |     """ Deduplicate the raw dictionaries """
285 | async def shear_off_repetition(self):
286 |         """Currently only for the world-expansion mod (世扩)."""
287 | logger.info(f"===== 开始去重{self._mention_name}文本 ...")
288 |         # never call deduplication on the vanilla game
289 | if self._type == "common":
290 | raise Exception("不要对原版调用去重")
291 |
292 | for root, dir_list, file_list in os.walk(DIR_RAW_DICTS / self._type / self._version / "csv" / "game"):
293 | if "失效词条" in root:
294 | continue
295 |
296 | for file in file_list:
297 |                 common_file_path = DIR_PARATRANZ / "common" / "utf8" / (Path(root) / file).relative_to(DIR_RAW_DICTS / self._type / self._version / "csv" / "game")
298 | if not common_file_path.exists():
299 | continue
300 | mod_file_path = Path(root) / file
301 |
302 | with open(mod_file_path, "r", encoding="utf-8") as fp:
303 | mod_data = list(csv.reader(fp))
304 |
305 | with open(common_file_path, "r", encoding="utf-8") as fp:
306 | common_data = list(csv.reader(fp))
307 | common_ens: dict = {
308 | row[-2] if len(row) > 2 else row[1]: idx_
309 | for idx_, row in enumerate(common_data)
310 |                 }  # old English text: row index
311 |
312 |                 # keys in the mod that also exist in vanilla are simply dropped
313 | for idx, row in enumerate(mod_data.copy()):
314 | if row[-1] in common_ens:
315 | mod_data[idx] = None
316 |
317 | mod_data = [_ for _ in mod_data if _]
318 | if not mod_data:
319 | os.remove(mod_file_path)
320 | continue
321 |
322 | with open(mod_file_path, "w", encoding="utf-8-sig", newline="") as fp:
323 | csv.writer(fp).writerows(mod_data)
324 |
325 | if not os.listdir(Path(root)):
326 | shutil.rmtree(Path(root))
327 | logger.info(f"##### {self._mention_name}所有文本已去重 !\n")
328 |
329 |     """ Refresh the raw dictionaries """
330 | async def update_dicts(self):
331 |         """Update the dictionaries."""
332 | if not self._version:
333 | await self.fetch_latest_version()
334 | logger.info(f"===== 开始更新{self._mention_name}字典 ...")
335 | file_mapping: dict = {}
336 |         for root, dir_list, file_list in os.walk(DIR_PARATRANZ / self._type / "utf8"):  # the previously exported dictionaries
337 | if "失效词条" in root:
338 | continue
339 | for file in file_list:
340 | file_mapping[Path(root).absolute() / file] = (
341 | DIR_RAW_DICTS / self._type / self._version / "csv" / "game" / Path(root).relative_to(DIR_PARATRANZ / self._type / "utf8") / file,
342 | DIR_RAW_DICTS / self._type / self._version / "json" / "game" / Path(root).relative_to(DIR_PARATRANZ / self._type / "utf8") / f'{file.removesuffix(".csv")}.json',
343 | )
344 |
345 | tasks = [
346 | self._update_for_gather(old_file, new_file, json_file)
347 | for old_file, (new_file, json_file) in file_mapping.items()
348 | ]
349 | await asyncio.gather(*tasks)
350 | await self._integrate_json()
351 | logger.info(f"##### {self._mention_name}字典更新完毕 !\n")
352 |
353 | async def _update_for_gather(self, old_file: Path, new_file: Path, json_file: Path):
354 | """
355 |         For asyncio.gather.
356 |         :param old_file: absolute path of the downloaded translated file
357 |         :param new_file: absolute path of the freshly extracted dictionary file (json_file is its json counterpart)
358 | """
359 | if not new_file.exists():
360 | unavailable_file = DIR_RAW_DICTS / self._type / self._version / "csv" / "game" / "失效词条" / Path().joinpath(*old_file.parts[old_file.parts.index("utf8")+1:])
361 | os.makedirs(unavailable_file.parent, exist_ok=True)
362 | with open(old_file, "r", encoding="utf-8") as fp:
363 | unavailables = list(csv.reader(fp))
364 | with open(unavailable_file, "w", encoding="utf-8-sig", newline="") as fp:
365 | csv.writer(fp).writerows(unavailables)
366 | return
367 |
368 | with open(old_file, "r", encoding="utf-8") as fp:
369 | old_data = list(csv.reader(fp))
370 | old_ens: dict = {
371 | row[-2] if len(row) > 2 else row[1]: idx_
372 | for idx_, row in enumerate(old_data)
373 |             }  # old English text: row index
374 |
375 | with open(new_file, "r", encoding="utf-8") as fp:
376 | new_data = list(csv.reader(fp))
377 | new_ens: dict = {
378 | row[-1]: idx_
379 | for idx_, row in enumerate(new_data)
380 |             }  # newly extracted English text: row index
381 |
382 | with open(json_file, "r", encoding="utf-8") as fp:
383 | json_data: list[dict] = json.load(fp)
384 |
385 |         # 1. unchanged keys keep their existing translations
386 | for idx_, row in enumerate(new_data):
387 | if row[-1] in old_ens:
388 | new_data[idx_][0] = old_data[old_ens[row[-1]]][0]
389 | if len(old_data[old_ens[row[-1]]]) >= 3:
390 | ts = old_data[old_ens[row[-1]]][-1].strip()
391 | new_data[idx_].append(ts)
392 | try:
393 | json_data[idx_]["translation"] = ts
394 |                     except IndexError:
395 | logger.error(f"json与csv长度不同: {json_file}")
396 |
397 |         # 2. English lines that no longer exist move into the stale-entries ("失效词条") folder
398 | unavailables = []
399 | for idx_, row in enumerate(old_data):
400 |             if len(row) <= 2:  # untranslated, drop it!
401 | continue
402 |             if row[-2] == row[-1]:  # needs no translation, drop it!
403 | continue
404 |
405 | old_en = row[-2]
406 | if old_en not in new_ens:
407 | # logger.info(f"\t- old: {old_en}")
408 | unavailables.append(old_data[idx_])
409 | unavailable_file = DIR_RAW_DICTS / self._type / self._version / "csv" / "game" / "失效词条" / Path().joinpath(*old_file.parts[old_file.parts.index("utf8")+1:]) if unavailables else None
410 | with open(old_file, "w", encoding="utf-8-sig", newline="") as fp:
411 | csv.writer(fp).writerows(old_data)
412 |
413 | with open(new_file, "w", encoding="utf-8-sig", newline="") as fp:
414 | csv.writer(fp).writerows(new_data)
415 |
416 | with open(new_file, "r", encoding="utf-8-sig") as fp:
417 | problem_data = fp.readlines()
418 |
419 | with open(json_file, "w", encoding="utf-8") as fp:
420 | json.dump(json_data, fp, ensure_ascii=False, indent=2)
421 |
422 | for idx, line in enumerate(problem_data):
423 |             if "\ufeff" in line:
424 |                 problem_data[idx] = line.replace("\ufeff", "")  # strip stray BOMs left by utf-8-sig round-trips
425 |
426 | with open(new_file, "w", encoding="utf-8-sig") as fp:
427 | fp.writelines(problem_data)
428 |
429 | if unavailable_file:
430 | os.makedirs(unavailable_file.parent, exist_ok=True)
431 | with open(unavailable_file, "w", encoding="utf-8-sig", newline="") as fp:
432 | csv.writer(fp).writerows(unavailables)
433 |
434 | async def _integrate_json(self):
435 |         """Merge the json dictionaries into one big file."""
436 | integrated_dict = []
437 | for root, dir_list, file_list in os.walk(DIR_RAW_DICTS / self._type / self._version / "json" / "game"):
438 | for file in file_list:
439 | with open(Path(root) / file, "r", encoding="utf-8") as fp:
440 | json_data: list[dict] = json.load(fp)
441 |
442 | json_data = [
443 | item for item in json_data
444 | if item["original"] != item["translation"] and item["translation"]
445 | ]
446 | integrated_dict.extend(json_data)
447 | i18n_dict = await self._wash_json(integrated_dict)
448 | with open(DIR_DATA_ROOT / "json" / "i18n.json", "w", encoding="utf-8") as fp:
449 | json.dump(i18n_dict, fp, ensure_ascii=False, indent=2)
450 |
451 | @staticmethod
452 | async def _wash_json(integrated_dict: list[dict]) -> dict:
453 |         """Convert into the format accepted by the i18n mod."""
454 | i18n_dict = {
455 | "typeB": {
456 | "TypeBOutputText": [],
457 | "TypeBInputStoryScript": []
458 | }
459 | }
460 | for data in integrated_dict:
461 | result_data = {
462 | "f": data["original"], "t": data["translation"], "pos": data["pos"]
463 | }
464 |
465 | filename = Path(data["filepath"]).name
466 | result_data["fileName"] = filename
467 | if filename.endswith(".js"):
468 | result_data["js"] = True
469 | elif filename.endswith(".css"):
470 | result_data["css"] = True
471 |
472 | if data["passage"]:
473 | result_data["pN"] = data["passage"]
474 | i18n_dict["typeB"]["TypeBInputStoryScript"].append(result_data)
475 | continue
476 | i18n_dict["typeB"]["TypeBOutputText"].append(result_data)
477 | return i18n_dict
478 |
479 |     """ Overwrite the original game text """
480 | async def apply_dicts(self, blacklist_dirs: list[str] = None, blacklist_files: list[str] = None, debug_flag: bool = False, type_manual: str = None):
481 |         """Overwrite the game files with the localization."""
482 | if not self._version:
483 | await self.fetch_latest_version()
484 |
485 | if self._type == "common":
486 | DIR_GAME_TEXTS = DIR_GAME_TEXTS_COMMON
487 | else:
488 | DIR_GAME_TEXTS = DIR_GAME_TEXTS_DEV
489 | logger.info(f"===== 开始覆写{self._mention_name}汉化 ...")
490 |
491 | type_manual = type_manual or self._type
492 |         # not the cleanest approach, but it works for now
493 | if type_manual != self._type:
494 | os.makedirs(DIR_RAW_DICTS / "common" / self._version / "csv" / "game", exist_ok=True)
495 | for tree in os.listdir(DIR_PARATRANZ / "common" / "utf8"):
496 | with contextlib.suppress(shutil.Error, FileNotFoundError):
497 | shutil.move(DIR_PARATRANZ / "common" / "utf8" / tree, DIR_RAW_DICTS / "common" / self._version / "csv" / "game")
498 |
499 | file_mapping: dict = {}
500 | for root, dir_list, file_list in os.walk(DIR_RAW_DICTS / type_manual / self._version / "csv"):
501 | if any(_ in Path(root).absolute().__str__() for _ in blacklist_dirs):
502 | continue
503 | if "失效词条" in root:
504 | continue
505 | for file in file_list:
506 | # logger.warning(f"替换文件:{file}")
507 | if any(_ in file for _ in blacklist_files):
508 | continue
509 | if file.endswith(".js.csv"):
510 | file_mapping[Path(root).absolute() / file] = DIR_GAME_TEXTS / Path(root).relative_to(DIR_RAW_DICTS / type_manual / self._version / "csv" / "game") / f"{file.split('.')[0]}.js".replace("utf8\\", "")
511 | else:
512 | file_mapping[Path(root).absolute() / file] = DIR_GAME_TEXTS / Path(root).relative_to(DIR_RAW_DICTS / type_manual / self._version / "csv" / "game") / f"{file.split('.')[0]}.twee".replace("utf8\\", "")
513 |
514 | tasks = [
515 | self._apply_for_gather(csv_file, twee_file, debug_flag=debug_flag)
516 | for idx, (csv_file, twee_file) in enumerate(file_mapping.items())
517 | ]
518 | await asyncio.gather(*tasks)
519 | logger.info(f"##### {self._mention_name}汉化覆写完毕 !\n")
520 |
521 | async def _apply_for_gather(self, csv_file: Path, target_file: Path, debug_flag: bool = False):
522 |         """For asyncio.gather."""
523 | with open(target_file, "r", encoding="utf-8") as fp:
524 | raw_targets: list[str] = fp.readlines()
525 | raw_targets_temp = raw_targets.copy()
526 |
527 | with open(csv_file, "r", encoding="utf-8") as fp:
528 | for row in csv.reader(fp):
529 |                 if len(row) < 3:  # not translated
530 | continue
531 | en, zh = row[-2:]
532 | en, zh = en.strip(), zh.strip()
533 |                 if not zh:  # untranslated / empty translation
534 | continue
535 |
536 | zh = re.sub('^(“)', '"', zh)
537 | zh = re.sub('(”)$', '"', zh)
538 | if self._is_lack_angle(zh, en):
539 | logger.warning(f"\t!!! 可能的尖括号数量错误:{en} | {zh} | https://paratranz.cn/projects/{PARATRANZ_PROJECT_DOL_ID}/strings?text={quote(en)}")
540 | if debug_flag:
541 | webbrowser.open(f"https://paratranz.cn/projects/{PARATRANZ_PROJECT_DOL_ID}/strings?text={quote(en)}")
542 | if self._is_different_event(zh, en):
543 | logger.warning(f"\t!!! 可能的事件名称错翻:{en} | {zh} | https://paratranz.cn/projects/{PARATRANZ_PROJECT_DOL_ID}/strings?text={quote(en)}")
544 | if debug_flag:
545 | webbrowser.open(f"https://paratranz.cn/projects/{PARATRANZ_PROJECT_DOL_ID}/strings?text={quote(en)}")
546 |
547 | for idx_, target_row in enumerate(raw_targets_temp):
548 | if not target_row.strip():
549 | continue
550 |
551 | if en == target_row.strip():
552 | raw_targets[idx_] = target_row.replace(en, zh).replace(" \n", "\n").lstrip(" ")
553 | raw_targets_temp[idx_] = ""
554 |
555 | if target_file.name.endswith(".js"):
556 | try:
557 | self._acorn.parse("".join(raw_targets))
558 | LOGGER_COLOR.info(f"JS 语法检测通过 {target_file}")
559 | except JSSyntaxError as err:
560 | try:
561 | LOGGER_COLOR.error(f"{target_file} | {err.err_code(raw_targets)}")
562 |                 except ValueError:
563 | LOGGER_COLOR.error(f"{target_file}")
564 | with open(target_file, "w", encoding="utf-8") as fp:
565 | fp.writelines(raw_targets)
566 | # logger.info(f"\t- ({idx + 1} / {full}) {target_file.__str__().split('game')[1]} 覆写完毕")
567 |
568 | @staticmethod
569 | def _is_lack_angle(line_zh: str, line_en: str):
570 |         """A <<> that is missing a >."""
571 | if ("<" not in line_en and ">" not in line_en) or ParseTextTwee.is_only_marks(line_en):
572 | return False
573 |
574 |         # head and tail are hard to judge, so pad them
575 | if line_zh[0] == "<":
576 | line_zh = f"_{line_zh}"
577 | if line_en[0] == "<":
578 | line_en = f"_{line_en}"
579 | if line_zh[-1] == ">":
580 | line_zh = f"{line_zh}_"
581 | if line_en[-1] == ">":
582 | line_en = f"{line_en}_"
583 |
584 | left_angle_single_zh = re.findall(r"[^<=](<)[^<=3]", line_zh)
585 | right_angle_single_zh = re.findall(r"[^>=](>)[^>=:]", line_zh)
586 | if "<<" not in line_en and ">>" not in line_en:
587 | if len(left_angle_single_zh) == len(right_angle_single_zh):
588 | return False
589 | # print(f"las: {len(left_angle_single_zh)}({left_angle_single_zh}) - ras: {len(right_angle_single_zh)}({right_angle_single_zh}) | {line_zh}")
590 | left_angle_single_en = re.findall(r"[^<=](<)[^<=3]", line_en)
591 | right_angle_single_en = re.findall(r"[^>=](>)[^>=:]", line_en)
592 | return (
593 | len(left_angle_single_en) != len(left_angle_single_zh)
594 | or len(right_angle_single_en) != len(right_angle_single_zh)
595 |             )  # like < > <, the only possible shape
596 |
597 | left_angle_double_zh = re.findall(r"(<<)", line_zh)
598 | right_angle_double_zh = re.findall(r"(>>)", line_zh)
599 | if len(left_angle_double_zh) == len(right_angle_double_zh):
600 | return False
601 |
602 | left_angle_double_en = re.findall(r"(<<)", line_en)
603 | right_angle_double_en = re.findall(r"(>>)", line_en)
604 | return (
605 | len(left_angle_double_en) != len(left_angle_double_zh)
606 | or len(right_angle_double_en) != len(right_angle_double_zh)
607 |         )  # like << >> <<
608 |
609 | @staticmethod
610 | def _is_lack_square(line_zh: str, line_en: str):
611 |         """Missing a [ or ]."""
612 | if "[" not in line_en and "]" not in line_en:
613 | return False
614 |
615 |         # head and tail are hard to judge, so pad them
616 | if line_zh[0] == "[":
617 | line_zh = f"_{line_zh}"
618 | if line_en[0] == "[":
619 | line_en = f"_{line_en}"
620 | if line_zh[-1] == "]":
621 | line_zh = f"{line_zh}_"
622 | if line_en[-1] == "]":
623 | line_en = f"{line_en}_"
624 |
625 | left_square_single_zh = re.findall(r"[^\[](\[)[^\[]", line_zh)
626 | right_square_single_zh = re.findall(r"[^]](])[^]]", line_zh)
627 | if "[[" not in line_en and "]]" not in line_en:
628 | if len(left_square_single_zh) == len(right_square_single_zh):
629 | return False
630 | left_square_single_en = re.findall(r"[^\[](\[)[^\[]", line_en)
631 | right_square_single_en = re.findall(r"[^]](])[^]]", line_en)
632 | return (
633 | len(left_square_single_en) != len(left_square_single_zh)
634 | or len(right_square_single_en) != len(right_square_single_zh)
635 |             )  # like [ ] [, the only possible shape
636 |
637 | left_square_double_zh = re.findall(r"(\[\[)", line_zh)
638 | right_square_double_zh = re.findall(r"(]])", line_zh)
639 | if len(left_square_double_zh) == len(right_square_double_zh):
640 | return False
641 | left_square_double_en = re.findall(r"(\[\[)", line_en)
642 | right_square_double_en = re.findall(r"(]])", line_en)
643 | return (
644 | len(left_square_double_en) != len(left_square_double_zh)
645 | or len(right_square_double_en) != len(right_square_double_zh)
646 |         )  # like [[ ]] [[
647 |
648 | @staticmethod
649 | def _is_different_event(line_zh: str, line_en: str):
650 |         """EVENT mistyped inside <<...>>."""
651 |         ...
696 |     async def get_lastest_commit(self) -> None:
697 | ref_name = self.get_type("master", "dev")
698 | async with httpx.AsyncClient(verify=False) as client:
699 | response = await client.get(REPOSITORY_COMMITS_URL_COMMON, params={"ref_name": ref_name})
700 | if response.status_code != 200:
701 | logger.error("获取源仓库 commit 出错!")
702 | return None
703 | repo_json = response.json()
704 | if not repo_json:
705 | return None
706 | latest_commit = repo_json[0]
707 | logger.info(f'latest commit: {latest_commit["id"]}')
708 | self._is_latest = bool(self._commit and latest_commit["id"] == self._commit["id"])
709 | if self._is_latest:
710 | return None
711 | logger.info(f"===== 开始写入{self._mention_name}最新 commit ...")
712 | with open(FILE_COMMITS, "w") as fp:
713 | json.dump(latest_commit, fp, ensure_ascii=False, indent=2)
714 | logger.info(f"#### {self._mention_name}最新 commit 已写入!")
715 |
716 | def get_type(self, common, dev):
717 | if self._type == "common":
718 | return common
719 | else:
720 | return dev
721 |
722 | @property
723 | def game_dir(self) -> Path:
724 |         """Get the game directory."""
725 | return self.get_type(DIR_GAME_ROOT_COMMON, DIR_GAME_ROOT_DEV)
726 |
727 |     """ Other things that need changing """
728 | def change_css(self):
729 |         """Font spacing (and related CSS text) tweaks."""
730 | css_dir = DIR_GAME_CSS_COMMON if self._type == "common" else DIR_GAME_CSS_DEV
731 | with open(css_dir / "base.css", "r", encoding="utf-8") as fp:
732 | lines = fp.readlines()
733 | for idx, line in enumerate(lines):
734 | match line.strip():
735 | case "max-height: 2.4em;":
736 | lines[idx] = line.replace("2.4em;", "7em;")
737 | continue
738 | case 'content: " months";':
739 | lines[idx] = line.replace(" months", "月数")
740 | continue
741 | case 'content: " weeks";':
742 | lines[idx] = line.replace(" weeks", "周数")
743 | break
744 | case _:
745 | continue
746 | with open(css_dir / "base.css", "w", encoding="utf-8") as fp:
747 | fp.writelines(lines)
748 |
749 | def replace_banner(self):
750 |         """Localized banner."""
751 | shutil.copyfile(
752 | DIR_DATA_ROOT / "img" / "banner.png",
753 | DIR_GAME_ROOT_COMMON / "img" / "misc" / "banner.png"
754 | )
755 |
756 | def change_version(self, version: str = ""):
757 |         """Change the version number."""
758 | with open(FILE_VERSION_EDIT_COMMON, "r", encoding="utf-8") as fp:
759 | lines = fp.readlines()
760 | for idx, line in enumerate(lines):
761 | if "versionName: " in line.strip():
762 | lines[idx] = f'versionName: "{version}",\n'
763 | break
764 | with open(FILE_VERSION_EDIT_COMMON, "w", encoding="utf-8") as fp:
765 | fp.writelines(lines)
766 |
767 |     """ Delete, delete, delete """
768 | async def drop_all_dirs(self, force=False):
769 |         """Restore everything to its initial state."""
770 | if not force:
771 | await self.get_lastest_commit()
772 | logger.warning("===== 开始删库跑路 ...")
773 | await self._drop_temp()
774 | await self._drop_gitgud()
775 | await self._drop_dict()
776 | await self._drop_paratranz()
777 | logger.warning("##### 删库跑路完毕 !\n")
778 |
779 | async def _drop_temp(self):
780 |         """Delete the temporary files."""
781 | if DIR_TEMP_ROOT.exists():
782 | if not self._is_latest:
783 | shutil.rmtree(DIR_TEMP_ROOT, ignore_errors=True)
784 | return
785 | if FILE_REPOSITORY_ZIP.exists():
786 | with contextlib.suppress(shutil.Error, FileNotFoundError):
787 | shutil.move(FILE_REPOSITORY_ZIP, DIR_ROOT)
788 |
789 | if (DIR_TEMP_ROOT / f"dol{self._mention_name}.zip").exists():
790 | with contextlib.suppress(shutil.Error, FileNotFoundError):
791 | shutil.move(DIR_TEMP_ROOT / f"dol{self._mention_name}.zip", DIR_ROOT)
792 |
793 | if (DIR_TEMP_ROOT / "dol世扩.zip").exists():
794 | with contextlib.suppress(shutil.Error, FileNotFoundError):
795 | shutil.move(DIR_TEMP_ROOT / "dol世扩.zip", DIR_ROOT)
796 |
797 | shutil.rmtree(DIR_TEMP_ROOT, ignore_errors=True)
798 | logger.warning("\t- 缓存目录已删除")
799 |
800 | async def _drop_gitgud(self):
801 |         """Delete the game directory."""
802 | shutil.rmtree(self.game_dir, ignore_errors=True)
803 | logger.warning(f"\t- {self._mention_name}游戏目录已删除")
804 |
805 | async def _drop_dict(self):
806 |         """Delete the generated dictionaries."""
807 | if not self._version:
808 | await self.fetch_latest_version()
809 | shutil.rmtree(DIR_RAW_DICTS / self._type / self._version, ignore_errors=True)
810 | shutil.rmtree(DIR_RAW_DICTS / "common" / self._version, ignore_errors=True)
811 | logger.warning(f"\t- {self._mention_name}字典目录已删除")
812 |
813 | async def _drop_paratranz(self):
814 |         """Delete the downloaded localization package."""
815 | shutil.rmtree(DIR_PARATRANZ / self._type, ignore_errors=True)
816 | logger.warning(f"\t- {self._mention_name}汉化目录已删除")
817 |
818 |     """ Compile the game """
819 | def compile(self, chs_version: str = ""):
820 |         """Compile the game."""
821 | logger.info("===== 开始编译游戏 ...")
822 | # self._before_compile(chs_version)
823 | if platform.system() == "Windows":
824 | self._compile_for_windows()
825 | elif platform.system() == "Linux":
826 | self._compile_for_linux()
827 | else:
828 | raise Exception("什么电脑系统啊?")
829 | logger.info("##### 游戏编译完毕 !")
830 |
831 | def _before_compile(self, chs_version: str = ""):
832 |         """Tweak a few compile settings."""
833 | with open(self.game_dir / "compile.bat", "r", encoding="utf-8") as fp:
834 | content = fp.read()
835 | content = content.replace("Degrees of Lewdity VERSION.html", "Degrees of Lewdity.html")
836 | with open(self.game_dir / "compile.bat", "w", encoding="utf-8") as fp:
837 | fp.write(content)
838 |
839 | with open(self.game_dir / "devTools" / "androidsdk" / "image" / "cordova" / "comfig.xml", "r", encoding="utf-8") as fp:
840 | lines = fp.readlines()
841 | for idx, line in enumerate(lines):
842 | if 'id="' in line:
843 | lines[idx] = 'id="dol-chs"\n'
844 | continue
845 | if 'version="' in line:
846 | lines[idx] = f'version="{chs_version}"\n'
847 | continue
848 | if 'android-packageName="' in line:
849 | lines[idx] = 'android-packageName="com.vrelnir.DegreesOfLewdityCHS"\n'
850 | continue
851 | if 'Degrees of Lewdity' in line:
852 | lines[idx] = 'Degrees of Lewdity 汉化版\n'
853 | with open(self.game_dir / "devTools" / "androidsdk" / "image" / "cordova" / "comfig.xml", "w", encoding="utf-8") as fp:
854 | fp.writelines(lines)
855 |
856 | def _compile_for_windows(self):
857 | """win"""
858 | subprocess.Popen(self.game_dir / "compile.bat")
859 | time.sleep(5)
860 | logger.info(f"\t- Windows 游戏编译完成,位于 {self.game_dir / 'Degrees of Lewdity VERSION.html'}")
861 |
862 | def _compile_for_linux(self):
863 | """linux"""
864 | if GITHUB_ACTION_DEV:
865 | tweego_exe = "tweego_linux86" if PLATFORM_ARCHITECTURE == "32bit" else "tweego_linux64"
866 | tweego_exe_file = self.game_dir / "devTools" / "tweego" / tweego_exe
867 | tweego_exe_file.chmod(tweego_exe_file.stat().st_mode | stat.S_IEXEC)
868 | tweego_compile_sh = self.game_dir / "compile.sh"
869 | tweego_compile_sh.chmod(tweego_compile_sh.stat().st_mode | stat.S_IEXEC)
870 | subprocess.Popen("bash ./compile.sh", env=os.environ, shell=True, cwd=self.game_dir)
871 | time.sleep(5)
872 | logger.info(f"\t- Linux 游戏编译完成,位于 {self.game_dir / 'Degrees of Lewdity VERSION.html'}")
873 |
874 | def _compile_for_mobile(self):
875 | """android"""
876 |
877 |     """ Package the game """
878 | def package_zip(self, chs_version: str = "chs"):
879 |         """ Package the game """
880 | today = datetime.datetime.now().strftime("%Y%m%d")
881 | with zf(DIR_GAME_ROOT_COMMON / f"dol-{chs_version}-{today}.zip", "w", compresslevel=9, compression=ZIP_DEFLATED) as zfp:
882 | for root, dir_list, file_list in os.walk(DIR_GAME_ROOT_COMMON):
883 | for file in file_list:
884 | filepath = Path((Path(root) / file).__str__().split("degrees-of-lewdity-master/")[-1].split("degrees-of-lewdity-master\\")[-1])
885 | if (file in {
886 | "Degrees of Lewdity VERSION.html",
887 | "style.css",
888 | }
889 | or "degrees-of-lewdity-master/img/" in root
890 | or "degrees-of-lewdity-master\\img\\" in root
891 | or filepath == Path("LICENSE")
892 | ):
893 | zfp.write(filename=DIR_GAME_ROOT_COMMON / filepath, arcname=filepath, compresslevel=9)
894 |
895 | async def copy_to_git(self):
896 |         """Copy the build outputs into the git repo."""
897 | git_repo = os.getenv("GIT_REPO")
898 | dol_chinese_path = DIR_ROOT / git_repo
899 | if not dol_chinese_path.exists():
900 | logger.warning(f"不存在{git_repo}文件夹")
901 | return
902 |
903 | logger.info("===== 开始复制到 git ...")
904 | game_dir_path = self.game_dir
905 | game_dir = os.listdir(game_dir_path)
906 |
907 | logger.info(f"game_dir: {game_dir}")
908 | for file in game_dir:
909 | if file.startswith("Degrees of Lewdity") and file.endswith("html"):
910 | dol_html = "beta" if GITHUB_ACTION_ISBETA else "index"
911 | game_html = game_dir_path / file
912 | logger.info("复制到GIT文件夹")
913 | shutil.copyfile(
914 | game_html,
915 | dol_chinese_path / f"{dol_html}.html",
916 | )
917 |                 beeesssmod_dir_path = dol_chinese_path / "beeesssmod"
918 | beeesssmod_dir = Path(beeesssmod_dir_path)
919 | if beeesssmod_dir.is_dir():
920 | logger.info("同步到美化包文件夹")
921 | shutil.copyfile(
922 | game_html,
923 | beeesssmod_dir_path / f"{dol_html}.html",
924 | )
925 |
926 | elif file in {"style.css", "DolSettingsExport.json"}:
927 | logger.info(f"game_dir file: {file}")
928 | shutil.copyfile(
929 | game_dir_path / file,
930 | dol_chinese_path / file,
931 | )
932 | dol_chinese_img_path = dol_chinese_path / "img"
933 |
934 |
935 | shutil.copytree(
936 | self.game_dir / "img",
937 | dol_chinese_img_path,
938 | True,
939 |             ignore=lambda src, files: [f for f in files if f.endswith((".js", ".bat"))],
940 | dirs_exist_ok=True,
941 | )
942 | logger.info("##### 复制到 git 已完毕! ")
943 | await self.drop_all_dirs(True)
944 |
945 |     """ Launch in the browser """
946 | def run(self):
947 | webbrowser.open((self.game_dir / "Degrees of Lewdity VERSION.html").__str__())
948 |
949 |     """ i18n-related """
950 | async def download_modloader_autobuild(self):
951 | async with httpx.AsyncClient(verify=False) as client:
952 | await self._get_latest_modloader_autobuild(client)
953 |
954 | async def _get_latest_modloader_autobuild(self, client: httpx.AsyncClient):
955 | response = await client.get(REPOSITORY_MODLOADER_ARTIFACTS)
956 | url = response.json()["artifacts"][0]["archive_download_url"]
957 |
958 | logger.info(f"url: {url}")
959 | async with client.stream("GET", url, headers={
960 | "accept": "application/vnd.github+json",
961 | "Authorization": f"Bearer {GITHUB_ACCESS_TOKEN}"
962 | }, follow_redirects=True, timeout=60) as response:
963 | async with aopen(DIR_TEMP_ROOT / "modloader.zip", "wb+") as afp:
964 | async for char in response.iter_raw():
965 | await afp.write(char)
966 |
967 |
968 | __all__ = [
969 | "ProjectDOL"
970 | ]
971 |
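ProjectDOL is the heart of the pipeline; the actual entry point is main.py (not shown in this section). A condensed sketch of the typical order in which its methods are called (illustrative only; argument values are assumptions, and in practice the Paratranz package must be downloaded first via src/paratranz.py):

    import asyncio

    from src.project_dol import ProjectDOL

    async def main() -> None:
        dol = ProjectDOL(type_="common")
        await dol.fetch_latest_version(is_quiet=False)
        await dol.download_from_gitgud()   # fetch and unzip the game source
        await dol.create_dicts()           # extract translatable lines into csv/json
        await dol.update_dicts()           # merge previously translated entries back in
        await dol.apply_dicts(blacklist_dirs=[], blacklist_files=[])  # overwrite game texts
        dol.change_css()                   # CSS tweaks for CJK text
        dol.compile()                      # runs compile.bat / compile.sh

    asyncio.run(main())

Note that apply_dicts iterates over its blacklist arguments directly, so empty lists (rather than None) must be passed when nothing is blacklisted.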
--------------------------------------------------------------------------------
/src/tools/__init__.py:
--------------------------------------------------------------------------------
1 | """Small helper tools not used in the main script flow."""
2 | from .process_variables import *
3 |
--------------------------------------------------------------------------------
/src/tools/build_release/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NumberSir/vrelnir_localization/526475b4a1e9b23f0708c066ee5d68f955b14b4d/src/tools/build_release/__init__.py
--------------------------------------------------------------------------------
/src/tools/build_release/apk-build-tools/cmdline-tools/latest.zip:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:e69fe118b8d683903200c2584634258478d911d09d20ced99899ca845d29c106
3 | size 133233915
4 |
--------------------------------------------------------------------------------
/src/tools/build_release/apk-build-tools/gradle/gradle.zip:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:300bf744020b1adc7a0487cc8c62671063d10a02982fcf3e89e0b585173ccfe5
3 | size 115999274
4 |
--------------------------------------------------------------------------------
/src/tools/build_release/consts.py:
--------------------------------------------------------------------------------
1 | import os
2 | from dotenv import load_dotenv
3 | from pathlib import Path
4 |
5 | load_dotenv()
6 | ACCESS_TOKEN = os.getenv('GITHUB_ACCESS_TOKEN')
7 |
8 | ROOT = Path(__file__).parent
9 | DIR_TEMP = ROOT / "tmp"
10 | DIR_BUILD = ROOT / "build"
11 | DIR_BUILD_ASSETS = DIR_BUILD / "assets"
12 | DIR_CREDITS = ROOT / "credits"
13 | DIR_APK_BUILD_TOOLS = ROOT / "apk-build-tools"
14 |
15 | DIR_GAME = ROOT.parent.parent.parent / "degrees-of-lewdity-master"
16 | DIR_APK_BUILDER = DIR_GAME / "devTools" / "apkbuilder"
17 | DIR_DIST = DIR_GAME / "dist"
18 | DIR_REPO = Path("D:\\Users\\numbersir\\Documents\\GitHub\\Degrees-of-Lewdity-Chinese-Localization")
19 |
20 | FILE_LICENSE = DIR_GAME / "LICENSE"
21 | FILE_CREDITS = DIR_CREDITS / "CREDITS.md"
22 | FILE_README = DIR_REPO / "README.md"
23 |
24 | FILE_GRADLE = DIR_APK_BUILD_TOOLS / "gradle" / "gradle.zip"
25 | FILE_CMDLINE = DIR_APK_BUILD_TOOLS / "cmdline-tools" / "latest.zip"
26 |
27 | HTML_FILENAME = "Degrees of Lewdity.html"
28 | APK_DEFAULT_FILENAME_PREFIX = "Degrees-of-Lewdity"
29 |
30 |
31 | __all__ = [
32 | "ACCESS_TOKEN",
33 |
34 | "ROOT",
35 | "DIR_TEMP",
36 | "DIR_BUILD",
37 | "DIR_BUILD_ASSETS",
38 | "DIR_CREDITS",
39 | "DIR_APK_BUILD_TOOLS",
40 |
41 | "DIR_GAME",
42 | "DIR_APK_BUILDER",
43 | "DIR_DIST",
44 | "DIR_REPO",
45 |
46 | "FILE_LICENSE",
47 | "FILE_CREDITS",
48 | "FILE_README",
49 |
50 | "FILE_GRADLE",
51 | "FILE_CMDLINE",
52 |
53 | "HTML_FILENAME",
54 | "APK_DEFAULT_FILENAME_PREFIX"
55 | ]
--------------------------------------------------------------------------------
/src/tools/build_release/download.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from aiofiles import open as aopen
3 | import httpx
4 |
5 |
6 | async def chunk_split(filesize: int, chunk: int = 2) -> list[list[int]]:
7 |     """Split a large file into byte ranges for chunked download."""
8 | step = filesize // chunk
9 | arr = range(0, filesize, step)
10 | result = [[arr[i], arr[i + 1] - 1] for i in range(len(arr) - 1)]
11 | result[-1][-1] = filesize - 1
12 | return result
13 |
14 |
15 | async def chunk_download(
16 | url: str,
17 | client: httpx.AsyncClient,
18 | start: int,
19 | end: int,
20 | save_path: Path,
21 | headers_: dict = None,
22 | ):
23 |     """Download one byte range and write it at its offset in the target file."""
24 | if not save_path.exists():
25 | with open(save_path, "wb") as fp:
26 | pass
27 | headers = (
28 | {"Range": f"bytes={start}-{end}"} | headers_
29 | if headers_
30 | else {"Range": f"bytes={start}-{end}"}
31 | )
32 | response = await client.get(url, headers=headers, follow_redirects=True, timeout=60)
33 | async with aopen(save_path, "rb+") as fp:
34 | await fp.seek(start)
35 | await fp.write(response.content)
36 |
37 |
38 | __all__ = ["chunk_split", "chunk_download"]
39 |
--------------------------------------------------------------------------------
/src/tools/build_release/log.py:
--------------------------------------------------------------------------------
1 | from loguru import logger as logger_
2 | import sys
3 |
4 | logger_.remove()
5 | logger_.add(sys.stdout, format="{time:HH:mm:ss} | [{level}] | {message}", colorize=True)
6 |
7 | logger = logger_
8 |
9 | __all__ = ["logger"]
10 |
--------------------------------------------------------------------------------
/src/tools/build_release/run_build.py:
--------------------------------------------------------------------------------
1 | """
2 | Prerequisites:
3 | 1. README is written
4 | 2. CREDITS is written
5 | 3. nodejs and jdk17 are set up per degrees-of-lewdity-master/devTools/apkbuilder/README-windows.txt
6 |
7 | Steps:
8 | 1. Trigger the ModLoader auto-build
9 | 2. Trigger the localization-package auto-build
10 |
11 | 3. Download the built ModLoader package and image pack
12 | 4. Download the built localization package
13 |
14 | 5. Extract the two HTML files from the ModLoader package
15 | 6. Zip README, CREDITS, LICENSE and the HTML together
16 | 7. Replace the HTML inside the official APK
17 | 8. Delete the img folder from the official APK
18 | 9. Compute the MD5 checksums
19 | 10. Extract this release's section from README
20 | 11. Build the Release
21 | """
22 | import asyncio
23 | import hashlib
24 | import os
25 | import re
26 | import shutil
27 | import subprocess
28 | import time
29 | import zipfile
30 | from zipfile import ZipFile
31 | from pathlib import Path
32 |
33 | import httpx
34 |
35 | from github import Github, Auth
36 | from github.Repository import Repository
37 |
38 | from src.tools.build_release.download import *
39 | from src.tools.build_release.log import *
40 | from src.tools.build_release.consts import *
41 |
42 |
43 | class ReleaseBuild:
44 | def __init__(self, github: Github, client: httpx.AsyncClient):
45 | self._client = client
46 | self._github = github
47 | self._repository: "Repository" = github.get_repo("Eltirosto/Degrees-of-Lewdity-Chinese-Localization")
48 |
49 | """ INIT """
50 | @staticmethod
51 | def clear():
52 |         """Clean the temp folder and build files."""
53 | shutil.rmtree(DIR_TEMP, ignore_errors=True)
54 | shutil.rmtree(DIR_BUILD_ASSETS, ignore_errors=True)
55 | os.makedirs(DIR_TEMP, exist_ok=True)
56 | os.makedirs(DIR_BUILD_ASSETS, exist_ok=True)
57 | logger.info("initialize successfully")
58 |
59 | """ ACTION """ # TODO
60 | def _trigger(self):
61 |         """Trigger an auto-build."""
62 | ...
63 |
64 | def trigger_mod_loader(self):
65 |         """Trigger the ModLoader auto-build (Lyoko-Jeremie/DoLModLoaderBuild)."""
66 | ...
67 |
68 | def trigger_i18n(self):
69 |         """Trigger the I18N auto-build (NumberSir/DoL-I18n-Build)."""
70 | ...
71 |
72 | """ DOWNLOAD """
73 | async def _download(self, repo_name: str):
74 |         """Download the built files."""
75 | repo = self.github.get_repo(repo_name)
76 | release = repo.get_latest_release()
77 | assets = release.get_assets()
78 | for asset in assets:
79 | response = await self.client.head(asset.browser_download_url, timeout=60, follow_redirects=True)
80 | filesize = int(response.headers["Content-Length"])
81 | chunks = await chunk_split(filesize, 64)
82 |
83 | if (DIR_TEMP / asset.name).exists():
84 | continue
85 |
86 | tasks = {
87 | chunk_download(asset.browser_download_url, self.client, start, end, DIR_TEMP / asset.name)
88 |                 for start, end in chunks
89 | }
90 | await asyncio.gather(*tasks)
91 |
92 | async def download_mod_loader(self):
93 |         """Download ModLoader and the Imagepack."""
94 | await self._download("Lyoko-Jeremie/DoLModLoaderBuild")
95 | logger.info("ModLoader & Imagepack downloaded successfully")
96 |
97 | async def download_i18n(self):
98 |         """Download ModI18N."""
99 | await self._download("NumberSir/DoL-I18n-Build")
100 | logger.info("ModI18N downloaded successfully")
101 |
102 | """ DECOMPRESS """
103 | def decompress_mod_loader(self):
104 |         """Only the html files inside need extracting."""
105 | with zipfile.ZipFile(DIR_TEMP / self.mod_loader_filename, "r") as zfp:
106 | for file in zfp.filelist:
107 | if not file.filename.endswith(".html"):
108 | continue
109 | zfp.extract(file, DIR_TEMP)
110 |
111 | """ BUILD ASSETS """
112 | def move_i18n(self):
113 |         """Move everything into a single folder."""
114 | shutil.copyfile(
115 | DIR_TEMP / self.i18n_filename,
116 | DIR_BUILD_ASSETS / self.i18n_filename,
117 | )
118 | logger.info("ModI18N built successfully")
119 |
120 | def rename_image_pack(self):
121 |         """The default name has no version number; add it."""
122 | (DIR_TEMP / self.image_pack_filename).rename(DIR_TEMP / f'{self.image_pack_filename.split("-")[0].split(".")[0]}-{self.game_version}.mod.zip')
123 |
124 | def move_image_pack(self):
125 |         """Move everything into a single folder."""
126 | shutil.copyfile(
127 | DIR_TEMP / self.image_pack_filename,
128 | DIR_BUILD_ASSETS / self.image_pack_filename,
129 | )
130 | logger.info("Imagepack built successfully")
131 |
132 | def _build_compress(self, html_filepath: Path, polyfill_suffix: str = ""):
133 |         """Build the main game zip."""
134 | with ZipFile(DIR_BUILD_ASSETS / f"DoL-ModLoader-{self.game_version}-v{self.mod_loader_version}{polyfill_suffix}.zip", mode="w", compression=zipfile.ZIP_DEFLATED) as zfp:
135 | zfp.write(filename=FILE_README, arcname=FILE_README.name, compresslevel=9)
136 | zfp.write(filename=FILE_LICENSE, arcname=FILE_LICENSE.name, compresslevel=9)
137 | zfp.write(filename=FILE_CREDITS, arcname=FILE_CREDITS.name, compresslevel=9)
138 | zfp.write(filename=html_filepath, arcname=HTML_FILENAME, compresslevel=9)
139 | logger.info(f"Zipfile{polyfill_suffix} built successfully")
140 |
141 | def build_compress_normal(self):
142 |         """Build the main game zip (normal)."""
143 | return self._build_compress(DIR_TEMP / self.html_filename)
144 |
145 | def build_compress_polyfill(self):
146 |         """Build the main game zip (polyfill)."""
147 | return self._build_compress(DIR_TEMP / self.html_polyfill_filename, "-polyfill")
148 |
149 | @staticmethod
150 | def _pre_build_apk():
151 |         """Prepare the environment and scripts before packaging with the repo's bundled APK build tool."""
152 | with ZipFile(FILE_GRADLE, "r") as zfp:
153 | zfp.extractall(DIR_APK_BUILDER / "androidsdk" / "gradle")
154 |
155 | with ZipFile(FILE_CMDLINE, "r") as zfp:
156 | zfp.extractall(DIR_APK_BUILDER / "androidsdk" / "cmdline-tools" / "latest")
157 |
158 | def replace_special_texts(filepath: Path, old: str, new: str = ""):
159 | with open(filepath, "r", encoding="utf-8") as fp:
160 | content = fp.read()
161 | with open(filepath, "w", encoding="utf-8") as fp:
162 | fp.write(content.replace(old, new))
163 |
164 | replace_special_texts(DIR_APK_BUILDER / "setup_deps.bat", "pause")
165 | replace_special_texts(DIR_APK_BUILDER / "build_app_debug.bat", "pause")
166 | replace_special_texts(DIR_APK_BUILDER / "scripts" / "prepare_files.js", '["img"]', "[]")
167 | replace_special_texts(DIR_APK_BUILDER / "scripts" / "prevent_unnecessary_deletes.js", '["img"]', "[]")
168 | replace_special_texts(DIR_APK_BUILDER / "www" / "custom_cordova_additions.js", "Press BACK again to exit", "再次点击返回键退出")
169 |
170 | def _build_apk(self, html_filepath: Path, polyfill_suffix: str = ""):
171 |         """Build the main game apk."""
172 | self._pre_build_apk()
173 | shutil.copyfile(
174 | html_filepath,
175 | DIR_GAME / HTML_FILENAME,
176 | )
177 | subprocess.Popen(DIR_APK_BUILDER / "setup_deps.bat", cwd=DIR_APK_BUILDER).wait()
178 | subprocess.Popen(DIR_APK_BUILDER / "build_app_debug.bat", cwd=DIR_APK_BUILDER, stdout=subprocess.DEVNULL).wait()
179 | shutil.copyfile(
180 | DIR_DIST / self.apk_filename,
181 | DIR_BUILD_ASSETS / f"DoL-ModLoader-{self.game_version}-v{self.mod_loader_version}{polyfill_suffix}.APK",
182 | )
183 | logger.info(f"Apk{polyfill_suffix} built successfully")
184 |
185 | def build_apk_normal(self):
186 |         """Build the main game apk (normal)."""
187 | return self._build_apk(DIR_TEMP / self.html_filename)
188 |
189 | def build_apk_polyfill(self):
190 |         """Build the main game apk (polyfill)."""
191 | return self._build_apk(DIR_TEMP / self.html_polyfill_filename, "-polyfill")
192 |
193 | @staticmethod
194 | def rename_pre(flag: bool = True):
195 |         """If this is a pre-release, append a pre suffix."""
196 | if not flag:
197 | return
198 | for file in os.listdir(DIR_BUILD_ASSETS):
199 | if file.endswith(".mod.zip"):
200 | shutil.move(DIR_BUILD_ASSETS / file, DIR_BUILD_ASSETS / f"{file[:-8]}-pre{file[-8:]}")
201 | else:
202 | shutil.move(DIR_BUILD_ASSETS / file, DIR_BUILD_ASSETS / f"{file[:-4]}-pre{file[-4:]}")
203 |
204 | """ BUILD RELEASE """
205 | @staticmethod
206 | def fetch_changelog() -> str:
207 |         """After README has been filled in by hand, extract this release's changelog."""
208 | with open(FILE_README, "r", encoding="utf-8") as fp:
209 | lines = fp.readlines()
210 |
211 | result = ""
212 | issues = []
213 | flag = False
214 | for line in lines:
215 | line = line.strip(">").strip().strip("-").strip()
216 | if not line:
217 | continue
218 |
219 | if not line.startswith("20") and not flag:
220 | continue
221 |
222 | if flag:
223 | if line.startswith("20"):
224 | break
225 | result = f"{result}\n{line}"
226 | flag = True
227 |
228 | issues.extend(re.findall(r"\[(issue[\-dc]*\d+)*?]", result))
229 | result = f"{result}\n"
230 | for line in lines:
231 | if not line.startswith("[issue"):
232 | continue
233 |
234 | issue = line.split(":")[0].strip("[").strip("]")
235 | if issue not in issues:
236 | continue
237 |
238 | result = f"{result}\n{line.strip()}"
239 | return result
240 |
241 | @staticmethod
242 | def calculate_md5() -> str:
243 |         """Compute the MD5 of the uploaded files."""
244 | result = "md5:"
245 | for file in os.listdir(DIR_BUILD_ASSETS):
246 | with open(DIR_BUILD_ASSETS / file, "rb") as fp:
247 | content = fp.read()
248 | result = f"{result}\n`{file}`: `{hashlib.md5(content).hexdigest()}`"
249 | return result
250 |
251 | @property
252 | def section_changelog(self) -> str:
253 |         """The changelog section of the final release note."""
254 | return self.fetch_changelog()
255 |
256 | @property
257 | def section_md5(self) -> str:
258 |         """The MD5 section of the final release note."""
259 | return self.calculate_md5()
260 |
261 | def generate_release_note(self) -> str:
262 |         """Generate the final release note."""
263 | return (
264 | f"{self.section_changelog}"
265 | "\n\n"
266 | f"{self.section_md5}"
267 | "\n\n"
268 | "## 致谢名单\n"
269 | "[CREDITS.md](CREDITS.md)"
270 | )
271 |
272 | def save_release_note(self):
273 |         """Save locally for proofreading."""
274 | with open(DIR_BUILD / "note.md", "w", encoding="utf-8") as fp:
275 | fp.write(self.release_note)
276 | logger.info("release note generated successfully")
277 |
278 | def release(self, *, draft: bool = True):
279 | """
280 |         Publish!
281 |
282 |         :param draft: whether to create a draft
283 | """
284 | git_release = self.repository.create_git_release(
285 | tag=f"v{self.game_version}-chs-{self.i18n_version}",
286 | name=f"v{self.game_version}-chs-{self.i18n_version}",
287 | message=self.release_note,
288 | draft=draft
289 | )
290 | for file in os.listdir(DIR_BUILD_ASSETS):
291 | git_release.upload_asset(
292 | path=(DIR_BUILD_ASSETS / file).__str__()
293 | )
294 | logger.info(f"RELEASE{'-draft' if draft else ''} successfully")
295 |
296 | """ PROPERTY """
297 | @property
298 | def client(self) -> httpx.AsyncClient:
299 |         """HTTP client."""
300 | return self._client
301 |
302 | @property
303 | def github(self) -> Github:
304 |         """GitHub client."""
305 | return self._github
306 |
307 | @property
308 | def repository(self) -> Repository:
309 |         """The release repository."""
310 | return self._repository
311 |
312 | @property
313 | def release_note(self) -> str:
314 |         """The release note."""
315 | return self.generate_release_note()
316 |
317 | """ FILENAME """
318 | @staticmethod
319 | def _get_tmp_filename(prefix: str = "", suffix: str = "") -> str:
320 |         """Filename of a downloaded artifact."""
321 | return [
322 | file
323 | for file in os.listdir(DIR_TEMP)
324 | if file.startswith(prefix)
325 | if file.endswith(suffix)
326 | ][0]
327 |
328 | @staticmethod
329 | def _get_dist_filename() -> str:
330 |         """Filename of the built apk."""
331 |         return os.listdir(DIR_GAME / "dist")[0]
332 |
333 | def get_mod_loader_filename(self) -> str:
334 |         """Filename of the downloaded auto-built modloader."""
335 | return self._get_tmp_filename(prefix="DoL-ModLoader")
336 |
337 | def get_image_pack_filename(self) -> str:
338 |         """Filename of the downloaded auto-built imagepack."""
339 | return self._get_tmp_filename(prefix="GameOriginalImagePack")
340 |
341 | def get_i18n_filename(self) -> str:
342 |         """Filename of the downloaded auto-built i18n package."""
343 | return self._get_tmp_filename(prefix="ModI18N")
344 |
345 | def get_html_filename(self) -> str:
346 |         """Filename of the extracted normal game html."""
347 | return self._get_tmp_filename(prefix="Degrees of Lewdity", suffix="mod.html")
348 |
349 | def get_html_polyfill_filename(self) -> str:
350 |         """Filename of the extracted polyfill game html."""
351 | return self._get_tmp_filename(prefix="Degrees of Lewdity", suffix="polyfill.html")
352 |
353 |     @property
354 |     def mod_loader_filename(self) -> str:
355 |         """Filename of the downloaded auto-built modloader"""
356 |         return self.get_mod_loader_filename()
357 | 
358 |     @property
359 |     def image_pack_filename(self) -> str:
360 |         """Filename of the downloaded auto-built imagepack"""
361 |         return self.get_image_pack_filename()
362 | 
363 |     @property
364 |     def i18n_filename(self) -> str:
365 |         """Filename of the downloaded auto-built i18n mod"""
366 |         return self.get_i18n_filename()
367 | 
368 |     @property
369 |     def html_filename(self) -> str:
370 |         """Filename of the extracted plain game html"""
371 |         return self.get_html_filename()
372 | 
373 |     @property
374 |     def html_polyfill_filename(self) -> str:
375 |         """Filename of the extracted polyfill game html"""
376 |         return self.get_html_polyfill_filename()
377 | 
378 |     @property
379 |     def apk_filename(self) -> str:
380 |         """Filename of the built apk"""
381 |         return self._get_dist_filename()
382 |
383 | """ VERSION """
384 |     @staticmethod
385 |     def get_game_version() -> str:
386 |         """Game version"""
387 |         with open(DIR_GAME / "version", "r", encoding="utf-8") as fp:
388 |             return fp.read().strip()
389 | 
390 |     def get_i18n_version(self) -> str:
391 |         """i18n version"""
392 |         return self.i18n_filename.removesuffix("mod.zip").split("-")[-1]  # rstrip() would strip a character set, not this suffix
393 | 
394 |     def get_mod_loader_version(self) -> str:
395 |         """modloader version"""
396 |         return self.mod_loader_filename.split("-")[2]
397 | 
398 |     @property
399 |     def game_version(self) -> str:
400 |         """Game version"""
401 |         return self.get_game_version()
402 | 
403 |     @property
404 |     def i18n_version(self) -> str:
405 |         """i18n version"""
406 |         return self.get_i18n_version()
407 | 
408 |     @property
409 |     def mod_loader_version(self) -> str:
410 |         """modloader version"""
411 |         return self.get_mod_loader_version()
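    # These versions feed the release tag f"v{game_version}-chs-{i18n_version}",
    # e.g. (hypothetical values) "v0.5.2.4-chs-beta1.0.0".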
412 |
413 |
414 | async def main():
415 | start = time.time()
416 | async with httpx.AsyncClient(verify=False) as client:
417 | with Github(auth=Auth.Token(ACCESS_TOKEN)) as github:
418 | process = ReleaseBuild(github, client)
419 |             """ START """
420 |             process.clear()
421 | 
422 |             """ RUN """ # TODO
423 |             # process.trigger_mod_loader()
424 |             # process.trigger_i18n()
425 | 
426 |             """ DOWNLOAD """
427 |             await process.download_mod_loader()
428 |             await process.download_i18n()
429 | 
430 |             """ DECOMPRESS """
431 |             process.decompress_mod_loader()
432 | 
433 |             """ PACK """
434 |             process.rename_image_pack()
435 |             process.move_i18n()
436 |             process.move_image_pack()
437 |             process.build_compress_normal()
438 |             process.build_compress_polyfill()
439 |             process.build_apk_normal()
440 |             process.build_apk_polyfill()
441 |             process.rename_pre(flag=False) # preview build
442 | 
443 |             """ RELEASE """ # TODO
444 | process.release(draft=True)
445 |
446 | cost = time.time() - start
447 | logger.info(f"cost {cost:.2f} seconds")
448 | return cost
449 |
450 | if __name__ == '__main__':
451 | cost = asyncio.run(main())
452 |
453 | try:
454 | from win10toast import ToastNotifier
455 | except ImportError:
456 | pass
457 | else:
458 | ToastNotifier().show_toast(title="RELEASE SCRIPT RUN DONE", msg=f"cost {cost or -1:.2f}s")
459 |
460 |
--------------------------------------------------------------------------------
/src/tools/build_release/run_credits.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import datetime
3 | import os
4 | from pathlib import Path
5 |
6 | from src.log import logger
7 |
8 | import httpx
9 | from urllib.parse import quote
10 | from lxml import etree
11 |
12 | from src.consts import PARATRANZ_TOKEN, GITHUB_ACCESS_TOKEN
13 |
14 | DIR_RELEASE = Path("D:\\Users\\numbersir\\Documents\\GitHub\\Degrees-of-Lewdity-Chinese-Localization")
15 |
16 |
17 | class Credit:
18 | def __init__(self, client: httpx.AsyncClient):
19 | self._client = client
20 |
21 | """ PARATRANZ """
22 | async def build_paratranz_members(self, project_id: int = 4780) -> list[str]:
23 |         """Paratranz members with contributions"""
24 | members = await self._get_paratranz_members(project_id)
25 | return self._filter_scored_paratranz_members(members)
26 |
27 | async def _get_paratranz_members(self, project_id: int = 4780):
28 | url = f"https://paratranz.cn/api/projects/{project_id}/members"
29 | headers = {"Authorization": PARATRANZ_TOKEN}
30 | response = await self.client.get(url, headers=headers)
31 |         logger.info("paratranz members fetched")
32 | return response.json()
33 |
34 | def _filter_scored_paratranz_members(self, members: list[dict]) -> list[str]:
35 | return sorted([
36 | f'{member["user"]["username"]}({member["user"]["nickname"]})'
37 | if member["user"].get("nickname")
38 | else f'{member["user"]["username"]}'
39 | for member in members
40 | if member["totalPoints"]
41 | ])
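    # Shape of a member entry, inferred from the fields accessed above:
    # {"user": {"username": "...", "nickname": "..."}, "totalPoints": 123}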
42 |
43 | """ WIKI """
44 | async def build_miraheze_members(self, limit: int = 600):
45 |         """Contributors to the Chinese wiki"""
46 | members_html = await self._get_miraheze_members(limit)
47 | return self._filter_scored_miraheze_members(members_html)
48 |
49 | async def _get_miraheze_members(self, limit: int = 600):
50 | url = f"https://degreesoflewditycn.miraheze.org/wiki/{quote('特殊:用户列表')}"
51 | params = {
52 | "editsOnly": 1,
53 | "wpFormIdentifier": "mw-listusers-form",
54 | "limit": limit,
55 | }
56 | response = await self.client.get(url, params=params)
57 |         logger.info("miraheze members fetched")
58 | return response.text
59 |
60 | def _filter_scored_miraheze_members(self, html: str):
61 | html = etree.HTML(html)
62 | return sorted(html.xpath("//bdi/text()"))
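    # MediaWiki renders each name on the user-list page inside a <bdi> tag,
    # which is what the xpath above relies on.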
63 |
64 | """ GITHUB """
65 | async def build_issue_members(
66 | self,
67 | owner: str = "Eltirosto",
68 | repo: str = "Degrees-of-Lewdity-Chinese-Localization",
69 | per_page: int = 100,
70 | pages: int = 6,
71 | ):
72 |         """Members who have reported issues"""
73 | members_data = await self._get_issue_members(owner, repo, per_page, pages)
74 | return sorted(list(set(self._filter_issue_members(members_data))))
75 |
76 | async def _get_issue_members(
77 | self,
78 | owner: str = "Eltirosto",
79 | repo: str = "Degrees-of-Lewdity-Chinese-Localization",
80 | per_page: int = 100,
81 | pages: int = 6,
82 | ) -> list[dict]:
83 | url = f"https://api.github.com/repos/{owner}/{repo}/issues"
84 | headers = {"Authorization": f"Bearer {GITHUB_ACCESS_TOKEN}"}
85 | results = []
86 | for page in range(1, pages + 1):
87 | params = {"state": "all", "per_page": per_page, "page": page}
88 | response = await self.client.get(url, params=params, headers=headers)
89 | results.extend(response.json())
90 |         logger.info("issue members fetched")
91 | return results
92 |
93 | def _filter_issue_members(self, members_data: list[dict]):
94 | return [member["user"]["login"] for member in members_data]
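    # Note: the GitHub /issues endpoint also returns pull requests; entries that
    # carry a "pull_request" key could be filtered out if PR authors should not count.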
95 |
96 | """ PROPERTY """
97 | @property
98 | def client(self):
99 | return self._client
100 |
101 |
102 | async def main():
103 | async with httpx.AsyncClient(verify=False) as client:
104 | credit = Credit(client)
105 | paratranz_members: list[str] = await credit.build_paratranz_members()
106 | miraheze_members: list[str] = await credit.build_miraheze_members()
107 | issue_members: list[str] = await credit.build_issue_members(pages=3)
108 |
109 | paratranz_members: str = "\n- ".join(paratranz_members)
110 | miraheze_members: str = "\n- ".join(miraheze_members)
111 | issue_members: str = "\n- ".join(issue_members)
112 |
113 | time = datetime.datetime.now().strftime("%Y%m%d")
114 | os.makedirs(Path(__file__).parent / "credits", exist_ok=True)
115 |
116 | content = (
117 | "## 欲都孤儿 贡献者名单\n"
118 | f"> {time}\n"
119 | "### 为汉化做出过贡献的诸位(排名不分先后):\n"
120 |             "<details>\n"
121 |             "<summary>点击展开</summary>\n\n"
122 |             f"- {paratranz_members}\n\n"
123 |             "</details>\n\n"
124 |             "### 为建设中文维基提供过贡献的诸位(排名不分先后):\n"
125 |             "<details>\n"
126 |             "<summary>点击展开</summary>\n\n"
127 |             f"- {miraheze_members}\n\n"
128 |             "</details>\n\n"
129 |             "### 为改进汉化内容提供过贡献的诸位(排名不分先后):\n"
130 |             "<details>\n"
131 |             "<summary>点击展开</summary>\n\n"
132 |             f"- {issue_members}\n\n"
133 |             "</details>\n\n"
134 | "---\n"
135 | "本游戏的汉化版制作、维护与更新属实不易,十分感谢以上不吝提供帮助、做出贡献的诸位。"
136 | )
137 |
138 | with open(Path(__file__).parent / "credits" / "CREDITS.md", "w", encoding="utf-8") as fp:
139 | fp.write(content)
140 |
141 | with open(DIR_RELEASE / "CREDITS.md", "w", encoding="utf-8") as fp:
142 | fp.write(content)
143 |
144 |
145 | if __name__ == "__main__":
146 | asyncio.run(main())
147 |
--------------------------------------------------------------------------------
/src/tools/process_changelog/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NumberSir/vrelnir_localization/526475b4a1e9b23f0708c066ee5d68f955b14b4d/src/tools/process_changelog/__init__.py
--------------------------------------------------------------------------------
/src/tools/process_changelog/main.py:
--------------------------------------------------------------------------------
1 | """Convert changelog.txt into the JSON format Paratranz can recognize"""
2 | import json
3 | import os
4 | from pathlib import Path
5 |
6 | from src.consts import DIR_PARATRANZ
7 |
8 |
9 | VERSION: str | None = None
10 | CHANGELOG_TXT: list | None = None
11 | os.makedirs(Path(__file__).parent / "data", exist_ok=True)  # the JSON output below is written next to this file
12 |
13 | def changelog2paratranz(version: str | None = None):
14 | global VERSION, CHANGELOG_TXT
15 |
16 | with open(Path(__file__).parent.parent.parent.parent / "degrees-of-lewdity-master" / "DoL Changelog.txt", "r", encoding="utf-8") as fp:
17 | lines = fp.readlines()
18 |
19 | current_version = version or lines[0].strip().strip(":").split(",")[0] # 0.4.3.0
20 | current_version_main = ".".join(current_version.split(".")[:-1]) # 0.4.3
21 | for idx, line in enumerate(lines):
22 | line = line.strip()
23 | if not line:
24 | continue
25 | if line[0].isnumeric() and line.strip(":")[-1].isnumeric() and ".".join(line.split(".")[:-1]) != current_version_main:
26 | lines = lines[:idx]
27 | break
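    # The loop above truncates the changelog at the first version header that
    # belongs to a different main version, e.g. a 0.4.2.x header while 0.4.3.x
    # is being processed.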
28 |
29 | result = [
30 | {
31 | "key": line.strip(),
32 | "original": line.strip(),
33 | "translation": ""
34 | } for line in lines if line.strip()
35 | ]
36 | with open(Path(__file__).parent / "data" / f"{current_version}.json", "w", encoding="utf-8") as fp:
37 | json.dump(result, fp, ensure_ascii=False, indent=2)
38 | VERSION = current_version
39 | CHANGELOG_TXT = lines
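    # Each kept line becomes one Paratranz entry, e.g. for a hypothetical line:
    # {"key": "Fixed a typo", "original": "Fixed a typo", "translation": ""}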
40 |
41 |
42 | def paratranz2changelog(version: str | None = None):
43 | global VERSION, CHANGELOG_TXT
44 | if version is None:
45 | version = VERSION
46 |
47 | downloaded_raw_dir = DIR_PARATRANZ / "common" / "raw" / "更新日志" / f"{version}.json.json"
48 | if not downloaded_raw_dir.exists():
49 | return
50 | with open(downloaded_raw_dir, "r", encoding="utf-8") as fp:
51 | data = json.load(fp)
52 |
53 | data = [
54 | (item["original"], item["translation"])
55 | for item in data
56 | ]
57 | for en, cn in data:
58 | for idx_changelog, line in enumerate(CHANGELOG_TXT):
59 | line = line.strip()
60 | if line == en:
61 |                 CHANGELOG_TXT[idx_changelog] = f"{cn}\n"
63 | with open(Path(__file__).parent / "data" / f"{version}.txt", "w", encoding="utf-8") as fp:
64 | fp.writelines(CHANGELOG_TXT)
65 |
66 |
67 | if __name__ == "__main__":
68 | changelog2paratranz()
69 | paratranz2changelog()
70 |
--------------------------------------------------------------------------------
/src/tools/process_variables/__init__.py:
--------------------------------------------------------------------------------
1 | from .main import VariablesProcess
2 |
--------------------------------------------------------------------------------
/src/tools/process_variables/main.py:
--------------------------------------------------------------------------------
1 | """
2 | E.g.:
3 | <<set $var to value>>
4 | <<set $var += 1>>
5 | <<run $var.push(value)>>
6 | """
7 | import asyncio
8 | import json
9 | import os
10 | import re
11 | from enum import Enum
12 | from pathlib import Path
13 | from pprint import pprint
14 |
15 | from src import logger
16 | from src.consts import *
17 | from aiofiles import open as aopen
18 |
19 | SELF_ROOT = Path(__file__).parent
20 |
21 | ALL_NEEDED_TRANSLATED_set_CONTENTS = None
22 |
23 | FREQ_FUNCTIONS = {
24 | ".push(",
25 | ".pushUnique(",
26 | ".delete(",
27 | ".deleteAt(",
28 | ".splice("
29 | }
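# Fragments of the JS/SugarCube array-method calls seen most often inside
# <<set>>/<<run>> macros; _process_content splits on them to recover var + target.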
30 |
31 |
32 | class Regexes(Enum):
33 | VARS_REGEX = re.compile("""([$_][$A-Z_a-z][$0-9A-Z_a-z]*)""")
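    # Matches SugarCube variables: "$"-prefixed story ones and "_"-prefixed temporary ones.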
34 |
35 | SET_RUN_REGEXES: re.Pattern = re.compile("""<<(run|set)(?:\s+((?:(?:\/\*[^*]*\*+(?:[^/*][^*]*\*+)*\/)|(?:\/\/.*\n)|(?:`(?:\\.|[^`\\\n])*?`)|(?:"(?:\\.|[^"\\\n])*?")|(?:'(?:\\.|[^'\\\n])*?')|(?:\[(?:[<>]?[Ii][Mm][Gg])?\[[^\r\n]*?\]\]+)|[^>]|(?:>(?!>)))*?))?>>""")
36 |
37 |
38 | class VariablesProcess:
39 |     """Never again will I skip writing comments"""
40 |     def __init__(self):
41 |         self._all_file_paths = set()  # absolute paths of all .twee files
42 | 
43 |         self._categorize_variables = []  # variables grouped per .twee file: {path, variables}
44 |         self._all_variables = []  # all variables across the .twee files
45 | 
46 |         self._categorize_all_set_contents = []  # <<set>>/<<run>> contents grouped per file
47 |         self._all_set_contents = []  # every <<set>> content found
48 |         self._categorize_all_needed_translated_set_contents = []  # translatable subset, per file
49 |         self._all_needed_translated_set_contents = []  # translatable subset, flat
50 |
51 | def fetch_all_file_paths(self) -> set[Path]:
52 |         """Collect the absolute paths of all .twee files"""
53 | for root, dir_list, file_list in os.walk(DIR_GAME_TEXTS_COMMON):
54 | for file in file_list:
55 | if file.endswith(SUFFIX_TWEE):
56 | self._all_file_paths.add(Path(root).absolute() / file)
57 | return self._all_file_paths
58 |
59 | async def fetch_all_variables(self) -> None:
60 |         """Collect every variable present in the .twee files, write them to disk, create the vars directory"""
61 | tasks = {
62 | self._fetch_all_variables(file)
63 | for file in self._all_file_paths
64 | }
65 |
66 | await asyncio.gather(*tasks)
67 | os.makedirs(SELF_ROOT / "vars", exist_ok=True)
68 |
69 | with open(SELF_ROOT / "vars" / "_variables.json", "w", encoding="utf-8") as fp:
70 | # self._categorize_variables = sorted(self._categorize_variables)
71 | json.dump(self._categorize_variables, fp, ensure_ascii=False, indent=2)
72 |
73 | with open(SELF_ROOT / "vars" / "_all_variables.json", "w", encoding="utf-8") as fp:
74 | json.dump(sorted(list(set(self._all_variables))), fp, ensure_ascii=False, indent=2)
75 |
76 | async def _fetch_all_variables(self, file: Path):
77 |         """Worker coroutine for fetch_all_variables"""
78 | filename = file.name
79 | async with aopen(file, "r", encoding="utf-8") as fp:
80 | raw = await fp.read()
81 | variables = re.findall(Regexes.VARS_REGEX.value, raw)
82 |
83 | if not variables:
84 | return
85 |
86 | self._categorize_variables.append({
87 | "path": str(file).split("\\game\\")[1],
88 | "variables": sorted(list(set(variables)))
89 | })
90 | self._all_variables.extend(list(set(variables)))
91 |
92 | async def build_variables_notations(self):
93 |         """Mark which variables are translatable and write them out; currently unused"""
94 | filepath = DIR_DATA_ROOT / "json" / "variables_notations.json"
95 |
96 | old_data = {}
97 | if filepath.exists():
98 | with open(filepath, "r", encoding="utf-8") as fp:
99 | old_data: dict = json.load(fp)
100 |
101 | new_data = {
102 | var: {
103 | "var": var,
104 | "desc": "",
105 | "canBeTranslated": False
106 | } for var in self._all_variables
107 | }
108 |
109 | if old_data:
110 | for key, items in old_data.items():
111 | if items["desc"]:
112 | new_data[key] = items
113 |
114 | with open(DIR_DATA_ROOT / "json" / "variables_notations.json", "w", encoding="utf-8") as fp:
115 | json.dump(new_data, fp, ensure_ascii=False, indent=2)
116 |
117 | def fetch_all_set_content(self):
118 |         """Collect all <<set>> contents and write them into the setto directory; reuse the files if they already exist"""
119 | global ALL_NEEDED_TRANSLATED_set_CONTENTS
120 |
121 | if ALL_NEEDED_TRANSLATED_set_CONTENTS:
122 | return ALL_NEEDED_TRANSLATED_set_CONTENTS
123 |
124 | if (SELF_ROOT / "setto" / "_needed_translated_set_contents.json").exists():
125 | with open(SELF_ROOT / "setto" / "_needed_translated_set_contents.json", "r", encoding="utf-8") as fp:
126 | data = json.load(fp)
127 | ALL_NEEDED_TRANSLATED_set_CONTENTS = data
128 | return data
129 |
130 | for file in self._all_file_paths:
131 | self._fetch_all_set_content(file)
132 |
133 | os.makedirs(SELF_ROOT / "setto", exist_ok=True)
134 | with open(SELF_ROOT / "setto" / "_set_contents.json", "w", encoding="utf-8") as fp:
135 | json.dump(self._categorize_all_set_contents, fp, ensure_ascii=False, indent=2)
136 |
137 | ALL_NEEDED_TRANSLATED_set_CONTENTS = self._categorize_all_needed_translated_set_contents
138 | with open(SELF_ROOT / "setto" / "_needed_translated_set_contents.json", "w", encoding="utf-8") as fp:
139 | json.dump(self._categorize_all_needed_translated_set_contents, fp, ensure_ascii=False, indent=2)
140 |
141 | self._all_set_contents = sorted(list(set(self._all_set_contents)))
142 | with open(SELF_ROOT / "setto" / "_all_set_contents.json", "w", encoding="utf-8") as fp:
143 | json.dump(self._all_set_contents, fp, ensure_ascii=False, indent=2)
144 |
145 | self._all_needed_translated_set_contents = sorted(list(set(self._all_needed_translated_set_contents)))
146 | with open(SELF_ROOT / "setto" / "_all_needed_translated_set_contents.json", "w", encoding="utf-8") as fp:
147 | json.dump(self._all_needed_translated_set_contents, fp, ensure_ascii=False, indent=2)
148 |
149 | return self._categorize_all_needed_translated_set_contents
150 |
151 | def _fetch_all_set_content(self, file: Path):
152 |         """Per-file worker"""
153 | filename = file.name
154 | with open(file, "r", encoding="utf-8") as fp:
155 | raw = fp.read()
156 | all_set_contents = re.findall(Regexes.SET_RUN_REGEXES.value, raw)
157 |         """
158 |         Standard syntax:
159 |         <<set X to Y>>
160 | 
161 |         1. With an explicit separator:
162 |         - set X to Y
163 |         - set X is Y
164 |         - set X = Y (+=, -=, *=, /=, %=)
165 |         2. Without an explicit separator:
166 |         - set {X:Y, ...}
167 |         - set X
168 |         - set X[Y]
169 |         - set X["Y"] ('', ``)
170 |         - set X++ (--)
171 |         - set X.FUNC(Y)
172 |         """
173 |
174 | if len(all_set_contents) < 2:
175 | return
176 |
177 | all_heads, all_set_contents = [_[0] for _ in all_set_contents], [_[1] for _ in all_set_contents]
178 | var_targets_dict = {}
179 | var_lines_dict = {}
180 | for idx, content in enumerate(all_set_contents):
181 | head = all_heads[idx]
182 | var, target, line = self._process_content(head, content)
183 | if not any({var, target, line}):
184 | continue
185 |
186 | if var in var_targets_dict:
187 | var_targets_dict[var].append(target)
188 | else:
189 | var_targets_dict[var] = [target]
190 |
191 | if var in var_lines_dict:
192 | var_lines_dict[var].append(line)
193 | else:
194 | var_lines_dict[var] = [line]
195 |
196 | self._categorize_all_set_contents.append({
197 | "path": file.__str__(),
198 | "vars": [
199 | {"var": var, "targets": targets, "lines": lines}
200 | for (var, targets), (var_, lines) in zip(
201 | var_targets_dict.items(),
202 | var_lines_dict.items()
203 | )
204 | ]
205 | })
206 |
207 | all_set_contents = [
208 | f"set {content}"
209 | for content in all_set_contents
210 | ]
211 | self._all_set_contents.extend(list(set(all_set_contents)))
212 |
213 | vars_ = []
214 | for (var, targets), (var_, lines) in zip(
215 | var_targets_dict.items(),
216 | var_lines_dict.items()
217 | ):
218 | targets_ = [
219 | target
220 | for target in targets
221 | if self.is_needed_translated(target)
222 | ]
223 | lines_ = [
224 | lines[idx]
225 | for idx, target in enumerate(targets)
226 | if self.is_needed_translated(target)
227 | ]
228 | if not targets_:
229 | continue
230 | vars_.append({"var": var, "targets": targets_, "lines": lines_})
231 |
232 | if vars_:
233 | self._categorize_all_needed_translated_set_contents.append({
234 | "path": file.__str__(),
235 | "vars": vars_
236 | })
237 |
238 | vars_needed_translated = {var_item["var"] for var_item in vars_}
239 | self._all_needed_translated_set_contents.extend(list(set([
240 | content
241 | for content in all_set_contents
242 | if content.split(" ")[1] in vars_needed_translated
243 | ])))
244 |
245 | def _process_content(self, head: str, content: str):
246 | """
247 |         :param content: the body of a <<set ...>> or <<run ...>> macro
248 | """
249 | content: str
250 |
251 | var = content
252 | target = content
253 |
254 |         # 1. definitely not a string
255 | if content.endswith("++") or content.endswith("--"):
256 | return None, None, None
257 | elif "Time.set" in content:
258 | return None, None, None
259 |
260 |         # 2. has an explicit separator
261 | elif re.findall(r"\sto", content):
262 | var, target = re.split(r"\sto", content, 1)
263 | elif re.findall(r"[+\-*/%]*=", content):
264 | var, target = re.split(r"[+\-*/%]*=", content, 1)
265 | elif re.findall(r"\sis\s", content):
266 | var, target = re.split(r"\sis\s", content, 1)
267 |
268 |         # 3. bare function call / bare variable
269 |         # the most frequent call patterns first
270 | elif any(
271 | f in content
272 | for f in FREQ_FUNCTIONS
273 | ):
274 | for f_ in FREQ_FUNCTIONS:
275 | if f_ not in content:
276 | continue
277 | vars_ = re.findall(Regexes.VARS_REGEX.value, content)
278 | if not vars_:
279 | return None, None, None
280 | var = vars_[0]
281 | target = content.split(f_)[-1]
282 | break
283 |
284 |         # whatever the parentheses wrap is the target
285 | elif "(" in content:
286 | vars_ = re.findall(Regexes.VARS_REGEX.value, content)
287 | if not vars_:
288 | return None, None, None
289 | var = vars_[0]
290 | target = "(".join(content.split("(")[1:]).rstrip(")")
291 |         # no parentheses: a bare variable
292 | else:
293 | vars_ = re.findall(Regexes.VARS_REGEX.value, content)
294 | if not vars_:
295 | return None, None, None
296 | var = vars_[0]
297 | target = content
298 |
299 | var = var.strip()
300 | target = target.strip()
301 | line = f"<<{head} {content}>>"
302 |
303 | if target.isnumeric():
304 | target = float(target)
305 | elif target in {"true", "false"}:
306 | target = target == "true"
307 | elif target == "null":
308 | target = None
309 |
310 | return var, target, line
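        # Example (hypothetical macro): head="set", content="$pain to 40"
        # -> var="$pain", target=40.0, line="<<set $pain to 40>>"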
311 |
312 | @staticmethod
313 | def is_needed_translated(target: str):
314 | return (
315 | False
316 | if target is None
317 | else not isinstance(target, (float, bool))
318 | )
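    # Numeric, boolean and null targets were normalized away from str in
    # _process_content, so whatever is still a string may need translation.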
319 |
320 |
321 | def main():
322 | var = VariablesProcess()
323 | var.fetch_all_file_paths()
324 | var.fetch_all_set_content()
325 | # await var.fetch_all_variables()
326 | # await var.build_variables_notations()
327 |
328 | if __name__ == '__main__':
329 | # line = "$_clothes[$_revealType].pushUnique($_wornClothing.name)"
330 | # def test():
331 | # vp = VariablesProcess()
332 | # vp.fetch_all_file_paths()
333 | # return vp._process_content(line, {}, {})
334 | #
335 | # result = test()
336 | # pprint(result)
337 | main()
338 |
339 | __all__ = [
340 | "VariablesProcess"
341 | ]
342 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from pathlib import Path
3 | from src.parse_text import ParseTextTwee, ParseTextJS
4 |
5 | FILE_BASE = r"E:\Users\numbersir\Documents\GitHub\vrelnir_localization\degrees-of-lewdity-master\game"
6 | FILE_NAME = r"base-clothing/clothing-handheld.js"
7 | FILE_PATH = Path(rf"{FILE_BASE}/{FILE_NAME}")
8 |
9 | with open(FILE_PATH, "r", encoding="utf-8") as fp:
10 | CONTENT = fp.read()
11 | with open(FILE_PATH, "r", encoding="utf-8") as fp:
12 | LINES = fp.readlines()
13 | PT = (
14 | ParseTextTwee(lines=LINES, filepath=FILE_PATH)
15 | if FILE_PATH.name.endswith("twee")
16 | else ParseTextJS(lines=LINES, filepath=FILE_PATH)
17 | )
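# The parser is chosen by extension: ParseTextTwee for .twee sources,
# ParseTextJS for everything else (here a .js file).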
18 |
19 |
20 | async def test_fetch_lines():
21 | # sourcery skip: no-conditionals-in-tests
22 | # sourcery skip: no-loop-in-tests
23 |     """Which lines were captured"""
24 | bl = PT.parse()
25 | if FILE_PATH.name.endswith("twee"):
26 | pre_bool_list = PT.pre_parse_set_run(False)
27 |         if not pre_bool_list:
28 |             bl = [bool(line) for line in bl]
29 |         else:
30 |             bl = [
31 |                 bool(pre_bool_list[idx] or line)
32 |                 for idx, line in enumerate(bl)
33 |             ]
38 | # print(f"bool: {len(bl)} - lines: {len(LINES)} - pre: {len(pre_bool_list)}")
39 | for idx, line in enumerate(LINES):
40 | if bl[idx]:
41 | # ...
42 | print(f"{idx + 1}: {line}", end="")
43 |
44 |
45 | async def test_fetch_pos():
46 | # sourcery skip: no-conditionals-in-tests
47 | # sourcery skip: no-loop-in-tests
48 |     """Whether the captured positions line up"""
49 | able_lines = PT.parse()
50 | pre_bool_list = PT.pre_parse_set_run()
51 | able_lines = [
52 |         bool(pre_bool_list[idx] or line)
53 | for idx, line in enumerate(able_lines)
54 | ]
55 | passage_name = None
56 | pos_relative = None
57 | pos_global = 0
58 | for idx, line in enumerate(LINES):
59 | if line.startswith("::"):
60 | pos_relative = 0
61 | tmp_ = line.lstrip(":: ")
62 | if "[" not in line:
63 | passage_name = tmp_.strip()
64 | else:
65 | for idx_, char in enumerate(tmp_):
66 | if char != "[":
67 | continue
68 | passage_name = tmp_[: idx_ - 1].strip()
69 | break
70 | else:
71 |                 raise  # should be unreachable: "[" was found in line
72 |
73 | if able_lines[idx]:
74 | pos_start = 0
75 |             if line != line.lstrip():  # leading \t and spaces count too
76 | for char in line:
77 | if char == line.strip()[0]:
78 | break
79 | pos_start += 1
80 | print(f"passage: {passage_name}")
81 | print(f"line: {line}".replace("\t", "\\t").replace("\n", "\\n"))
82 | print(
83 | f"pos: {pos_relative + pos_start if pos_relative is not None else pos_global + pos_start}"
84 | )
85 | print(
86 | f"global: {pos_global + pos_start}: {len(CONTENT)} | {CONTENT[pos_global + pos_start]}\n"
87 | )
88 | if pos_relative is not None and not line.startswith("::"):
89 | pos_relative += len(line)
90 | pos_global += len(line)
91 |
92 |
93 | if __name__ == "__main__":
94 | asyncio.run(test_fetch_lines())
95 |
--------------------------------------------------------------------------------