├── browserslist
├── .npmrc
├── .gitignore
├── bin.js
├── .npmignore
├── babel.config.js
├── README.md
├── src
│   ├── extractImageClass.js
│   ├── transformPageMeta.js
│   ├── transformClassSize.js
│   ├── transformFontSize.js
│   ├── index.js
│   └── utils
│       └── wxmlParser.ts
└── package.json
/browserslist:
--------------------------------------------------------------------------------
1 | node >= 12
2 |
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | registry=https://registry.npmjs.org
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | .DS_Store
3 | dist
4 |
--------------------------------------------------------------------------------
/bin.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | require('./dist/index')
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
1 | src
2 | babel.config.js
3 | browserslist
4 | yarn.lock
5 | **/__tests__
6 |
--------------------------------------------------------------------------------
/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | presets: ["@babel/preset-typescript", "@babel/preset-env"],
3 | };
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # miniprogram-elder-transform
2 | > Automatic elder-friendly adaptation tool for WeChat Mini Programs
3 |
4 | ## Note
5 | This tool cannot cover every scenario. After the transformation you still need to test the result and adapt it manually so that it meets product expectations.
6 |
7 | ## Usage
8 |
9 | ```bash
10 | # Transform the Mini Program source code in the current directory; app.json must be present in that directory
11 | npx miniprogram-elder-transform .
12 | ```
13 |
14 | ## How it works
15 |
16 | The tool first adds `<page-meta root-font-size="system"/>` to each page's `wxml`, so that the font size chosen by the user is applied to the Mini Program's `rem` unit.
17 | 
18 | It then rewrites font-size, line-height, and image width/height declarations in `wxss` into forms that scale with `rem`:
19 | 
20 | for example, the `wxss` declaration `font-size: 14px;` becomes `font-size: calc(14px + 0.5 * (1rem - 16px));` after the transformation.
21 |
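22 | For example (an illustrative sketch; actual pages will differ), a page `wxml` without `<page-meta>` such as
23 | 
24 | ```html
25 | <view class="title">Hello</view>
26 | ```
27 | 
28 | is rewritten to
29 | 
30 | ```html
31 | <page-meta root-font-size="system"/>
32 | <view class="title">Hello</view>
33 | ```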
--------------------------------------------------------------------------------
/src/extractImageClass.js:
--------------------------------------------------------------------------------
1 | import * as wxmlParser from "./utils/wxmlParser";
2 |
3 | /**
4 |  * Extract the wxss class names referenced by image/icon nodes in a wxml file
5 |  * @param {string} source
6 |  * @param {string} filename
7 |  * @returns {Promise<string[]>}
8 | */
9 | export default async function extractImageClass(source, filename) {
10 | const wxmlAst = wxmlParser.parse(filename, source);
11 | const imageClasses = [];
12 | wxmlParser.walk(wxmlAst, {
13 | begin(path) {
14 | if (
15 | path.node.type !== "element" ||
16 | !["image", "icon"].includes(path.node.tag)
17 | )
18 | return;
19 | let classAttr;
20 | path.node.attrs.forEach((attr) => {
21 | if (attr.name === "class") classAttr = attr;
22 | });
23 | if (!classAttr || !classAttr.value || /\{\{/.test(classAttr.value))
24 | return;
25 | imageClasses.push(...classAttr.value.split(" "));
26 | },
27 | });
28 |
29 | return imageClasses;
30 | }
31 |
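32 | // Example (illustrative): for a wxml fragment like
33 | //   <image class="avatar avatar--large" src="a.png" />
34 | // this resolves to ["avatar", "avatar--large"]; class attributes containing
35 | // template bindings ({{ ... }}) are skipped.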
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "miniprogram-elder-transform",
3 | "version": "0.0.4",
4 | "description": "miniprogram transformer for elder",
5 | "main": "index.js",
6 | "bin": {
7 | "miniprogram-elder-transform": "bin.js"
8 | },
9 | "engines": {
10 | "node": ">= 12"
11 | },
12 | "scripts": {
13 | "build": "babel src --extensions '.js,.ts' --out-dir dist --ignore **/__tests__ --verbose",
14 | "test": "echo \"Error: no test specified\" && exit 1",
15 | "prepublishOnly": "npm run build"
16 | },
17 | "repository": {
18 | "type": "git",
19 | "url": "git@github.com:wechat-miniprogram/miniprogram-elder-transform.git"
20 | },
21 | "author": "chyizheng",
22 | "license": "ISC",
23 | "dependencies": {
24 | "fast-glob": "^3.2.7",
25 | "postcss": "^8.3.5",
26 | "postcss-selector-parser": "^6.0.6",
27 | "postcss-value-parser": "^4.1.0",
28 | "source-map": "^0.7.3",
29 | "yargs": "^17.1.1"
30 | },
31 | "devDependencies": {
32 | "@babel/cli": "^7.14.8",
33 | "@babel/core": "^7.14.8",
34 | "@babel/preset-env": "^7.14.9",
35 | "@babel/preset-typescript": "^7.14.5"
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/src/transformPageMeta.js:
--------------------------------------------------------------------------------
1 | import * as wxmlParser from "./utils/wxmlParser";
2 |
3 | /**
4 |  * Transform the <page-meta> settings of a wxml page
5 | * @param {string} source
6 | * @param {string} filename
7 | */
8 | export default async function transformPageMeta(source, filename) {
9 | const ast = wxmlParser.parse(filename, source);
10 |
11 | let rootPath;
12 | let pageMeta;
13 | wxmlParser.walk(ast, {
14 | begin(path) {
15 | if (!rootPath) rootPath = path;
16 | if (path.node.type !== "element" || path.node.tag !== "page-meta") return;
17 | pageMeta = path.node;
18 | },
19 | });
20 |
21 | if (!pageMeta) {
22 |     // insert a <page-meta> element at the top of the page
23 | pageMeta = {
24 | type: "element",
25 | tag: "page-meta",
26 | attrs: [],
27 | children: [],
28 | };
29 | rootPath.insertBefore(pageMeta);
30 |     rootPath.insertBefore({ type: "text", text: "\n" }); // insert a line break
31 | }
32 |
33 | let hasRootFontSize = false;
34 | pageMeta.attrs.forEach((attr) => {
35 | if (attr.name === "root-font-size") {
36 | hasRootFontSize = true;
37 |
38 | if (attr.value !== "system") {
39 | console.warn(
40 |          `cannot change <page-meta root-font-size="${attr.value}"> in ${filename}`
41 | );
42 | } else {
43 | attr.value = "system";
44 | }
45 | }
46 | });
47 |
48 |   // add a root-font-size attribute to <page-meta>
49 | if (!hasRootFontSize) {
50 | pageMeta.attrs.push({
51 | name: "root-font-size",
52 | value: "system",
53 | });
54 | }
55 |
56 | const { code } = wxmlParser.codegen(ast);
57 | return code;
58 | }
59 |
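60 | // Example (illustrative): a page wxml without <page-meta>, e.g.
61 | //   <view>hi</view>
62 | // is returned as
63 | //   <page-meta root-font-size="system"/>
64 | //   <view>hi</view>
65 | // An existing <page-meta> only gets root-font-size="system" added (or a warning if it
66 | // already carries a different root-font-size).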
--------------------------------------------------------------------------------
/src/transformClassSize.js:
--------------------------------------------------------------------------------
1 | import postcss from "postcss";
2 | import selectorParser from "postcss-selector-parser";
3 | import valueParser from "postcss-value-parser";
4 |
5 | /**
6 |  * PostCSS plugin that transforms the width/height of specific classes in wxss
7 | * @return {import('postcss').Plugin}
8 | */
9 | const transformClassSizePlugin = ({classNames = []} = {}) => {
10 | return {
11 | postcssPlugin: "transform-class-size",
12 | Once(root, { result }) {
13 | root.walkRules(rule => {
14 | let selectorMatch
15 |       // check whether the selector matches one of the target classNames
16 | selectorParser(selectors => {
17 | selectorMatch = selectors.every(selector => {
18 | let lastClassNode
19 | selector.walkClasses(node => { lastClassNode = node })
20 | return lastClassNode && classNames.includes(lastClassNode.value)
21 | })
22 | }).processSync(rule.selector)
23 |
24 | if (!selectorMatch) return
25 |
26 |
27 | const factor = 0.5
28 | rule.walkDecls(/^(width|height)$/, decl => {
29 |         // only transform values that carry a unit
30 | const unit = valueParser.unit(decl.value);
31 | if (!unit) return;
32 | decl.value = `calc(${decl.value} + ${factor} * (1rem - 16px))`;
33 | })
34 | });
35 | },
36 | };
37 | };
38 |
39 | /**
40 | * @param {string} source
41 | * @param {string} filename
42 |  * @return {Promise<string>}
43 | */
44 | export default async function transformClassSize(source, classNames, filename) {
45 | const { css } = await postcss([transformClassSizePlugin({classNames})]).process(source, {
46 | from: undefined,
47 | });
48 | return css;
49 | }
50 |
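51 | // Example (illustrative): with classNames = ["avatar"], a rule such as
52 | //   .avatar { width: 48px; height: 48px; }
53 | // becomes
54 | //   .avatar { width: calc(48px + 0.5 * (1rem - 16px)); height: calc(48px + 0.5 * (1rem - 16px)); }
55 | // Rules whose selectors do not end in one of the given class names are left untouched.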
--------------------------------------------------------------------------------
/src/transformFontSize.js:
--------------------------------------------------------------------------------
1 | import postcss from "postcss";
2 | import valueParser from "postcss-value-parser";
3 |
4 | /**
5 |  * PostCSS plugin that transforms font-size declarations in wxss
6 | * @return {import('postcss').Plugin}
7 | */
8 | const transformFontSizePlugin = (options = {}) => {
9 | return {
10 | postcssPlugin: "transform-font-size",
11 | Once(root, { result }) {
12 | root.walkRules((rule) => {
13 | let fontSizeValue;
14 | const factor = 0.5;
15 | rule.walkDecls("font-size", (decl) => {
16 |         // only transform font-size values that carry a unit
17 | const unit = valueParser.unit(decl.value);
18 | if (!unit) return;
19 |
20 | fontSizeValue = decl.value;
21 | decl.value = `calc(${decl.value} + ${factor} * (1rem - 16px))`;
22 | });
23 | if (!fontSizeValue) return;
24 |
25 | rule.walkDecls((decl) => {
26 | if (
27 | !["height", "line-height", "min-height", "max-height"].includes(
28 | decl.prop
29 | )
30 | )
31 | return;
32 |
33 |         // only transform line-height/height values that carry a unit
34 | const unit = valueParser.unit(decl.value);
35 | if (!unit) return;
36 |
37 | decl.value = `calc(${decl.value} + ${factor} * (1rem - 16px))`;
38 | });
39 | });
40 | },
41 | };
42 | };
43 |
44 | /**
45 | * @param {string} source
46 | * @param {string} filename
47 |  * @return {Promise<string>}
48 | */
49 | export default async function transformFontSize(source, filename) {
50 | const { css } = await postcss([transformFontSizePlugin()]).process(source, {
51 | from: undefined,
52 | });
53 | return css;
54 | }
55 |
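56 | // Example (illustrative): a rule such as
57 | //   .title { font-size: 14px; line-height: 20px; }
58 | // becomes
59 | //   .title { font-size: calc(14px + 0.5 * (1rem - 16px)); line-height: calc(20px + 0.5 * (1rem - 16px)); }
60 | // height/line-height declarations are only adjusted when the same rule's font-size was transformed.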
--------------------------------------------------------------------------------
/src/index.js:
--------------------------------------------------------------------------------
1 | import glob from "fast-glob";
2 | import transformFontSize from "./transformFontSize";
3 | import transformPageMeta from "./transformPageMeta";
4 | import transformClassSize from "./transformClassSize";
5 | import extractImageClass from "./extractImageClass";
6 | import fs from "fs";
7 | import path from "path";
8 | import yargs from "yargs";
9 |
10 | async function transformWxss(cwd) {
11 | const entries = await glob("**/*.wxss", { cwd, absolute: true });
12 | for (const entry of entries) {
13 | const source = await fs.promises.readFile(entry, "utf8");
14 | const output = await transformFontSize(source, entry);
15 | await fs.promises.writeFile(entry, output, "utf8");
16 | }
17 | }
18 |
19 | async function transformPagesWxml(cwd) {
20 | const appJsonPath = path.join(cwd, "app.json");
21 | if (!fs.existsSync(appJsonPath)) return;
22 | const appJson = JSON.parse(await fs.promises.readFile(appJsonPath, "utf8"));
23 | const subpackages = appJson.subpackages || [];
24 | const subPages = subpackages.reduce((arr, item) => {
25 | const res = (item.pages || []).map((i) => item.root + i);
26 | return arr.concat(res);
27 | }, []);
28 | const pages = [...appJson.pages, ...subPages];
29 | for (const page of pages) {
30 | const entry = path.join(
31 | cwd,
32 | (page[0] === "/" ? "." + page : page) + ".wxml"
33 | );
34 | const source = await fs.promises.readFile(entry, "utf8");
35 | const output = await transformPageMeta(source, entry);
36 | await fs.promises.writeFile(entry, output, "utf8");
37 | }
38 | }
39 |
40 | async function transformImageSize(cwd) {
41 | const wxmlEntries = await glob("**/*.wxml", { cwd, absolute: true });
42 | const allImageClasses = [];
43 | for (const wxmlEntry of wxmlEntries) {
44 | const wxmlSource = await fs.promises.readFile(wxmlEntry, "utf8");
45 | const imageClasses = await extractImageClass(wxmlSource, wxmlEntry);
46 |
47 | allImageClasses.push(...imageClasses);
48 | }
49 |
50 | const wxssEntries = await glob("**/*.(wxss|css|less|sass|scss)", {
51 | cwd,
52 | absolute: true,
53 | });
54 | for (const wxssEntry of wxssEntries) {
55 | const wxssSource = await fs.promises.readFile(wxssEntry, "utf8");
56 | const output = await transformClassSize(
57 | wxssSource,
58 | allImageClasses,
59 | wxssEntry
60 | );
61 | await fs.promises.writeFile(wxssEntry, output, "utf8");
62 | }
63 | }
64 |
65 | const argv = yargs(process.argv.slice(2)).usage(
66 |   "$0 <baseDir>",
67 | "transform mini program source code",
68 | (yargs) => {
69 | yargs.positional("baseDir", {
70 | describe: "directory to transform",
71 | type: "string",
72 | coerce: (baseDir) => path.join(process.cwd(), baseDir || ""),
73 | });
74 | }
75 | ).argv;
76 |
77 | async function main() {
78 | try {
79 | await transformWxss(argv.baseDir);
80 | await transformPagesWxml(argv.baseDir);
81 | await transformImageSize(argv.baseDir);
82 | } catch (e) {
83 | console.error(e);
84 | process.exit(1);
85 | }
86 | }
87 |
88 | main();
89 |
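90 | // Example (illustrative): given an app.json like
91 | //   { "pages": ["pages/index/index"], "subpackages": [{ "root": "packageA/", "pages": ["pages/cat"] }] }
92 | // transformPagesWxml rewrites pages/index/index.wxml and packageA/pages/cat.wxml, while
93 | // transformWxss and transformImageSize glob every wxss/wxml file under baseDir.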
--------------------------------------------------------------------------------
/src/utils/wxmlParser.ts:
--------------------------------------------------------------------------------
1 | import { SourceNode, SourceMapGenerator, SourceMapConsumer } from "source-map";
2 |
3 | // regular expression declarations
4 | const startTagReg =
5 | /^<([-A-Za-z0-9_]+)((?:\s+[-A-Za-z0-9_:@.#]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)>/i;
6 | const endTagReg = /^<\/([-A-Za-z0-9_]+)[^>]*>/i;
7 | const attrReg =
8 | /^([-A-Za-z0-9_:@.#]+)(?:(\s*=\s*)(?:(?:(")((?:\\.|[^"])*)")|(?:(')((?:\\.|[^'])*)')|([^>\s]+)))?/i;
9 | const commentReg = /^<!--([\s\S]*?)-->/;
10 | const spaceReg = /^\s*/;
11 | const textReg = /^[^<]+/;
12 |
13 | // void elements (no end tag)
14 | const voidSet: Set<string> = new Set([]);
15 |
16 | // elements that may contain arbitrary (raw) content
17 | const rawTextSet: Set<string> = new Set(["wxs"]);
18 |
19 | interface Position {
20 | line: number;
21 | column: number;
22 | file: string;
23 | source: string;
24 | }
25 |
26 | type PositionMapper = (start: number, length: number) => Position;
27 |
28 | function getPositionMapper(fileName: string, content: string): PositionMapper {
29 |   const lines: number[] = []; // lines[i] is the line number at position i
30 |   const columns: number[] = []; // columns[i] is the offset at which the line containing position i starts
31 |
32 | let col = 0;
33 | let lin = 1;
34 | const l = content.length;
35 | for (let i = 0; i < l; ++i) {
36 | lines[i] = lin;
37 | columns[i] = col;
38 | if (content[i] === "\n") {
39 | lin += 1;
40 | col = i + 1;
41 | }
42 | }
43 |
44 | return (start, length) => {
45 | return {
46 | line: lines[start],
47 | column: start - columns[start],
48 | file: fileName,
49 | source: content.substr(start, length),
50 | };
51 | };
52 | }
53 |
54 | const DIRTY = Symbol("dirty");
55 |
56 | function isDirtySourceNode(
57 | chunks: string | SourceNode | (string | SourceNode)[]
58 | ): boolean {
59 | return Array.isArray(chunks)
60 | ? chunks.some((chunk) => isDirtySourceNode(chunk))
61 | : chunks[DIRTY];
62 | }
63 |
64 | function posToSourceNode(
65 | pos: Position | undefined,
66 | chunks: string | SourceNode | (string | SourceNode)[] = []
67 | ): any {
68 | if (!Array.isArray(chunks)) chunks = [chunks];
69 | const dirty = !pos?.source || isDirtySourceNode(chunks);
70 | const node = new SourceNode(
71 | pos?.line ?? null,
72 | pos?.column ?? null,
73 | pos?.file ?? null,
74 | dirty ? chunks : pos!.source
75 | );
76 | node[DIRTY] = dirty;
77 | return node;
78 | }
79 |
80 | class Stack<T> extends Array<T> {
81 | constructor(arr: T[] = []) {
82 | super(...arr);
83 | }
84 |
85 | top() {
86 | return this[this.length - 1];
87 | }
88 | }
89 |
90 | interface TextToken {
91 | type: "text";
92 | raw: boolean;
93 | text: string;
94 | pos?: Position;
95 | }
96 |
97 | interface CommentToken {
98 | type: "comment";
99 | content: {
100 | val: string;
101 | pos?: Position;
102 | };
103 | pos?: Position;
104 | }
105 |
106 | interface AttributeToken {
107 | type: "attribute";
108 | name: {
109 | val: string;
110 | pos?: Position;
111 | };
112 | value?: {
113 | val: string;
114 | pos?: Position;
115 | };
116 | pos?: Position;
117 | }
118 |
119 | interface StartTagToken {
120 | type: "startTag";
121 | tag: {
122 | val: string;
123 | pos?: Position;
124 | };
125 | attrs: AttributeToken[];
126 | unary: boolean;
127 | pos?: Position;
128 | }
129 |
130 | interface EndTagToken {
131 | type: "endTag";
132 | tag: {
133 | val: string;
134 | pos?: Position;
135 | };
136 | pos?: Position;
137 | }
138 |
139 | type Token = TextToken | CommentToken | StartTagToken | EndTagToken;
140 |
141 | function tokenizeComment(
142 | content: string,
143 | start: number,
144 | tokens: Token[],
145 | positionMapper: PositionMapper
146 | ) {
147 | const match = content.match(commentReg);
148 |
149 | if (!match) return;
150 |
151 | const all = match[0];
152 | const text = match[1];
153 |
154 | tokens.push({
155 | type: "comment",
156 | content: {
157 | val: text,
158 | pos: positionMapper(start + 4, text.length),
159 | },
160 | pos: positionMapper(start, all.length),
161 | });
162 |
163 | return all.length;
164 | }
165 |
166 | function tokenizeRawText(
167 | tagName: string,
168 | content: string,
169 | start: number,
170 | tokens: Token[],
171 | positionMapper: PositionMapper
172 | ) {
173 | const match = content.match(
174 | new RegExp(`^(?:[^<]+|(?:<(?!/${tagName}[^>]*>)))+`)
175 | );
176 |
177 | if (!match) return;
178 |
179 | const text = match[0];
180 |
181 | tokens.push({
182 | type: "text",
183 | raw: true,
184 | text,
185 | pos: positionMapper(start, text.length),
186 | });
187 |
188 | return text.length;
189 | }
190 |
191 | function tokenizeRawTextEndTag(
192 | tagName: string,
193 | content: string,
194 | start: number,
195 | tokens: Token[],
196 | positionMapper: PositionMapper
197 | ) {
198 |   const match = content.match(new RegExp(`^</${tagName}[^>]*>`));
199 |
200 | if (!match) return;
201 |
202 | const all = match[0];
203 |
204 | tokens.push({
205 | type: "endTag",
206 | tag: {
207 | val: tagName,
208 | pos: positionMapper(start + 2, tagName.length),
209 | },
210 | pos: positionMapper(start, all.length),
211 | });
212 |
213 | return all.length;
214 | }
215 |
216 | function tokenizeEndTag(
217 | content: string,
218 | start: number,
219 | tokens: Token[],
220 | positionMapper: PositionMapper
221 | ) {
222 | const match = content.match(endTagReg);
223 |
224 | if (!match) return;
225 |
226 | const all = match[0];
227 | const tagName = match[1];
228 |
229 | tokens.push({
230 | type: "endTag",
231 | tag: {
232 | val: tagName,
233 | pos: positionMapper(start + 2, tagName.length),
234 | },
235 | pos: positionMapper(start, all.length),
236 | });
237 |
238 | return all.length;
239 | }
240 |
241 | function tokenizeStartTag(
242 | content: string,
243 | start: number,
244 | tokens: Token[],
245 | positionMapper: PositionMapper
246 | ) {
247 | const match = content.match(startTagReg);
248 |
249 | if (!match) return;
250 |
251 | const all = match[0];
252 | const tagName = match[1];
253 | const attrString = match[2];
254 | const unary = voidSet.has(tagName) || !!match[3];
255 |
256 | const attrs = tokenizeAttrs(
257 | attrString,
258 | start + 1 + tagName.length,
259 | positionMapper
260 | );
261 |
262 | tokens.push({
263 | type: "startTag",
264 | tag: {
265 | val: tagName,
266 | pos: positionMapper(start + 1, tagName.length),
267 | },
268 | attrs,
269 | unary,
270 | pos: positionMapper(start, all.length),
271 | });
272 |
273 | return all.length;
274 | }
275 |
276 | function tokenizeText(
277 | content: string,
278 | start: number,
279 | tokens: Token[],
280 | positionMapper: PositionMapper
281 | ) {
282 | const match = content.match(textReg);
283 |
284 | if (!match) return;
285 |
286 | const text = match[0];
287 |
288 | tokens.push({
289 | type: "text",
290 | raw: false,
291 | text,
292 | pos: positionMapper(start, text.length),
293 | });
294 |
295 | return text.length;
296 | }
297 |
298 | function tokenizeAttr(
299 | content: string,
300 | start: number,
301 | tokens: AttributeToken[],
302 | positionMapper: PositionMapper
303 | ) {
304 | const match = content.match(attrReg);
305 |
306 | if (!match) return;
307 |
308 | const all = match[0];
309 | const nameStr = match[1];
310 | const equal = match[2];
311 | const quote = match[3] || match[5] || "";
312 | const valueStr = match[4] ?? match[6] ?? match[7];
313 |
314 | const name: AttributeToken["name"] = {
315 | val: nameStr,
316 | pos: positionMapper(start, nameStr.length),
317 | };
318 |
319 | let value: AttributeToken["value"];
320 | if (typeof valueStr === "string") {
321 | const valueStart = start + nameStr.length + equal.length + quote.length;
322 | value = {
323 | val: valueStr,
324 | pos: positionMapper(valueStart, valueStr.length),
325 | };
326 | }
327 |
328 | tokens.push({
329 | type: "attribute",
330 | name,
331 | value,
332 | pos: positionMapper(start, all.length),
333 | });
334 |
335 | return all.length;
336 | }
337 |
338 | function tokenizeSpace(
339 | content: string,
340 | start: number,
341 | tokens: AttributeToken[],
342 | positionMapper: PositionMapper
343 | ) {
344 | const match = content.match(spaceReg);
345 |
346 | if (!match) return;
347 |
348 | return match[0].length;
349 | }
350 |
351 | function tokenizeAttrs(
352 | content: string,
353 | start: number,
354 | positionMapper: PositionMapper
355 | ) {
356 | const tokens: AttributeToken[] = [];
357 |
358 | while (content.length) {
359 | const offset =
360 | tokenizeAttr(content, start, tokens, positionMapper) ||
361 | tokenizeSpace(content, start, tokens, positionMapper);
362 |
363 | if (!offset) {
364 | throw new Error("unexpected token " + content);
365 | }
366 |
367 | start += offset;
368 | content = content.substring(offset);
369 | }
370 |
371 | return tokens;
372 | }
373 |
374 | /**
375 |  * Tokenize: split the wxml source into Tokens
376 | */
377 | export function tokenize(fileName: string, content: string): Token[] {
378 | const tokens: Token[] = [];
379 | let start = 0;
380 |
381 | const positionMapper = getPositionMapper(fileName, content);
382 |
383 | while (content.length) {
384 | const lastToken = tokens[tokens.length - 1];
385 |
386 | let offset: number | undefined;
387 |
388 | if (
389 | lastToken &&
390 | lastToken.type === "startTag" &&
391 | rawTextSet.has(lastToken.tag.val) &&
392 | !lastToken.unary
393 | ) {
394 |       // for tags that may contain arbitrary content, only parse text and the tag's own end tag
395 | offset =
396 | tokenizeRawText(
397 | lastToken.tag.val,
398 | content,
399 | start,
400 | tokens,
401 | positionMapper
402 | ) ||
403 | tokenizeRawTextEndTag(
404 | lastToken.tag.val,
405 | content,
406 | start,
407 | tokens,
408 | positionMapper
409 | );
410 | } else {
411 | offset =
412 | tokenizeComment(content, start, tokens, positionMapper) ||
413 | tokenizeEndTag(content, start, tokens, positionMapper) ||
414 | tokenizeStartTag(content, start, tokens, positionMapper) ||
415 | tokenizeText(content, start, tokens, positionMapper);
416 | }
417 |
418 | if (!offset) {
419 | throw new Error("unexpected token " + content);
420 | }
421 |
422 | start += offset;
423 | content = content.substring(offset);
424 | }
425 |
426 | return tokens;
427 | }
428 |
429 | export type WxmlNode = WxmlText | WxmlComment | WxmlElement;
430 |
431 | export type WxmlTree = WxmlNode[];
432 |
433 | interface WxmlText {
434 | type: "text";
435 | raw?: boolean;
436 | text: string;
437 | token?: TextToken;
438 | }
439 |
440 | interface WxmlComment {
441 | type: "comment";
442 | content: string;
443 | token?: CommentToken;
444 | }
445 |
446 | interface WxmlElement {
447 | type: "element";
448 | tag: string;
449 | attrs: { name: string; value?: string; token?: AttributeToken }[];
450 | startTagToken?: StartTagToken;
451 | endTagToken?: EndTagToken;
452 | children: WxmlTree;
453 | }
454 |
455 | /**
456 |  * Parse: tokenize the wxml and build a Wxml node tree from the tokens
457 | */
458 | export function parse(fileName: string, wxml: string): WxmlTree {
459 | const root = {
460 | type: "root" as const,
461 | children: [] as WxmlTree,
462 | };
463 | const stack = new Stack([root]);
464 |
465 | const tokens = tokenize(fileName, wxml);
466 |
467 | for (const token of tokens) {
468 | switch (token.type) {
469 | case "startTag": {
470 | const elem: WxmlElement = {
471 | type: "element",
472 | tag: token.tag.val,
473 | attrs: token.attrs.map((token) => ({
474 | name: token.name.val,
475 | value: token.value?.val,
476 | token,
477 | })),
478 | startTagToken: token,
479 | children: [],
480 | };
481 | stack.top().children.push(elem);
482 | if (!token.unary) stack.push(elem);
483 | break;
484 | }
485 | case "endTag": {
486 | let top = stack.top();
487 |         // when closing a tag, pop to the nearest matching start tag
488 | while (
489 | (top = stack.top()) &&
490 | top.type === "element" &&
491 | top.tag !== token.tag.val
492 | ) {
493 | stack.pop();
494 | }
495 |
496 | if (top.type !== "element") {
497 |         // no matching start tag was found
498 | throw new Error("unexpected end tag " + token.tag.val);
499 | }
500 |
501 | top.endTagToken = token;
502 | stack.pop();
503 | break;
504 | }
505 | case "comment": {
506 | stack.top().children.push({
507 | type: "comment",
508 | content: token.content.val,
509 | token,
510 | });
511 | break;
512 | }
513 | case "text": {
514 | const text = token.text;
515 | if (!text) break;
516 |
517 |         // if (stack.top().type === 'root') {
518 |         //   // text is not allowed at the root node
519 |         //   throw new Error('unexpected text ' + token.text)
520 |         // }
521 | stack.top().children.push({
522 | type: "text",
523 | raw: token.raw,
524 | text: token.text,
525 | token,
526 | });
527 | break;
528 | }
529 | }
530 | }
531 |
532 | return root.children;
533 | }
534 |
535 | export interface WxmlPath {
536 | node: WxmlNode;
537 | replace(node: WxmlNode): void;
538 | insertBefore(node: WxmlNode): void;
539 | insertAfter(node: WxmlNode): void;
540 | }
541 |
542 | function getPath(parent: WxmlTree, index: number, node: WxmlNode): WxmlPath {
543 | return {
544 | node,
545 | replace(node: WxmlNode) {
546 | parent[index] = node;
547 | },
548 | insertBefore(node: WxmlNode) {
549 | parent.splice(index, 0, node);
550 | index += 1;
551 | },
552 | insertAfter(node: WxmlNode) {
553 | if (index < parent.length - 1) {
554 | parent.splice(index + 1, 0, node);
555 | } else {
556 | parent.push(node);
557 | }
558 | },
559 | };
560 | }
561 |
562 | /**
563 |  * Walk the node tree
564 | */
565 | export function walk(
566 | ast: WxmlTree,
567 | handler: {
568 | begin?: (path: WxmlPath) => void;
569 | end?: (path: WxmlPath) => void;
570 | }
571 | ): void {
572 | Array.from(ast).forEach((node, index) => {
573 | const path = getPath(ast, index, node);
574 | handler.begin && handler.begin(path);
575 | if (node.type === "element") walk(node.children, handler);
576 | handler.end && handler.end(path);
577 | });
578 | }
579 |
580 | /**
581 |  * Code generation: serialize the node tree back to wxml
582 | */
583 | export function codegen(
584 | ast: WxmlTree,
585 | options: { sourceMap?: boolean; prevMap?: any; minimize?: boolean } = {}
586 | ): { code: string; map: SourceMapGenerator | undefined } {
587 | const { sourceMap, prevMap, minimize } = {
588 | sourceMap: false,
589 | minimize: false,
590 | ...options,
591 | };
592 |
593 | const rootNode = new SourceNode();
594 |
595 | const _codegen = (elem: WxmlNode, sourceNode: SourceNode) => {
596 | switch (elem.type) {
597 | case "text": {
598 | if (!minimize || elem.raw) {
599 |           // empty/whitespace-only text does not get a source mapping
600 | sourceNode.add(
601 | posToSourceNode(
602 | elem.token?.pos && elem.text.trim() ? elem.token?.pos : undefined,
603 | elem.text
604 | )
605 | );
606 | } else {
607 | const trimText = elem.text.trim();
608 | if (trimText)
609 | sourceNode.add(posToSourceNode(elem.token?.pos, trimText));
610 | }
611 | break;
612 | }
613 | case "comment": {
614 | if (!minimize)
615 | sourceNode.add(
616 |             // comments do not get a source mapping
617 |             posToSourceNode(undefined, ["<!--", elem.content, "-->"])
618 | );
619 | break;
620 | }
621 | case "element": {
622 | // startTag
623 | sourceNode.add(
624 | posToSourceNode(elem.startTagToken?.pos, [
625 | "<",
626 | posToSourceNode(elem.startTagToken?.tag.pos, elem.tag),
627 | ...elem.attrs.map((attr) => [
628 | " ",
629 | posToSourceNode(attr.token?.pos, [
630 | posToSourceNode(attr.token?.name.pos, attr.name),
631 | ...(attr.value === undefined
632 | ? []
633 | : [
634 | '="',
635 | posToSourceNode(attr.token?.value?.pos, attr.value),
636 | '"',
637 | ]),
638 | ]),
639 | ]),
640 | elem.endTagToken ? ">" : "/>",
641 | ])
642 | );
643 | // content
644 | if (elem.children.length) {
645 | elem.children.forEach((child) => _codegen(child, sourceNode));
646 | }
647 | // endTag
648 | if (elem.endTagToken)
649 | sourceNode.add(
650 | posToSourceNode(elem.endTagToken?.pos, [
651 |             "</",
652 | posToSourceNode(elem.endTagToken?.tag.pos, elem.tag),
653 | ">",
654 | ])
655 | );
656 | break;
657 | }
658 | }
659 | };
660 |
661 | ast.forEach((elem) => _codegen(elem, rootNode));
662 |
663 | let code: string, map: SourceMapGenerator | undefined;
664 |
665 | if (sourceMap) {
666 | const result = rootNode.toStringWithSourceMap();
667 | code = result.code;
668 | map = result.map;
669 | } else {
670 | code = rootNode.toString();
671 | map = undefined;
672 | }
673 |
674 | if (map && prevMap) {
675 | const prevConsumer = new SourceMapConsumer(prevMap);
676 | map.applySourceMap(prevConsumer);
677 | }
678 |
679 | return {
680 | code,
681 | map,
682 | };
683 | }
684 |
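685 | // Example (illustrative): parse / codegen round-trip
686 | //   const ast = parse("page.wxml", `<view class="a">hi</view>`);
687 | //   const { code } = codegen(ast); // '<view class="a">hi</view>'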
--------------------------------------------------------------------------------