├── .gitignore
├── .npmrc
├── README.md
├── binarization-bak.js
├── binarization.html
├── binarization.js
├── bit.js
├── cnn.html
├── cnn.md
├── data
│   ├── data.js
│   ├── index.js
│   ├── model.js
│   ├── package.json
│   ├── read.js
│   ├── write.js
│   └── yarn.lock
├── demo.gif
├── docs
│   ├── assets
│   │   ├── cnn.d418443f.js
│   │   ├── index.070df02d.js
│   │   ├── index.24c9cc44.js
│   │   ├── index.96676500.css
│   │   ├── map.efa40bbb.png
│   │   ├── model.368228f4.json
│   │   ├── test.137e7c6f.buffer
│   │   └── train.9ab62db8.buffer
│   ├── cnn.html
│   ├── favicon.ico
│   ├── index.html
│   ├── model.json
│   └── model.weights.bin
├── index.html
├── lsb.js
├── map.jpeg
├── ocr.md
├── ocr
│   ├── ocr-0.jpg
│   ├── ocr-1.jpeg
│   ├── ocr-10.png
│   ├── ocr-11.png
│   ├── ocr-12.png
│   ├── ocr-13.jpeg
│   ├── ocr-14.jpeg
│   ├── ocr-15.jpeg
│   ├── ocr-16.jpeg
│   ├── ocr-17.png
│   ├── ocr-18.png
│   ├── ocr-19.png
│   ├── ocr-2.jpeg
│   ├── ocr-3.jpeg
│   ├── ocr-4.jpeg
│   ├── ocr-5.png
│   ├── ocr-6.png
│   ├── ocr-7.jpeg
│   ├── ocr-8.png
│   ├── ocr-9.png
│   └── ocr-map.jpeg
├── package.json
├── pnpm-lock.yaml
├── public
│   ├── favicon.ico
│   ├── model.json
│   └── model.weights.bin
├── src
│   ├── App.vue
│   ├── assets
│   │   ├── map.jpeg
│   │   ├── map.png
│   │   └── sheikah-icon
│   │       ├── 0.svg
│   │       ├── 1.svg
│   │       ├── 2.svg
│   │       ├── 3.svg
│   │       ├── 4.svg
│   │       ├── 5.svg
│   │       ├── 6.svg
│   │       ├── 7.svg
│   │       ├── 8.svg
│   │       ├── 9.svg
│   │       ├── a.svg
│   │       ├── b.svg
│   │       ├── c.svg
│   │       ├── d.svg
│   │       ├── e.svg
│   │       ├── exclam.svg
│   │       ├── f.svg
│   │       ├── g.svg
│   │       ├── h.svg
│   │       ├── hyphen.svg
│   │       ├── i.svg
│   │       ├── j.svg
│   │       ├── k.svg
│   │       ├── l.svg
│   │       ├── m.svg
│   │       ├── n.svg
│   │       ├── o.svg
│   │       ├── p.svg
│   │       ├── period.svg
│   │       ├── q.svg
│   │       ├── question.svg
│   │       ├── r.svg
│   │       ├── s.svg
│   │       ├── t.svg
│   │       ├── u.svg
│   │       ├── v.svg
│   │       ├── w.svg
│   │       ├── x.svg
│   │       ├── y.svg
│   │       └── z.svg
│   ├── cnn
│   │   ├── data.js
│   │   └── index.js
│   ├── components
│   │   ├── Download.vue
│   │   ├── ParsePanel.vue
│   │   ├── WordIcon
│   │   │   ├── Main.vue
│   │   │   ├── icon-map.ts
│   │   │   └── regist-script.ts
│   │   └── WordsPanel.vue
│   ├── data
│   │   ├── model.json
│   │   ├── model.weights.bin
│   │   ├── test.buffer
│   │   ├── test.json
│   │   ├── train.buffer
│   │   ├── train.json
│   │   └── words.json
│   ├── main.ts
│   ├── shims-vue.d.ts
│   └── utils
│       ├── export-image.ts
│       ├── image-info.ts
│       └── image-ocr.ts
├── tsconfig.json
└── vite.config.ts
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | .DS_Store
3 | dist-ssr
4 | *.local
5 | **/dataset/**
6 |
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | registry = https://registry.npmmirror.com
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Sheikah Text Generator & Translator
2 |
3 | Tool: https://kinglisky.github.io/zelda-words/index.html
4 |
5 | 
6 |
7 | Development:
8 |
9 | ```
10 | yarn && yarn dev
11 | ```
12 |
13 | For implementation details, see:
14 |
15 | - [A simple OCR implementation, by way of Sheikah translation](https://juejin.cn/post/6941003131891220517)
16 | - [CNN handwritten digit classification and Sheikah translation](https://juejin.cn/post/7015918983723352077)
17 |
--------------------------------------------------------------------------------
/binarization-bak.js:
--------------------------------------------------------------------------------
1 | (async function () {
2 | // canvas drawImage is subject to cross-origin restrictions, so fetch the image first and use a blob URL
3 | const loadImage = (url) => {
4 | return fetch(url)
5 | .then(res => res.blob())
6 | .then(blob => URL.createObjectURL(blob))
7 | .then(blobUrl => {
8 |
9 | return new Promise((resolve, reject) => {
10 | const img = new Image();
11 | img.onload = () => resolve(img);
12 | img.onerror = (e) => reject(e);
13 | img.src = blobUrl;
14 | });
15 | });
16 | };
17 |
18 | const drawToCanvas = (image) => {
19 | const { naturalWidth: width, naturalHeight: height } = image;
20 | const canvas = document.createElement('canvas');
21 | canvas.width = width;
22 | canvas.height = height;
23 | const ctx = canvas.getContext('2d');
24 | ctx.drawImage(image, 0, 0);
25 | return canvas;
26 | }
27 |
28 | const canvasToGray = (canvas) => {
29 | const ctx = canvas.getContext('2d');
30 | const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
31 | const calculateGray = (r, g, b) => parseInt(r * 0.299 + g * 0.587 + b * 0.114);
32 | const grayData = [];
33 | for (let x = 0; x < data.width; x++) {
34 | for (let y = 0; y < data.height; y++) {
35 | const idx = (x + y * data.width) * 4;
36 | const r = data.data[idx + 0];
37 | const g = data.data[idx + 1];
38 | const b = data.data[idx + 2];
39 | const gray = calculateGray(r, g, b);
40 | grayData.push(gray);
41 | }
42 | }
43 | return grayData;
44 | };
45 |
46 | // use the average pixel value as the binarization threshold
47 | const average = (grayData) => {
48 | let sum = 0;
49 | for (let i = 0; i < grayData.length; i += 1) {
50 | sum += grayData[i];
51 | }
52 | return sum / grayData.length;
53 | };
54 |
55 | const otsu = (grayData) => {
56 | let ptr = 0;
57 | // histogram: count of each gray level 0-255, initialized to 0
58 | let histData = Array(256).fill(0);
59 | let total = grayData.length;
60 |
61 | while (ptr < total) {
62 | let h = grayData[ptr++];
63 | histData[h]++;
64 | }
65 | // weighted total (gray level x count)
66 | let sum = 0;
67 | for (let i = 0; i < 256; i++) {
68 | sum += i * histData[i];
69 | }
70 | // number of background pixels (below the threshold)
71 | let wB = 0;
72 | // number of foreground pixels (above the threshold)
73 | let wF = 0;
74 | // weighted sum of the background (gray level x count)
75 | let sumB = 0;
76 | // largest between-class variance seen so far
77 | let varMax = 0;
78 | // the threshold
79 | let threshold = 0;
80 |
81 | for (let t = 0; t < 256; t++) {
82 | // accumulate the background count (below the threshold)
83 | wB += histData[t];
84 | if (wB === 0) continue;
85 | // the remaining pixels are foreground (above the threshold)
86 | wF = total - wB;
87 | if (wF === 0) break;
88 | // accumulate the background weighted sum (gray level x count)
89 | sumB += t * histData[t];
90 |
91 | // mean gray level of the background (below the threshold)
92 | let mB = sumB / wB;
93 | // mean gray level of the foreground (above the threshold)
94 | let mF = (sum - sumB) / wF;
95 | // between-class variance
96 | let varBetween = wB * wF * (mB - mF) ** 2;
97 |
98 | if (varBetween > varMax) {
99 | varMax = varBetween;
100 | threshold = t;
101 | }
102 | }
103 |
104 | return threshold;
105 | };
106 |
107 | const binaryzationOutput = (originCanvas, threshold) => {
108 | const ctx = originCanvas.getContext('2d');
109 | const imageData = ctx.getImageData(0, 0, originCanvas.width, originCanvas.height);
110 | const { width, height, data } = imageData;
111 | // the first pixel's value gives the background color
112 | const head = (data[0] + data[1] + data[2]) / 3 | 0;
113 | // if the background is above the threshold, swap the foreground and background output values
114 | const color = head > threshold
115 | ? { foreground: 0, background: 255}
116 | : { foreground: 255, background: 0 };
117 | for (let x = 0; x < width; x++) {
118 | for (let y = 0; y < height; y++) {
119 | const idx = (x + y * width) * 4;
120 | const avg = (data[idx] + data[idx + 1] + data[idx + 2]) / 3 | 0;
121 | const v = avg > threshold ? color.foreground : color.background;
122 | data[idx] = v;
123 | data[idx + 1] = v;
124 | data[idx + 2] = v;
125 | data[idx + 3] = 255;
126 | }
127 | }
128 | ctx.putImageData(imageData, 0, 0);
129 | return originCanvas.toDataURL();
130 | }
131 |
132 | const binaryzationHash = (originCanvas, threshold) => {
133 | const ctx = originCanvas.getContext('2d');
134 | const imageData = ctx.getImageData(0, 0, originCanvas.width, originCanvas.height);
135 | const { width, height, data } = imageData;
136 | // the first pixel's value gives the background color
137 | const head = (data[0] + data[1] + data[2]) / 3 | 0;
138 | // if the background is above the threshold, swap the foreground and background output values
139 | const color = head > threshold
140 | ? { foreground: 0, background: 255}
141 | : { foreground: 255, background: 0 };
142 | const hash = [];
143 | for (let x = 0; x < width; x++) {
144 | for (let y = 0; y < height; y++) {
145 | const idx = (x + y * width) * 4;
146 | const avg = (data[idx] + data[idx + 1] + data[idx + 2]) / 3 | 0;
147 | const v = avg > threshold ? color.foreground : color.background;
148 | hash.push(v ? 1 : 0);
149 | }
150 | }
151 | return hash;
152 | }
153 |
154 | const url = 'https://markdown-write.oss-cn-hangzhou.aliyuncs.com/page.png';
155 | const image = await loadImage(url);
156 | const canvas = drawToCanvas(image);
157 | const grayData = canvasToGray(canvas);
158 | // const threshold = average(grayData);
159 | const threshold = otsu(grayData);
160 | const result = binaryzationOutput(canvas, threshold);
161 | console.log('res', result);
162 | })();
--------------------------------------------------------------------------------
/binarization.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | BINARYZATION OUTPUT
9 |
10 |
33 |
34 | Original
35 |
36 | Grayscale
37 |
38 | Binarized
39 |
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/binarization.js:
--------------------------------------------------------------------------------
1 | (async function () {
2 | const drawToCanvas = (canvas, image) => {
3 | const ctx = canvas.getContext('2d');
4 | ctx.drawImage(image, 0, 0);
5 | return canvas;
6 | }
7 |
8 | const canvasToGray = (canvas) => {
9 | const ctx = canvas.getContext('2d');
10 | const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
11 | const calculateGray = (r, g, b) => parseInt(r * 0.299 + g * 0.587 + b * 0.114);
12 | const grayData = [];
13 | for (let x = 0; x < data.width; x++) {
14 | for (let y = 0; y < data.height; y++) {
15 | const idx = (x + y * data.width) * 4;
16 | const r = data.data[idx + 0];
17 | const g = data.data[idx + 1];
18 | const b = data.data[idx + 2];
19 | const gray = calculateGray(r, g, b);
20 | data.data[idx + 0] = gray;
21 | data.data[idx + 1] = gray;
22 | data.data[idx + 2] = gray;
23 | data.data[idx + 3] = 255;
24 | grayData.push(gray);
25 | }
26 | }
27 | ctx.putImageData(data, 0, 0);
28 | return grayData;
29 | };
30 |
31 | // use the average pixel value as the binarization threshold
32 | const average = (grayData) => {
33 | let sum = 0;
34 | for (let i = 0; i < grayData.length; i += 1) {
35 | sum += grayData[i];
36 | }
37 | return sum / grayData.length;
38 | };
39 |
40 | const otsu = (grayData) => {
41 | let ptr = 0;
42 | // histogram: count of each gray level 0-255, initialized to 0
43 | let histData = Array(256).fill(0);
44 | let total = grayData.length;
45 |
46 | while (ptr < total) {
47 | let h = grayData[ptr++];
48 | histData[h]++;
49 | }
50 | // weighted total (gray level x count)
51 | let sum = 0;
52 | for (let i = 0; i < 256; i++) {
53 | sum += i * histData[i];
54 | }
55 | // number of background pixels (below the threshold)
56 | let wB = 0;
57 | // number of foreground pixels (above the threshold)
58 | let wF = 0;
59 | // weighted sum of the background (gray level x count)
60 | let sumB = 0;
61 | // largest between-class variance seen so far
62 | let varMax = 0;
63 | // the threshold
64 | let threshold = 0;
65 |
66 | for (let t = 0; t < 256; t++) {
67 | // accumulate the background count (below the threshold)
68 | wB += histData[t];
69 | if (wB === 0) continue;
70 | // the remaining pixels are foreground (above the threshold)
71 | wF = total - wB;
72 | if (wF === 0) break;
73 | // accumulate the background weighted sum (gray level x count)
74 | sumB += t * histData[t];
75 |
76 | // mean gray level of the background (below the threshold)
77 | let mB = sumB / wB;
78 | // mean gray level of the foreground (above the threshold)
79 | let mF = (sum - sumB) / wF;
80 | // between-class variance
81 | let varBetween = wB * wF * (mB - mF) ** 2;
82 |
83 | if (varBetween > varMax) {
84 | varMax = varBetween;
85 | threshold = t;
86 | }
87 | }
88 |
89 | return threshold;
90 | };
91 |
92 | const canvasToBinaryzation = (canvas, threshold) => {
93 | const ctx = canvas.getContext('2d');
94 | const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
95 | const { width, height, data } = imageData;
96 | // the first pixel's value gives the background color
97 | const head = data[0];
98 | // if the background is above the threshold, swap the foreground and background output values
99 | const color = head > threshold
100 | ? { foreground: 0, background: 255}
101 | : { foreground: 255, background: 0 };
102 | const bits = [];
103 | for (let x = 0; x < width; x++) {
104 | for (let y = 0; y < height; y++) {
105 | const idx = (x + y * width) * 4;
106 | const avg = (data[idx] + data[idx + 1] + data[idx + 2]) / 3 | 0;
107 | const v = avg > threshold ? color.foreground : color.background;
108 | data[idx] = v;
109 | data[idx + 1] = v;
110 | data[idx + 2] = v;
111 | data[idx + 3] = 255;
112 | bits.push(v > 0 ? 1 : 0);
113 | }
114 | }
115 | ctx.putImageData(imageData, 0, 0);
116 | return bits;
117 | }
118 |
119 | const image = document.querySelector('.input-image');
120 | const grayCanvas = document.querySelector('.output-gray');
121 | const binaryzationCanvas = document.querySelector('.output-binaryzation');
122 | drawToCanvas(grayCanvas, image);
123 | const grayData = canvasToGray(grayCanvas);
124 | const threshold = average(grayData);
125 | // const threshold = otsu(grayData);
126 | drawToCanvas(binaryzationCanvas, grayCanvas);
127 | const bits = canvasToBinaryzation(binaryzationCanvas, threshold);
128 | console.log(bits);
129 | })();
--------------------------------------------------------------------------------
/bit.js:
--------------------------------------------------------------------------------
1 | (function () {
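// Experiment: encode each byte as eight 100px black/white blocks on a canvas (one block per bit), then sample the blocks to read the bits back.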
2 | function paddingLeft(bits) {
3 | return ('00000000' + bits).slice(-8);
4 | }
5 |
6 | function loadImage (url) {
7 | return fetch(url)
8 | .then(res => res.blob())
9 | .then(blob => URL.createObjectURL(blob))
10 | .then(blobUrl => {
11 |
12 | return new Promise((resolve, reject) => {
13 | const img = new Image();
14 | img.onload = () => resolve(img);
15 | img.onerror = (e) => reject(e);
16 | img.src = blobUrl;
17 | });
18 | });
19 | };
20 |
21 | function write(data) {
22 | const bits = data.reduce((s, it) => s + paddingLeft(it.toString(2)), '');
23 | const size = 100;
24 | const width = size * bits.length;
25 | const canvas = document.createElement('canvas');
26 | canvas.width = width;
27 | canvas.height = size;
28 | const ctx = canvas.getContext('2d');
29 | ctx.fillStyle = '#000000';
30 | ctx.fillRect(0, 0, width, size);
31 | for (let i = 0; i < bits.length; i++) {
32 | if (Number(bits[i])) {
33 | ctx.fillStyle = '#020202';
34 | ctx.fillRect(i * size, 0, size, size);
35 | }
36 | }
37 | return canvas.toDataURL();
38 | }
39 |
40 | async function read(url) {
41 | const image = await loadImage(url);
42 | const canvas = document.createElement('canvas');
43 | canvas.width = image.naturalWidth;
44 | canvas.height = image.naturalHeight;
45 | const ctx = canvas.getContext('2d');
46 | ctx.drawImage(image, 0, 0);
47 | const size = 100;
48 | const bits = [];
49 | for (let i = 0; i < 16; i++) {
50 | const imageData = ctx.getImageData(i * size, 0, size, size);
51 | const r = imageData.data[0];
52 | const g = imageData.data[1];
53 | const b = imageData.data[2];
54 | bits.push(r + g + b === 0 ? 0 : 1);
55 | }
56 | return bits;
57 | }
58 |
59 | const url = write([100, 200]);
60 | console.log(url);
61 | read(url).then(bits => console.log(bits));
62 | })();
--------------------------------------------------------------------------------
/cnn.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
10 | ZELDA WORDS(CNN)
11 |
12 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/data/data.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 | const path = require('path');
3 | const util = require('util');
4 | const tf = require('@tensorflow/tfjs');
5 | const readFile = util.promisify(fs.readFile);
6 | const WORDS = require('./words.json');
7 | const TRAIN = require('../src/cnn/train.json');
8 | const TEST = require('../src/cnn/test.json');
9 | const loadBuffer = async (data) => {
10 | const {
11 | count,
12 | width,
13 | height,
14 | buffer: bufferName,
15 | indexs,
16 | } = data;
17 | const buffers = await readFile(path.join(__dirname, '../src/cnn', bufferName));
18 | const images = new Float32Array(buffers);
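// each byte of the raw buffer is one gray pixel; the loop below normalizes it to [0, 1]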
19 | for (let i = 0; i < images.length; i++) {
20 | images[i] = images[i] / 255;
21 | }
22 | const labels = new Int32Array(indexs);
23 | return {
24 | count,
25 | width,
26 | height,
27 | images,
28 | labels,
29 | };
30 | }
31 |
32 | class Dataset {
33 | constructor() {
34 | this.dataset = {};
35 | }
36 |
37 | async loadData() {
38 | const train = await loadBuffer(TRAIN);
39 | const test = await loadBuffer(TEST);
40 | this.dataset = {
41 | train,
42 | test,
43 | };
44 | }
45 |
46 | getData(key) {
47 | const target = this.dataset[key];
48 | const imagesShape = [target.count, target.height, target.width, 1];
49 | return {
50 | images: tf.tensor4d(target.images, imagesShape),
51 | labels: tf.oneHot(tf.tensor1d(target.labels, 'int32'), WORDS.length).toFloat(),
52 | };
53 | }
54 |
55 | getTrainData() {
56 | return this.getData('train');
57 | }
58 |
59 | getTestData() {
60 | return this.getData('test');
61 | }
62 | }
63 |
64 | module.exports = new Dataset();
65 |
--------------------------------------------------------------------------------
/data/index.js:
--------------------------------------------------------------------------------
1 | const data = require('./data');
2 | const model = require('./model');
3 | const yargs = require('yargs/yargs');
4 | const { hideBin } = require('yargs/helpers');
5 | const argv = yargs(hideBin(process.argv)).argv;
6 |
7 | async function run(epochs, batchSize, modelSavePath) {
8 | await data.loadData();
9 |
10 | model.summary();
11 |
12 | const {
13 | images: trainImages,
14 | labels: trainLabels,
15 | } = data.getTrainData();
16 | // console.log({ trainImages, trainLabels });
17 | const validationSplit = 0.15;
18 | await model.fit(trainImages, trainLabels, {
19 | verbose: 1,
20 | epochs,
21 | batchSize,
22 | validationSplit,
23 | callbacks: {
24 | onBatchEnd: async (batch, logs) => {
25 | console.log(`onBatchEnd: batch ${batch} ---> loss: ${logs.loss} acc: ${logs.acc}`);
26 | },
27 | onEpochEnd: async (epoch, logs) => {
28 | console.log(`onEpochEnd: epoch ${epoch} ---> val_loss: ${logs.val_loss} val_acc: ${logs.val_acc}`);
29 | }
30 | }
31 | });
32 |
33 | const {
34 | images: testImages,
35 | labels: testLabels,
36 | } = data.getTestData();
37 |
38 | const evalOutput = model.evaluate(testImages, testLabels);
39 |
40 | console.log(
41 | `\nEvaluation result:\n` +
42 | ` Loss = ${evalOutput[0].dataSync()[0].toFixed(3)}; ` +
43 | `Accuracy = ${evalOutput[1].dataSync()[0].toFixed(3)}`);
44 |
45 | if (modelSavePath) {
46 | await model.save(`file://${modelSavePath}`);
47 | console.log(`Saved model to path: ${modelSavePath}`);
48 | }
49 | }
50 |
51 | const epochs = Number(argv.epochs || 1);
52 | const batchSize = Number(argv.batch_size || 10);
53 | const modelSavePath = argv.model_save_path || '';
54 | console.log({
55 | epochs,
56 | batchSize,
57 | modelSavePath,
58 | });
59 | run(epochs, batchSize, modelSavePath);
60 |
--------------------------------------------------------------------------------
/data/model.js:
--------------------------------------------------------------------------------
1 | const tf = require('@tensorflow/tfjs');
2 | const WORDS = require('./words.json');
3 |
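// Architecture: two 3x3 conv pairs (32 filters, then 64), each pair followed by 2x2 max pooling,
// then dropout, a 512-unit dense layer, dropout again, and a softmax over the WORDS classes.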
4 | const model = tf.sequential();
5 | model.add(tf.layers.conv2d({
6 | inputShape: [28, 28, 1],
7 | filters: 32,
8 | kernelSize: 3,
9 | activation: 'relu',
10 | }));
11 | model.add(tf.layers.conv2d({
12 | filters: 32,
13 | kernelSize: 3,
14 | activation: 'relu',
15 | }));
16 | model.add(tf.layers.maxPooling2d({ poolSize: [2, 2] }));
17 | model.add(tf.layers.conv2d({
18 | filters: 64,
19 | kernelSize: 3,
20 | activation: 'relu',
21 | }));
22 | model.add(tf.layers.conv2d({
23 | filters: 64,
24 | kernelSize: 3,
25 | activation: 'relu',
26 | }));
27 | model.add(tf.layers.maxPooling2d({ poolSize: [2, 2] }));
28 | model.add(tf.layers.flatten());
29 | model.add(tf.layers.dropout({ rate: 0.25 }));
30 | model.add(tf.layers.dense({ units: 512, activation: 'relu' }));
31 | model.add(tf.layers.dropout({ rate: 0.5 }));
32 | model.add(tf.layers.dense({ units: WORDS.length, activation: 'softmax' }));
33 |
34 | const optimizer = 'rmsprop';
35 | model.compile({
36 | optimizer: optimizer,
37 | loss: 'categoricalCrossentropy',
38 | metrics: ['accuracy'],
39 | });
40 |
41 | module.exports = model;
42 |
--------------------------------------------------------------------------------
/data/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "cnn",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "author": "",
10 | "license": "ISC",
11 | "dependencies": {
12 | "@tensorflow/tfjs": "^3.9.0",
13 | "cheerio": "^1.0.0-rc.10",
14 | "color": "^4.0.1",
15 | "sharp": "^0.28.3",
16 | "shelljs": "^0.8.4"
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/data/read.js:
--------------------------------------------------------------------------------
1 | const util = require('util');
2 | const path = require('path');
3 | const fs = require('fs');
4 | const sharp = require('sharp');
5 | const shell = require('shelljs');
6 | const readFile = util.promisify(fs.readFile);
7 | const data = require('../src/cnn/test.json');
8 |
9 | (async function main() {
10 | const {
11 | count,
12 | width,
13 | height,
14 | buffer: bufferName,
15 | indexs,
16 | } = data;
17 | const buffer = await readFile(path.join(__dirname, '../src/cnn', bufferName));
18 | const chunkSize = width * height;
19 | const options = {
20 | raw: {
21 | width,
22 | height,
23 | channels: 1
24 | }
25 | };
26 | let i = 0;
27 | while (i < count) {
28 | const start = i * chunkSize;
29 | const end = start + chunkSize;
30 | const data = buffer.slice(start, end);
31 | const targetIndex = indexs[i];
32 | const fileName = `${targetIndex}.png`;
33 | await sharp(data, options).png().toFile(fileName);
34 | await shell.exec(`open ${fileName}`);
35 | i+=1;
36 | }
37 | })();
38 |
--------------------------------------------------------------------------------
/data/write.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | const util = require('util');
3 | const path = require('path');
4 | const fs = require('fs');
5 | const sharp = require('sharp');
6 | const cheerio = require('cheerio');
7 | const Color = require('color');
8 | const readFile = util.promisify(fs.readFile);
9 | const writeFile = util.promisify(fs.writeFile);
10 | const tf = require('@tensorflow/tfjs');
11 | const WORDS = require('../src/data/words.json');
12 | const yargs = require('yargs/yargs');
13 | const { hideBin } = require('yargs/helpers');
14 | const argv = yargs(hideBin(process.argv)).argv;
15 |
16 | const WORDS_INDEXS = Array.from({ length: WORDS.length }).map((_, i) => i);
17 | const IMAGE_WIDTH = Number(argv.size || 28);
18 | const IMAGE_HEIGHT = Number(argv.size || 28);
19 | const COUNT = Number(argv.count || 1);
20 | const NAME = argv.name || 'temp';
21 |
22 | console.log({
23 | COUNT,
24 | NAME,
25 | IMAGE_HEIGHT,
26 | IMAGE_WIDTH,
27 | });
28 |
29 | function randomValue(value, base = 0) {
30 | return Math.floor(Math.random() * value + base);
31 | }
32 |
33 | function fillSvg(svg, color) {
34 | const $ = cheerio.load(svg, { xmlMode: true });
35 | const fill = Color(color).hex();
36 | $('svg').attr('fill', fill);
37 | return $.xml();
38 | }
39 |
40 | async function loadSvg(word) {
41 | const svgPath = path.join(__dirname, '../src/assets', word.path);
42 | const data = await readFile(svgPath, 'utf8');
43 | const svgContent = fillSvg(data, {
44 | r: 255,
45 | g: 255,
46 | b: 255,
47 | });
48 | return Buffer.from(svgContent);
49 | }
50 |
51 | async function createWordImage(word) {
52 | const size = randomValue(200, 24);
53 | const svg = await loadSvg(word);
54 | // render the glyph at a random size
55 | const resizeImageBuffer = await sharp(svg)
56 | .resize(size, size)
57 | .trim()
58 | .png().toBuffer();
59 | // normalize to 28 x 28
60 | const wordImageBuffer = await sharp(resizeImageBuffer)
61 | .resize(IMAGE_WIDTH, IMAGE_HEIGHT)
62 | .png().toBuffer();
63 | // glyph background (transparent black)
64 | const baseImageBuffer = await sharp({
65 | create: {
66 | width: IMAGE_WIDTH,
67 | height: IMAGE_HEIGHT,
68 | channels: 4,
69 | background: {
70 | r: 0,
71 | g: 0,
72 | b: 0,
73 | alpha: 0,
74 | },
75 | }
76 | }).png().toBuffer();
77 | // composite the glyph image onto the background
78 | const image = await sharp(baseImageBuffer).composite([{
79 | input: wordImageBuffer,
80 | top: 0,
81 | left: 0,
82 | }]).sharpen().raw().toBuffer();
83 | return image;
84 | }
85 |
86 | (async function main() {
87 | let data = null;
88 | const indexs = [];
89 | for (let i = 0; i < COUNT; i++) {
90 | console.log('batch create images --------------------------------------> ', i);
91 | // shuffle the character order
92 | tf.util.shuffle(WORDS_INDEXS);
93 | const createWords = WORDS_INDEXS.map(async (index) => {
94 | const word = WORDS[index];
95 | const buffer = await createWordImage(word);
96 | return {
97 | index,
98 | buffer,
99 | };
100 | });
101 | const res = await Promise.all(createWords);
102 | res.forEach(({ index, buffer }) => {
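// per glyph: premultiply each pixel by its alpha, then collapse RGBA to a single gray byte
// (same 0.299 / 0.587 / 0.114 weights as the OCR grayscale step)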
103 | const pixs = [];
104 | for (let i = 0; i < buffer.length; i += 4) {
105 | const a = buffer[i + 3] / 255;
106 | const r = buffer[i] * a;
107 | const g = buffer[i + 1] * a;
108 | const b = buffer[i + 2] * a;
109 | pixs.push(Math.floor(r * 0.299 + g * 0.587 + b * 0.114));
110 | }
111 | indexs.push(index);
112 | const pixsBuffer = Buffer.from(pixs);
113 | data = data ? Buffer.concat([data, pixsBuffer]) : pixsBuffer;
114 | });
115 | const meta = {
116 | indexs,
117 | count: (i + 1) * WORDS_INDEXS.length,
118 | width: IMAGE_WIDTH,
119 | height: IMAGE_HEIGHT,
120 | buffer: `${NAME}.buffer`,
121 | };
122 | await writeFile(path.join(__dirname, `../src/data/${NAME}.buffer`), data);
123 | await writeFile(path.join(__dirname, `../src/data/${NAME}.json`), JSON.stringify(meta));
124 | console.log(`batch save images --------------------------------------> ${i}, count ${meta.count}`);
125 | }
126 | console.log('done!');
127 | })();
128 |
--------------------------------------------------------------------------------
/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/demo.gif
--------------------------------------------------------------------------------
/docs/assets/index.96676500.css:
--------------------------------------------------------------------------------
1 | .word-icon{overflow:hidden;width:1em;height:1em;padding:0;margin:0;fill:currentColor}.words-panel__groups,.words-panel--vertical{display:flex}.words-panel--vertical .words-panel__groups{flex-direction:column}.parse-panel{position:fixed;top:50%;left:50%;overflow:hidden;width:80%;max-width:600px;height:50%;background:#fff;border-radius:4px;transform:translate(-50%,-50%)}.parse-panel__close{position:absolute;top:0;right:0;display:block;width:36px;height:36px;color:#000;font-size:36px;line-height:36px;text-align:center;cursor:pointer}.parse-panel__result{display:block;width:100%;height:100%;object-fit:contain}.parse-panel__message{display:flex;width:100%;height:100%;align-items:center;justify-content:center}.download{position:fixed;top:50%;left:50%;overflow:hidden;width:80%;max-width:600px;height:60%;background:#fff;border-radius:4px;transform:translate(-50%,-50%)}.download img{display:block;width:100%;height:100%;object-fit:contain}*{box-sizing:border-box;padding:0;margin:0}body{width:100vw;background:#000200}#app{display:flex;justify-content:center;overflow:hidden;width:100vw;height:100vh;font-family:Avenir,Helvetica,Arial,sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.container{display:flex;flex-direction:column;width:100%;max-width:1920px;height:100%}.header{display:flex;width:100%;height:60px;border-bottom:1px solid #fff}.header__group{display:flex;align-items:center;flex:1;height:100%}.header__item{position:relative;display:flex;justify-content:center;align-items:center;flex:1;height:100%;color:#fff;border-top:1px solid #fff;border-right:1px solid #fff}.header__item span{margin-right:8px}.header__color{display:block;width:100%;height:100%;border:none}.header__button{cursor:pointer}.header__upload{position:absolute;top:0;left:0;width:100%;height:100%;outline:none;cursor:pointer;opacity:0}.content{display:flex;align-items:center;flex:1;overflow:hidden;width:100%}.words{flex:1;box-sizing:border-box;overflow:hidden;height:100%}.words textarea{display:block;width:100%;height:100%;padding:32px;color:#fff;font-size:14px;line-height:2;background-color:transparent;border:none;border-right:1px solid #fff;outline:none;resize:none}.results{display:flex;flex:2;overflow-x:auto;overflow-y:auto;height:100%}@media (max-width: 768px){.container{flex-direction:column-reverse}.header{display:block;height:auto}.header__group{display:flex;align-items:center;height:60px}.header__item{display:flex;justify-content:center;align-items:center;height:100%;border-bottom:1px solid #fff}.content{flex-direction:column-reverse;flex:1}.words{width:100%;height:200px}.results{justify-content:center;align-items:center;width:100%}}
2 |
--------------------------------------------------------------------------------
/docs/assets/map.efa40bbb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/docs/assets/map.efa40bbb.png
--------------------------------------------------------------------------------
/docs/assets/model.368228f4.json:
--------------------------------------------------------------------------------
1 | {
2 | "modelTopology": {
3 | "class_name": "Sequential",
4 | "config": {
5 | "name": "sequential_1",
6 | "layers": [
7 | {
8 | "class_name": "Conv2D",
9 | "config": {
10 | "filters": 16,
11 | "kernel_initializer": {
12 | "class_name": "VarianceScaling",
13 | "config": {
14 | "scale": 1,
15 | "mode": "fan_avg",
16 | "distribution": "normal",
17 | "seed": null
18 | }
19 | },
20 | "kernel_regularizer": null,
21 | "kernel_constraint": null,
22 | "kernel_size": [
23 | 3,
24 | 3
25 | ],
26 | "strides": [
27 | 1,
28 | 1
29 | ],
30 | "padding": "valid",
31 | "data_format": "channels_last",
32 | "dilation_rate": [
33 | 1,
34 | 1
35 | ],
36 | "activation": "relu",
37 | "use_bias": true,
38 | "bias_initializer": {
39 | "class_name": "Zeros",
40 | "config": {}
41 | },
42 | "bias_regularizer": null,
43 | "activity_regularizer": null,
44 | "bias_constraint": null,
45 | "name": "conv2d_Conv2D1",
46 | "trainable": true,
47 | "batch_input_shape": [
48 | null,
49 | 28,
50 | 28,
51 | 1
52 | ],
53 | "dtype": "float32"
54 | }
55 | },
56 | {
57 | "class_name": "MaxPooling2D",
58 | "config": {
59 | "pool_size": [
60 | 2,
61 | 2
62 | ],
63 | "padding": "valid",
64 | "strides": [
65 | 2,
66 | 2
67 | ],
68 | "data_format": "channels_last",
69 | "name": "max_pooling2d_MaxPooling2D1",
70 | "trainable": true
71 | }
72 | },
73 | {
74 | "class_name": "Conv2D",
75 | "config": {
76 | "filters": 32,
77 | "kernel_initializer": {
78 | "class_name": "VarianceScaling",
79 | "config": {
80 | "scale": 1,
81 | "mode": "fan_avg",
82 | "distribution": "normal",
83 | "seed": null
84 | }
85 | },
86 | "kernel_regularizer": null,
87 | "kernel_constraint": null,
88 | "kernel_size": [
89 | 3,
90 | 3
91 | ],
92 | "strides": [
93 | 1,
94 | 1
95 | ],
96 | "padding": "valid",
97 | "data_format": "channels_last",
98 | "dilation_rate": [
99 | 1,
100 | 1
101 | ],
102 | "activation": "relu",
103 | "use_bias": true,
104 | "bias_initializer": {
105 | "class_name": "Zeros",
106 | "config": {}
107 | },
108 | "bias_regularizer": null,
109 | "activity_regularizer": null,
110 | "bias_constraint": null,
111 | "name": "conv2d_Conv2D2",
112 | "trainable": true
113 | }
114 | },
115 | {
116 | "class_name": "MaxPooling2D",
117 | "config": {
118 | "pool_size": [
119 | 2,
120 | 2
121 | ],
122 | "padding": "valid",
123 | "strides": [
124 | 2,
125 | 2
126 | ],
127 | "data_format": "channels_last",
128 | "name": "max_pooling2d_MaxPooling2D2",
129 | "trainable": true
130 | }
131 | },
132 | {
133 | "class_name": "Conv2D",
134 | "config": {
135 | "filters": 32,
136 | "kernel_initializer": {
137 | "class_name": "VarianceScaling",
138 | "config": {
139 | "scale": 1,
140 | "mode": "fan_avg",
141 | "distribution": "normal",
142 | "seed": null
143 | }
144 | },
145 | "kernel_regularizer": null,
146 | "kernel_constraint": null,
147 | "kernel_size": [
148 | 3,
149 | 3
150 | ],
151 | "strides": [
152 | 1,
153 | 1
154 | ],
155 | "padding": "valid",
156 | "data_format": "channels_last",
157 | "dilation_rate": [
158 | 1,
159 | 1
160 | ],
161 | "activation": "relu",
162 | "use_bias": true,
163 | "bias_initializer": {
164 | "class_name": "Zeros",
165 | "config": {}
166 | },
167 | "bias_regularizer": null,
168 | "activity_regularizer": null,
169 | "bias_constraint": null,
170 | "name": "conv2d_Conv2D3",
171 | "trainable": true
172 | }
173 | },
174 | {
175 | "class_name": "Flatten",
176 | "config": {
177 | "name": "flatten_Flatten1",
178 | "trainable": true
179 | }
180 | },
181 | {
182 | "class_name": "Dense",
183 | "config": {
184 | "units": 64,
185 | "activation": "relu",
186 | "use_bias": true,
187 | "kernel_initializer": {
188 | "class_name": "VarianceScaling",
189 | "config": {
190 | "scale": 1,
191 | "mode": "fan_avg",
192 | "distribution": "normal",
193 | "seed": null
194 | }
195 | },
196 | "bias_initializer": {
197 | "class_name": "Zeros",
198 | "config": {}
199 | },
200 | "kernel_regularizer": null,
201 | "bias_regularizer": null,
202 | "activity_regularizer": null,
203 | "kernel_constraint": null,
204 | "bias_constraint": null,
205 | "name": "dense_Dense1",
206 | "trainable": true
207 | }
208 | },
209 | {
210 | "class_name": "Dense",
211 | "config": {
212 | "units": 40,
213 | "activation": "softmax",
214 | "use_bias": true,
215 | "kernel_initializer": {
216 | "class_name": "VarianceScaling",
217 | "config": {
218 | "scale": 1,
219 | "mode": "fan_avg",
220 | "distribution": "normal",
221 | "seed": null
222 | }
223 | },
224 | "bias_initializer": {
225 | "class_name": "Zeros",
226 | "config": {}
227 | },
228 | "kernel_regularizer": null,
229 | "bias_regularizer": null,
230 | "activity_regularizer": null,
231 | "kernel_constraint": null,
232 | "bias_constraint": null,
233 | "name": "dense_Dense2",
234 | "trainable": true
235 | }
236 | }
237 | ]
238 | },
239 | "keras_version": "tfjs-layers 3.9.0",
240 | "backend": "tensor_flow.js"
241 | },
242 | "format": "layers-model",
243 | "generatedBy": "TensorFlow.js tfjs-layers v3.9.0",
244 | "convertedBy": null,
245 | "weightsManifest": [
246 | {
247 | "paths": [
248 | "./model.weights.bin"
249 | ],
250 | "weights": [
251 | {
252 | "name": "conv2d_Conv2D1/kernel",
253 | "shape": [
254 | 3,
255 | 3,
256 | 1,
257 | 16
258 | ],
259 | "dtype": "float32"
260 | },
261 | {
262 | "name": "conv2d_Conv2D1/bias",
263 | "shape": [
264 | 16
265 | ],
266 | "dtype": "float32"
267 | },
268 | {
269 | "name": "conv2d_Conv2D2/kernel",
270 | "shape": [
271 | 3,
272 | 3,
273 | 16,
274 | 32
275 | ],
276 | "dtype": "float32"
277 | },
278 | {
279 | "name": "conv2d_Conv2D2/bias",
280 | "shape": [
281 | 32
282 | ],
283 | "dtype": "float32"
284 | },
285 | {
286 | "name": "conv2d_Conv2D3/kernel",
287 | "shape": [
288 | 3,
289 | 3,
290 | 32,
291 | 32
292 | ],
293 | "dtype": "float32"
294 | },
295 | {
296 | "name": "conv2d_Conv2D3/bias",
297 | "shape": [
298 | 32
299 | ],
300 | "dtype": "float32"
301 | },
302 | {
303 | "name": "dense_Dense1/kernel",
304 | "shape": [
305 | 288,
306 | 64
307 | ],
308 | "dtype": "float32"
309 | },
310 | {
311 | "name": "dense_Dense1/bias",
312 | "shape": [
313 | 64
314 | ],
315 | "dtype": "float32"
316 | },
317 | {
318 | "name": "dense_Dense2/kernel",
319 | "shape": [
320 | 64,
321 | 40
322 | ],
323 | "dtype": "float32"
324 | },
325 | {
326 | "name": "dense_Dense2/bias",
327 | "shape": [
328 | 40
329 | ],
330 | "dtype": "float32"
331 | }
332 | ]
333 | }
334 | ]
335 | }
--------------------------------------------------------------------------------
/docs/assets/test.137e7c6f.buffer:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/docs/assets/test.137e7c6f.buffer
--------------------------------------------------------------------------------
/docs/assets/train.9ab62db8.buffer:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/docs/assets/train.9ab62db8.buffer
--------------------------------------------------------------------------------
/docs/cnn.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
10 | ZELDA WORDS(CNN)
11 |
12 |
13 |
14 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/docs/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/docs/favicon.ico
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | ZELDA WORDS
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/model.json:
--------------------------------------------------------------------------------
1 | {
2 | "modelTopology": {
3 | "class_name": "Sequential",
4 | "config": {
5 | "name": "sequential_1",
6 | "layers": [
7 | {
8 | "class_name": "Conv2D",
9 | "config": {
10 | "filters": 16,
11 | "kernel_initializer": {
12 | "class_name": "VarianceScaling",
13 | "config": {
14 | "scale": 1,
15 | "mode": "fan_avg",
16 | "distribution": "normal",
17 | "seed": null
18 | }
19 | },
20 | "kernel_regularizer": null,
21 | "kernel_constraint": null,
22 | "kernel_size": [
23 | 3,
24 | 3
25 | ],
26 | "strides": [
27 | 1,
28 | 1
29 | ],
30 | "padding": "valid",
31 | "data_format": "channels_last",
32 | "dilation_rate": [
33 | 1,
34 | 1
35 | ],
36 | "activation": "relu",
37 | "use_bias": true,
38 | "bias_initializer": {
39 | "class_name": "Zeros",
40 | "config": {}
41 | },
42 | "bias_regularizer": null,
43 | "activity_regularizer": null,
44 | "bias_constraint": null,
45 | "name": "conv2d_Conv2D1",
46 | "trainable": true,
47 | "batch_input_shape": [
48 | null,
49 | 28,
50 | 28,
51 | 1
52 | ],
53 | "dtype": "float32"
54 | }
55 | },
56 | {
57 | "class_name": "MaxPooling2D",
58 | "config": {
59 | "pool_size": [
60 | 2,
61 | 2
62 | ],
63 | "padding": "valid",
64 | "strides": [
65 | 2,
66 | 2
67 | ],
68 | "data_format": "channels_last",
69 | "name": "max_pooling2d_MaxPooling2D1",
70 | "trainable": true
71 | }
72 | },
73 | {
74 | "class_name": "Conv2D",
75 | "config": {
76 | "filters": 32,
77 | "kernel_initializer": {
78 | "class_name": "VarianceScaling",
79 | "config": {
80 | "scale": 1,
81 | "mode": "fan_avg",
82 | "distribution": "normal",
83 | "seed": null
84 | }
85 | },
86 | "kernel_regularizer": null,
87 | "kernel_constraint": null,
88 | "kernel_size": [
89 | 3,
90 | 3
91 | ],
92 | "strides": [
93 | 1,
94 | 1
95 | ],
96 | "padding": "valid",
97 | "data_format": "channels_last",
98 | "dilation_rate": [
99 | 1,
100 | 1
101 | ],
102 | "activation": "relu",
103 | "use_bias": true,
104 | "bias_initializer": {
105 | "class_name": "Zeros",
106 | "config": {}
107 | },
108 | "bias_regularizer": null,
109 | "activity_regularizer": null,
110 | "bias_constraint": null,
111 | "name": "conv2d_Conv2D2",
112 | "trainable": true
113 | }
114 | },
115 | {
116 | "class_name": "MaxPooling2D",
117 | "config": {
118 | "pool_size": [
119 | 2,
120 | 2
121 | ],
122 | "padding": "valid",
123 | "strides": [
124 | 2,
125 | 2
126 | ],
127 | "data_format": "channels_last",
128 | "name": "max_pooling2d_MaxPooling2D2",
129 | "trainable": true
130 | }
131 | },
132 | {
133 | "class_name": "Conv2D",
134 | "config": {
135 | "filters": 32,
136 | "kernel_initializer": {
137 | "class_name": "VarianceScaling",
138 | "config": {
139 | "scale": 1,
140 | "mode": "fan_avg",
141 | "distribution": "normal",
142 | "seed": null
143 | }
144 | },
145 | "kernel_regularizer": null,
146 | "kernel_constraint": null,
147 | "kernel_size": [
148 | 3,
149 | 3
150 | ],
151 | "strides": [
152 | 1,
153 | 1
154 | ],
155 | "padding": "valid",
156 | "data_format": "channels_last",
157 | "dilation_rate": [
158 | 1,
159 | 1
160 | ],
161 | "activation": "relu",
162 | "use_bias": true,
163 | "bias_initializer": {
164 | "class_name": "Zeros",
165 | "config": {}
166 | },
167 | "bias_regularizer": null,
168 | "activity_regularizer": null,
169 | "bias_constraint": null,
170 | "name": "conv2d_Conv2D3",
171 | "trainable": true
172 | }
173 | },
174 | {
175 | "class_name": "Flatten",
176 | "config": {
177 | "name": "flatten_Flatten1",
178 | "trainable": true
179 | }
180 | },
181 | {
182 | "class_name": "Dense",
183 | "config": {
184 | "units": 64,
185 | "activation": "relu",
186 | "use_bias": true,
187 | "kernel_initializer": {
188 | "class_name": "VarianceScaling",
189 | "config": {
190 | "scale": 1,
191 | "mode": "fan_avg",
192 | "distribution": "normal",
193 | "seed": null
194 | }
195 | },
196 | "bias_initializer": {
197 | "class_name": "Zeros",
198 | "config": {}
199 | },
200 | "kernel_regularizer": null,
201 | "bias_regularizer": null,
202 | "activity_regularizer": null,
203 | "kernel_constraint": null,
204 | "bias_constraint": null,
205 | "name": "dense_Dense1",
206 | "trainable": true
207 | }
208 | },
209 | {
210 | "class_name": "Dense",
211 | "config": {
212 | "units": 40,
213 | "activation": "softmax",
214 | "use_bias": true,
215 | "kernel_initializer": {
216 | "class_name": "VarianceScaling",
217 | "config": {
218 | "scale": 1,
219 | "mode": "fan_avg",
220 | "distribution": "normal",
221 | "seed": null
222 | }
223 | },
224 | "bias_initializer": {
225 | "class_name": "Zeros",
226 | "config": {}
227 | },
228 | "kernel_regularizer": null,
229 | "bias_regularizer": null,
230 | "activity_regularizer": null,
231 | "kernel_constraint": null,
232 | "bias_constraint": null,
233 | "name": "dense_Dense2",
234 | "trainable": true
235 | }
236 | }
237 | ]
238 | },
239 | "keras_version": "tfjs-layers 3.9.0",
240 | "backend": "tensor_flow.js"
241 | },
242 | "format": "layers-model",
243 | "generatedBy": "TensorFlow.js tfjs-layers v3.9.0",
244 | "convertedBy": null,
245 | "weightsManifest": [
246 | {
247 | "paths": [
248 | "./model.weights.bin"
249 | ],
250 | "weights": [
251 | {
252 | "name": "conv2d_Conv2D1/kernel",
253 | "shape": [
254 | 3,
255 | 3,
256 | 1,
257 | 16
258 | ],
259 | "dtype": "float32"
260 | },
261 | {
262 | "name": "conv2d_Conv2D1/bias",
263 | "shape": [
264 | 16
265 | ],
266 | "dtype": "float32"
267 | },
268 | {
269 | "name": "conv2d_Conv2D2/kernel",
270 | "shape": [
271 | 3,
272 | 3,
273 | 16,
274 | 32
275 | ],
276 | "dtype": "float32"
277 | },
278 | {
279 | "name": "conv2d_Conv2D2/bias",
280 | "shape": [
281 | 32
282 | ],
283 | "dtype": "float32"
284 | },
285 | {
286 | "name": "conv2d_Conv2D3/kernel",
287 | "shape": [
288 | 3,
289 | 3,
290 | 32,
291 | 32
292 | ],
293 | "dtype": "float32"
294 | },
295 | {
296 | "name": "conv2d_Conv2D3/bias",
297 | "shape": [
298 | 32
299 | ],
300 | "dtype": "float32"
301 | },
302 | {
303 | "name": "dense_Dense1/kernel",
304 | "shape": [
305 | 288,
306 | 64
307 | ],
308 | "dtype": "float32"
309 | },
310 | {
311 | "name": "dense_Dense1/bias",
312 | "shape": [
313 | 64
314 | ],
315 | "dtype": "float32"
316 | },
317 | {
318 | "name": "dense_Dense2/kernel",
319 | "shape": [
320 | 64,
321 | 40
322 | ],
323 | "dtype": "float32"
324 | },
325 | {
326 | "name": "dense_Dense2/bias",
327 | "shape": [
328 | 40
329 | ],
330 | "dtype": "float32"
331 | }
332 | ]
333 | }
334 | ]
335 | }
--------------------------------------------------------------------------------
/docs/model.weights.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/docs/model.weights.bin
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | ZELDA WORDS
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/lsb.js:
--------------------------------------------------------------------------------
1 | (async function () {
2 | function loadImage (url) {
3 | return fetch(url)
4 | .then(res => res.blob())
5 | .then(blob => URL.createObjectURL(blob))
6 | .then(blobUrl => {
7 |
8 | return new Promise((resolve, reject) => {
9 | const img = new Image();
10 | img.onload = () => resolve(img);
11 | img.onerror = (e) => reject(e);
12 | img.src = blobUrl;
13 | });
14 | });
15 | };
16 |
17 | function createCanvas(width, height) {
18 | const canvas = document.createElement('canvas');
19 | canvas.width = width;
20 | canvas.height = height;
21 | return canvas;
22 | }
23 |
24 | function getImageData(image) {
25 | const { naturalWidth, naturalHeight } = image;
26 | const canvas = createCanvas(naturalWidth, naturalHeight);
27 | const ctx = canvas.getContext('2d');
28 | ctx.drawImage(image, 0, 0);
29 | return ctx.getImageData(0, 0, naturalWidth, naturalHeight);
30 | }
31 |
32 | function putImageData(imageData) {
33 | const { width, height } = imageData;
34 | const canvas = createCanvas(width, height);
35 | const ctx = canvas.getContext('2d');
36 | ctx.putImageData(imageData, 0, 0);
37 | return canvas;
38 | }
39 |
40 | function writeMetaInfo(baseImageData, qrcodeImageData) {
41 | const { width, height, data } = qrcodeImageData;
42 | for (let x = 0; x < width; x++) {
43 | for (let y = 0; y < height; y++) {
44 | // hide the information in the R channel
45 | const r = (x + y * width) * 4;
46 | const v = data[r];
47 | // white QR pixels (background) are recorded as 1, black pixels (content) as 0
48 | const bit = v === 255 ? 1 : 0;
49 | // if the parity of the base image's R value disagrees with the QR bit, nudge it by one so they match
50 | if (baseImageData.data[r] % 2 !== bit) {
51 | baseImageData.data[r] += bit ? 1 : -1;
52 | }
53 | }
54 | }
55 | return baseImageData;
56 | }
57 |
58 | function readMetaInfo(imageData) {
59 | const { width, height, data } = imageData;
60 | const qrcodeImageData = new ImageData(width, height);
61 | for (let x = 0; x < width; x++) {
62 | for (let y = 0; y < height; y++) {
63 | // read the hidden bit back from the parity of the R channel
64 | const r = (x + y * width) * 4;
65 | const v = data[r] % 2 === 0 ? 0 : 255;
66 | qrcodeImageData.data[r] = v;
67 | qrcodeImageData.data[r + 1] = v;
68 | qrcodeImageData.data[r + 2] = v;
69 | qrcodeImageData.data[r + 3] = 255;
70 | }
71 | }
72 | return qrcodeImageData;
73 | }
74 |
75 | const baseImage = await loadImage('https://gd-filems.dancf.com/mcm79j/mcm79j/05654/cd68f955-0f4d-4e42-af93-fe8ae82599e3555415.png');
76 | const qrcodeImage = await loadImage('https://gd-filems.dancf.com/mcm79j/mcm79j/05654/f3ffa72f-2377-4c8c-b30f-6d261f5b6905555476.jpg');
77 | const resultImageData = writeMetaInfo(getImageData(baseImage), getImageData(qrcodeImage));
78 | const resultCanvas = putImageData(resultImageData);
79 |
80 | // const resultDataUrl = resultCanvas.toDataURL('image/png');
81 | const resultDataUrl = resultCanvas.toDataURL('image/jpeg', 1);
82 | console.log(resultDataUrl);
83 | const hideMetaImage = await loadImage(resultDataUrl);
84 | const readData = readMetaInfo(getImageData(hideMetaImage));
85 | const readCanvas = putImageData(readData);
86 | console.log(readCanvas.toDataURL());
87 | })();
--------------------------------------------------------------------------------
/map.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/map.jpeg
--------------------------------------------------------------------------------
/ocr.md:
--------------------------------------------------------------------------------
1 | ## Convolutional Neural Network Implementation
2 |
3 | A convolutional neural network implementation has been added~
4 | ## Sheikah Script
5 |
6 | 
7 |
8 | Zelda players will recognize it at once: Sheikah script is a fictional writing system from The Legend of Zelda: Breath of the Wild, and traces of it can be found all over Sheikah architecture. I previously built a simple [Sheikah text generation and translation tool](http://nlush.com/zelda-words), but the crucial text-parsing part was not implemented elegantly: key information was hidden inside the exported image as an invisible watermark, and that hidden data is easily lost once the image is compressed, causing parsing to fail. If you are interested, see the previous article: [Hacking together a Zelda Sheikah text converter](https://juejin.cn/post/6935836863844319239).
9 |
10 |
11 | I later looked into how [OCR](https://zh.wikipedia.org/wiki/%E5%85%89%E5%AD%A6%E5%AD%97%E7%AC%A6%E8%AF%86%E5%88%AB) is implemented and hand-rolled a simple OCR parser for Sheikah script. Here is a walkthrough of the implementation; my skills are limited, so pointers are welcome~
12 |
13 | > Optical Character Recognition (OCR) is the process of analyzing and recognizing an image of text in order to extract the characters and layout information it contains.
14 |
15 | Links:
16 | - Demo: [https://kinglisky.github.io/zelda-words](https://kinglisky.github.io/zelda-words/index.html)
17 | - Repository: [https://github.com/kinglisky/zelda-words](https://github.com/kinglisky/zelda-words)
18 |
19 | Fictional scripts are usually based on real ones. Sheikah glyphs correspond to the English letters, the digits, and four special symbols (40 characters in total). The rules are simple; the full mapping is in the image below:
20 |
21 | 
22 |
23 | An exported Sheikah image looks like this:
24 |
25 | 
26 | 
27 |
28 | Let's get started~
29 |
30 | ## Image Binarization
31 |
32 | The color and the text layout of an exported Sheikah image are both unpredictable, so we first normalize the image. Since we only care about the text content, we start by eliminating color interference and convert every image to a uniform black-and-white one.
33 |
34 | This process is called [binarization](https://zh.wikipedia.org/zh-hans/%E4%BA%8C%E5%80%BC%E5%8C%96).
35 | A binarized image filters out interference and **highlights the content features** of the image, and it can easily be serialized into an image fingerprint.
36 |
37 | > Binarization is the simplest method of image segmentation. It converts a grayscale image into a binary image: pixels whose gray value exceeds a critical threshold are set to the maximum gray value, and pixels below it are set to the minimum.
38 |
39 | 
40 | 
41 |
42 | The main binarization steps are:
43 | - convert the image to grayscale
44 | - compute the binarization threshold from the grayscale image
45 | - binarize the image
46 |
47 | ### Grayscale Conversion
48 |
49 | 
50 |
51 | Taking the image above as an example, grayscale conversion is simple: combining the rgb channels with the weights `r * 0.299 + g * 0.587 + b * 0.114` gives the gray value. Because a grayscale image has identical rgb channels, we keep only one channel's value for the next step.
52 |
53 | ```JavaScript
54 | const canvasToGray = (canvas) => {
55 | const ctx = canvas.getContext('2d');
56 | const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
57 | const calculateGray = (r, g, b) => parseInt(r * 0.299 + g * 0.587 + b * 0.114);
58 | const grayData = [];
59 | for (let x = 0; x < data.width; x++) {
60 | for (let y = 0; y < data.height; y++) {
61 | const idx = (x + y * data.width) * 4;
62 | const r = data.data[idx + 0];
63 | const g = data.data[idx + 1];
64 | const b = data.data[idx + 2];
65 | const gray = calculateGray(r, g, b);
66 | grayData.push(gray);
67 | }
68 | }
69 | return grayData;
70 | };
71 | ```
72 |
73 | The image after grayscale conversion looks like this:
74 |
75 | 
76 | ### Binarization Threshold
77 |
78 | Threshold selection is the critical step of binarization, and there are many algorithms for it. Let's first try the simplest one, [average hashing (aHash)](https://baike.baidu.com/item/%E5%9D%87%E5%80%BC%E5%93%88%E5%B8%8C%E7%AE%97%E6%B3%95): sum the grayscale pixels, divide by the pixel count, and use the mean as the binarization threshold. Straight to the code:
79 |
80 | ```JavaScript
81 | const average = (grayData) => {
82 | let sum = 0;
83 | for (let i = 0; i < grayData.length; i += 1) {
84 | sum += grayData[i];
85 | }
86 | return sum / grayData.length;
87 | };
88 | ```
89 |
90 | Other threshold algorithms include:
91 | - [perceptual hashing (pHash)](https://baike.baidu.com/item/%E6%84%9F%E7%9F%A5%E5%93%88%E5%B8%8C%E7%AE%97%E6%B3%95)
92 | - [Otsu's method](https://zh.wikipedia.org/wiki/%E5%A4%A7%E6%B4%A5%E7%AE%97%E6%B3%95)
93 |
94 | They are worth exploring if you are interested. Otsu's method yields a better binarization result, and we will use otsu to compute image thresholds later on, so here is an otsu implementation too:
95 |
96 | ```JavaScript
97 | const otsu = (grayData) => {
98 | let ptr = 0;
99 | // histogram: count of each gray level 0-255, initialized to 0
100 | let histData = Array(256).fill(0);
101 | let total = grayData.length;
102 |
103 | while (ptr < total) {
104 | let h = grayData[ptr++];
105 | histData[h]++;
106 | }
107 | // weighted total (gray level x count)
108 | let sum = 0;
109 | for (let i = 0; i < 256; i++) {
110 | sum += i * histData[i];
111 | }
112 | // number of background pixels (below the threshold)
113 | let wB = 0;
114 | // number of foreground pixels (above the threshold)
115 | let wF = 0;
116 | // weighted sum of the background (gray level x count)
117 | let sumB = 0;
118 | // largest between-class variance seen so far
119 | let varMax = 0;
120 | // the threshold
121 | let threshold = 0;
122 |
123 | for (let t = 0; t < 256; t++) {
124 | // accumulate the background count (below the threshold)
125 | wB += histData[t];
126 | if (wB === 0) continue;
127 | // the remaining pixels are foreground (above the threshold)
128 | wF = total - wB;
129 | if (wF === 0) break;
130 | // accumulate the background weighted sum (gray level x count)
131 | sumB += t * histData[t];
132 |
133 | // mean gray level of the background (below the threshold)
134 | let mB = sumB / wB;
135 | // mean gray level of the foreground (above the threshold)
136 | let mF = (sum - sumB) / wF;
137 | // between-class variance
138 | let varBetween = wB * wF * (mB - mF) ** 2;
139 |
140 | if (varBetween > varMax) {
141 | varMax = varBetween;
142 | threshold = t;
143 | }
144 | }
145 |
146 | return threshold;
147 | };
148 | ```
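Wiring otsu in mirrors the aHash flow; binarization-bak.js in this repo hooks it up exactly like this:

```JavaScript
const grayData = canvasToGray(canvas);
// const threshold = average(grayData);
const threshold = otsu(grayData);
```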
149 |
150 | ### Binarizing the Image
151 |
152 | With the threshold computed, the binarization itself is easy. One caveat: since both the **text color and the background color** of our generated images are arbitrary, the background may land above or below the threshold, which would make the output inconsistent across images. So we fix an output convention: the background is always rendered black (value 0) and the text white (value 255).
153 |
154 | Because our generated images are simple, the background color can be read straight from the first pixel's rgb value. The implementation is simple as well:
155 |
156 | ```JavaScript
157 | const binaryzationOutput = (originCanvas, threshold) => {
158 | const ctx = originCanvas.getContext('2d');
159 | const imageData = ctx.getImageData(0, 0, originCanvas.width, originCanvas.height);
160 | const { width, height, data } = imageData;
161 | // the first pixel's value gives the background color
162 | const head = (data[0] + data[1] + data[2]) / 3;
163 | // if the background is above the threshold, swap the foreground and background output values
164 | const color = head > threshold
165 | ? { foreground: 0, background: 255 }
166 | : { foreground: 255, background: 0 };
167 | for (let x = 0; x < width; x++) {
168 | for (let y = 0; y < height; y++) {
169 | const idx = (x + y * width) * 4;
170 | const avg = (data[idx] + data[idx + 1] + data[idx + 2]) / 3;
171 | const v = avg > threshold ? color.foreground : color.background;
172 | data[idx] = v;
173 | data[idx + 1] = v;
174 | data[idx + 2] = v;
175 | data[idx + 3] = 255;
176 | }
177 | }
178 | ctx.putImageData(imageData, 0, 0);
179 | return originCanvas.toDataURL();
180 | }
181 | ```
182 |
183 | Note one more thing: this step binarizes the **original image**, not the grayscale-processed one.
184 |
185 | [The complete code is here](https://github.com/kinglisky/zelda-words/blob/master/binarization.js); the binarized image looks like this:
186 |
187 | 
188 |
189 | ## Character Segmentation
190 |
191 | After the binarization above, every image is normalized to white text on a black background. The operation was simple, but production-grade OCR usually involves far more preprocessing, such as perspective correction, rotation correction, cropping, denoising, and sharpening, all aimed at producing a clean image that contains only the text, because preprocessing strongly affects the quality of the next step: character segmentation.
192 |
193 | As the name suggests, we need a way to extract the individual Sheikah glyphs. Below is a simple segmentation approach: the **projection segmentation algorithm**.
194 |
195 | The basic idea:
196 | - scan each row of pixels from top to bottom and cut out the rows that contain text
197 | - scan each column of a text row from left to right and cut out the individual characters
198 |
199 | ### Cutting Rows
200 |
201 | This is easier to follow with a picture. Starting with row cutting: our image is 700 x 600. Scan every row from top to bottom, **recording black pixels as 0 and white pixels as 1**, and count the 1s in each row. That yields the line chart below:
202 |
203 | 
204 |
205 | 
206 |
207 | The x axis corresponds to the image height and the y axis to the number of lit pixels in each row. It is easy to see that the stretches where y is 0 are the blank gaps of the image, the ranges with values are the rows containing text, and the span of each range is the row height.
208 |
209 | ### Cutting Characters (Cutting Columns)
210 | Having cut out the rows that contain text, the next step is to scan each text row column by column from left to right, again recording black as 0 and white as 1 and counting the 1s. Taking the first row of text as an example, the scan produces the chart below:
211 |
212 | 
213 |
214 | 
215 |
216 | Right, just like cutting rows: simply cut out the stretches where the y value is non-zero!
217 |
218 | But there is a problem: if we naively split characters on the non-zero ranges, the last character gets split into a left half and a right half:
219 |
220 | 
221 |
222 | The reason is easy to understand: the last glyph has a left-right structure with a gap in the middle, so it gets torn apart.
223 |
224 | 
225 |
226 | Take a look at the special characters below: when splitting characters we generally need to account for glyphs with a left-right or top-bottom structure.
227 |
228 | **Glyphs with a top-bottom structure:**
229 |
230 | 
231 |
232 | 
233 |
234 | **Glyphs with a left-right structure:**
235 |
236 | 
237 |
238 | 
239 |
240 |
241 | How should we handle these glyphs? It is easy to observe that Sheikah glyphs are square, so each glyph's width-to-height ratio should be 1 : 1. If we know a glyph's width or height, we know how to stitch the split regions back together. So how do we compute the glyph's width or height?
242 |
243 | The approach is simple: scan the whole image **once horizontally and once vertically** to get the projection extents of the text content in both directions. The **largest interval** across the two projections is the standard glyph size. When splitting, any block smaller than the standard size keeps merging with the next projected block until it reaches the standard glyph size.
244 |
245 | Let's first implement the two scans and the helper that finds the largest text interval:
246 |
247 | ```JavaScript
248 | // scan by rows or by columns
249 | function countPixel(imageData, isRow = false) {
250 | const { width, height, data } = imageData;
251 | const offsets = [0, 1, 2];
252 | // background color
253 | const head = offsets.map((i) => data[i]);
254 | const pixel = [];
255 | if (isRow) {
256 | // top to bottom: scan each row
257 | for (let i = 0; i < height; i++) {
258 | let count = 0;
259 | for (let j = 0; j < width; j++) {
260 | const index = (i * width + j) * 4;
261 | const isEqual = offsets.every(
262 | (offset) => head[offset] === data[index + offset]
263 | );
264 | count += isEqual ? 0 : 1;
265 | }
266 | pixel.push(count);
267 | }
268 | } else {
269 | // left to right: scan each column
270 | for (let i = 0; i < width; i++) {
271 | let count = 0;
272 | for (let j = 0; j < height; j++) {
273 | const index = (j * width + i) * 4;
274 | const isEqual = offsets.every(
275 | (offset) => head[offset] === data[index + offset]
276 | );
277 | count += isEqual ? 0 : 1;
278 | }
279 | pixel.push(count);
280 | }
281 | }
282 | return pixel;
283 | }
284 |
285 | // split the counts into text and background ranges
286 | function countRanges(counts) {
287 | const groups = [];
288 | let foreground = 0;
289 | let background = 0;
290 | counts.forEach((count) => {
291 | if (count) {
292 | foreground += 1;
293 | if (background) {
294 | groups.push({ background: true, value: background });
295 | background = 0;
296 | }
297 | } else {
298 | background += 1;
299 | if (foreground) {
300 | groups.push({ foreground: true, value: foreground });
301 | foreground = 0;
302 | }
303 | }
304 | });
305 | if (foreground) {
306 | groups.push({ foreground: true, value: foreground });
307 | }
308 | if (background) {
309 | groups.push({ background: true, value: background });
310 | }
311 | return groups;
312 | }
313 |
314 | // get the largest text range
315 | function getMaxRange(data) {
316 | return data.reduce((max, it) => {
317 | if (it.foreground) {
318 | return Math.max(max, it.value);
319 | }
320 | return max;
321 | }, 0);
322 | }
323 | ```
324 |
325 | Compute the glyph size in the image:
326 |
327 | ```JavaScript
328 | const imageData = {};
329 | // scan row by row
330 | const rowsRanges = countRanges(countPixel(imageData, true));
331 | // scan column by column
332 | const colsRanges = countRanges(countPixel(imageData, false));
333 |
334 | // derive the glyph size (glyphs occupy a square area) from the row and column pixel distributions
335 | const fontRange = Math.max(
336 | getMaxRange(rowsRanges),
337 | getMaxRange(colsRanges)
338 | );
339 | ```
340 |
341 | Merging the intervals of glyphs with a left-right or top-bottom structure:
342 |
343 | ```JavaScript
344 | // merge the intervals of structurally split glyphs
345 | function mergeRanges(data, size) {
346 | const merge = [];
347 |     // chunks holds regions smaller than the standard character size
348 | let chunks = [];
349 | data.forEach((item) => {
350 | if (chunks.length) {
351 | chunks.push(item);
352 | const value = chunks.reduce((sum, chunk) => sum + chunk.value, 0);
353 |             // once the accumulated region reaches or is close to the standard size, merge it into one block
354 | if (value >= size || Math.pow(value - size, 2) < 4) {
355 | merge.push({
356 | foreground: true,
357 | value,
358 | });
359 | chunks = [];
360 | }
361 | return;
362 | }
363 |         // a foreground region smaller than the standard size starts a new chunks buffer
364 | if (item.foreground && item.value < size) {
365 | chunks = [item];
366 | return;
367 | }
368 | merge.push(item);
369 | });
370 | return merge;
371 | }
372 | ```
373 |
374 | After this pass the block info looks like the following; we just crop out each `foreground` block with its corresponding size `value`, in order.
375 |
376 | ```JavaScript
377 | [
378 | {
379 | "background": true,
380 | "value": 115
381 | },
382 | {
383 | "foreground": true,
384 | "value": 70
385 | },
386 | {
387 | "background": true,
388 | "value": 30
389 | },
390 | {
391 | "foreground": true,
392 | "value": 70
393 | },
394 | {
395 | "background": true,
396 | "value": 30
397 | },
398 | {
399 | "foreground": true,
400 | "value": 70
401 | },
402 | {
403 | "background": true,
404 | "value": 30
405 | },
406 | {
407 | "foreground": true,
408 | "value": 70
409 | },
410 | {
411 | "background": true,
412 | "value": 115
413 | }
414 | ]
415 | ```
416 |
417 | All that remains is computing the various offsets, cropping the individual character blocks out of the canvas, and recording their positions. [The full implementation is here](https://github.com/kinglisky/zelda-words/blob/master/src/utils/image-ocr.ts#L221), so I won't walk through it line by line; a rough sketch of the idea follows.
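
A minimal sketch of the cropping step, under a couple of assumptions: `rowsRanges` and `colsRanges` are the merged interval arrays from above, the glyphs form a regular grid (true for the images we generate), and `canvas` holds the binarized image. This is illustrative only, not the project's actual `image-ocr.ts` code:

```JavaScript
// Walk the merged row/column intervals, accumulating pixel offsets,
// and crop every (foreground row, foreground column) cell as one character.
function cropChars(canvas, rowsRanges, colsRanges) {
    const chars = [];
    let y = 0;
    rowsRanges.forEach((row) => {
        let x = 0;
        colsRanges.forEach((col) => {
            if (row.foreground && col.foreground) {
                const charCanvas = document.createElement('canvas');
                charCanvas.width = col.value;
                charCanvas.height = row.value;
                charCanvas.getContext('2d').drawImage(
                    canvas,
                    x, y, col.value, row.value, // source rect
                    0, 0, col.value, row.value  // destination rect
                );
                // keep the position so the results can be laid out later
                chars.push({ x, y, width: col.value, height: row.value, canvas: charCanvas });
            }
            x += col.value;
        });
        y += row.value;
    });
    return chars;
}
```

The characters cut out look like this: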
418 |
419 | 
420 | ## Detecting similar images
421 |
422 | With the characters cut out, what remains is translating them. For Sheikah script we know the mapping to English: every Sheikah symbol corresponds to one English character. So we can render the Sheikah glyphs for all 40 English characters as reference images, and translating a Sheikah image reduces to a **similarity comparison**: compare each cut-out image against the 40 known reference images, and the most similar one is the target character.
423 |
424 | 
425 |
426 | Above are the Sheikah symbols for `abcdefghijklmnopqrstuvwxyz0123456789.-!?`.
427 |
428 | How do we compare two images for similarity? Most of the work is actually done; we are one step away. Since the image is already binarized into black and white, outputting **0 for each black pixel and 1 for each white pixel** gives a binary hash of the image. The similarity of two images is then the **number of positions where their hashes differ**, which is exactly the [Hamming distance](https://zh.wikipedia.org/zh-hans/%E6%B1%89%E6%98%8E%E8%B7%9D%E7%A6%BB) between the hashes: the smaller the distance, the more similar the images. A small tweak to the binarization output code gives us the image hash.
429 |
430 | ```JavaScript
431 | const binaryzationHash = (originCanvas, threshold) => {
432 | const ctx = originCanvas.getContext('2d');
433 | const imageData = ctx.getImageData(0, 0, originCanvas.width, originCanvas.height);
434 | const { width, height, data } = imageData;
435 |     // the first pixel's value is the background color
436 | const head = (data[0] + data[1] + data[2]) / 3;
437 |     // if the background is brighter than the threshold, swap the foreground/background values
438 | const color = head > threshold
439 | ? { foreground: 0, background: 255 }
440 | : { foreground: 255, background: 0 };
441 | const hash = [];
442 | for (let x = 0; x < width; x++) {
443 | for (let y = 0; y < height; y++) {
444 | const idx = (x + y * width) * 4;
445 | const avg = (data[idx] + data[idx + 1] + data[idx + 2]) / 3;
446 | const v = avg > threshold ? color.foreground : color.background;
447 | hash.push(v ? 1 : 0);
448 | }
449 | }
450 | return hash;
451 | }
452 | ```
453 |
454 | Comparing Hamming distances is just as simple:
455 |
456 | ```JavaScript
457 | const hammingDistance = (hash1, hash2) => {
458 | let count = 0;
459 | hash1.forEach((it, index) => {
460 | count += it ^ hash2[index];
461 | });
462 | return count;
463 | };
464 | ```
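
For example, `hammingDistance([0, 1, 1, 0], [1, 1, 1, 1])` returns 2: the two hashes differ in the first and last positions.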
465 |
466 | That is the core of similar-image comparison. Since we cannot guarantee that the cut-out character blocks are the same size as the reference images, both the cut-out image and the reference image are shrunk to 8 x 8 before comparing. The main flow of comparing two images is roughly as follows (a sketch of the shrink step comes right after this list):
467 | - shrink both images to 8 x 8
468 | - grayscale the images
469 | - compute the binarization threshold
470 | - binarize the images and compute their hashes
471 | - compare the Hamming distance of the two hashes
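
The `resizeCanvas` helper used in the code below is not shown in this article; here is a minimal sketch of what it could look like, with the name and signature assumed from the `resizeCanvas(canvas, 8)` usage further down (shrink a canvas to `size` x `size` via `drawImage` and return the resulting `ImageData`):

```JavaScript
// Shrink a canvas to size x size and return its ImageData
// (a sketch matching the resizeCanvas(canvas, 8) usage below).
function resizeCanvas(canvas, size) {
    const target = document.createElement('canvas');
    target.width = size;
    target.height = size;
    const ctx = target.getContext('2d');
    // let the browser handle the downsampling
    ctx.drawImage(canvas, 0, 0, canvas.width, canvas.height, 0, 0, size, size);
    return ctx.getImageData(0, 0, size, size);
}
```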
472 |
473 | I previously wrote up similar-image recognition in detail; if you are interested, see [相似图片识别的朴素实现](https://juejin.cn/post/6926181310868226061).
474 |
475 | Back to our Sheikah translation. Only three steps remain:
476 | 1. shrink the 40 reference images to 8 x 8 and generate their image hashes
477 | 2. shrink each cut-out character image to 8 x 8 and generate its image hash
478 | 3. compare each character hash against the 40 reference hashes and take the one with the smallest difference (the highest similarity) as the target letter
479 |
480 |
481 | The implementation is fairly simple (`splitImage` is the segmentation from the previous section, and `binaryzationOutput` corresponds to the `binaryzationHash` routine above):
482 |
483 | ```JavaScript
484 | async function createImageFingerprints(image) {
485 | const contents = splitImage(image);
486 | return contents.map(({ canvas, ...args }) => {
487 |         // shrink uniformly to 8 x 8 pixels
488 | const imageData = resizeCanvas(canvas, 8);
489 | const hash = binaryzationOutput(imageData);
490 | return {
491 | ...args,
492 | hash,
493 | };
494 | });
495 | }
496 |
497 | // build the reference character fingerprints
498 | function createSymbols(fingerprints) {
499 | const WORDS = 'abcdefghijklmnopqrstuvwxyz0123456789.-!?';
500 | return fingerprints.map((it, index) => {
501 | return {
502 | name: WORDS[index],
503 | value: it.hash,
504 | };
505 | });
506 | }
507 |
508 | // match the most similar character
509 | function mapSymbols(fingerprints, symbols) {
510 |     return fingerprints.map(({ hash, ...position }) => {
511 |         const isEmpty = hash.every((v) => v === hash[0]);
512 | if (isEmpty) {
513 | return ' ';
514 | }
515 | let diff = Number.MAX_SAFE_INTEGER;
516 | let word = '*';
517 | symbols.forEach((symbol) => {
518 | const distance = hammingDistance(hash, symbol.value);
519 |             // keep the candidate with the smallest Hamming distance (the most similar)
520 | if (distance < diff) {
521 | diff = distance;
522 | word = symbol.name;
523 | }
524 | });
525 | return {
526 | ...position,
527 | word,
528 | diff,
529 | };
530 | });
531 | }
532 | ```
533 |
534 | Usage looks roughly like this (in the full implementation `createImageFingerprints` takes a second flag that controls whether the image is split; the simplified version above omits it):
535 |
536 | ```JavaScript
537 | /**
538 |  * @param imageUrl the image to parse
539 |  * @param mapUrl the reference character map image
540 | */
541 | export async function readMetaInfo(imageUrl, mapUrl) {
542 | const mapImage = await loadImage(mapUrl);
543 | const mapImageFingerprints = await createImageFingerprints(mapImage, false);
544 | const symbols = createSymbols(mapImageFingerprints);
545 | const readImage = await loadImage(imageUrl);
546 | const readImageFingerprints = await createImageFingerprints(
547 | readImage,
548 | true
549 | );
550 | const results = mapSymbols(readImageFingerprints, symbols);
551 | console.log(results);
552 | }
553 | ```
554 |
555 | [The complete implementation is here](https://github.com/kinglisky/zelda-words/blob/master/src/utils/image-ocr.ts#L390), and with that, a (very) simple Sheikah OCR translator is done~
556 |
557 |
558 | ## Further reading
559 |
560 | - [摸一个塞尔达希卡文字转换器](https://juejin.cn/post/6935836863844319239/)
561 | - [相似图片识别的朴素实现](https://juejin.cn/post/6926181310868226061)
562 | - [利用 JS 实现多种图片相似度算法](https://segmentfault.com/a/1190000021236326)
563 | - [文字切割算法-基于投影的切割](https://blog.csdn.net/Print_lin/article/details/80143002?spm=1001.2014.3001.5501)
564 | - [文字切割算法-投影切割优化](https://blog.csdn.net/Print_lin/article/details/80335236)
565 |
566 | A little thing hacked together while slacking off; getting a rough feel for how OCR is implemented was great fun. This last image is for you, special reader, yay~
567 |
568 | 
569 |
--------------------------------------------------------------------------------
/ocr/ocr-0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-0.jpg
--------------------------------------------------------------------------------
/ocr/ocr-1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-1.jpeg
--------------------------------------------------------------------------------
/ocr/ocr-10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-10.png
--------------------------------------------------------------------------------
/ocr/ocr-11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-11.png
--------------------------------------------------------------------------------
/ocr/ocr-12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-12.png
--------------------------------------------------------------------------------
/ocr/ocr-13.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-13.jpeg
--------------------------------------------------------------------------------
/ocr/ocr-14.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-14.jpeg
--------------------------------------------------------------------------------
/ocr/ocr-15.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-15.jpeg
--------------------------------------------------------------------------------
/ocr/ocr-16.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-16.jpeg
--------------------------------------------------------------------------------
/ocr/ocr-17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-17.png
--------------------------------------------------------------------------------
/ocr/ocr-18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-18.png
--------------------------------------------------------------------------------
/ocr/ocr-19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-19.png
--------------------------------------------------------------------------------
/ocr/ocr-2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-2.jpeg
--------------------------------------------------------------------------------
/ocr/ocr-3.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-3.jpeg
--------------------------------------------------------------------------------
/ocr/ocr-4.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-4.jpeg
--------------------------------------------------------------------------------
/ocr/ocr-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-5.png
--------------------------------------------------------------------------------
/ocr/ocr-6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-6.png
--------------------------------------------------------------------------------
/ocr/ocr-7.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-7.jpeg
--------------------------------------------------------------------------------
/ocr/ocr-8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-8.png
--------------------------------------------------------------------------------
/ocr/ocr-9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-9.png
--------------------------------------------------------------------------------
/ocr/ocr-map.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/ocr/ocr-map.jpeg
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "zelda-words",
3 | "version": "0.0.0",
4 | "scripts": {
5 | "dev": "vite",
6 | "build": "vuedx-typecheck . && vite build",
7 | "serve": "vite preview"
8 | },
9 | "dependencies": {
10 | "@nuintun/qrcode": "^3.0.1",
11 | "@tensorflow/tfjs": "^3.9.0",
12 | "vue": "^3.0.5",
13 | "yargs": "^17.1.1"
14 | },
15 | "devDependencies": {
16 | "@types/dom-to-image": "^2.6.2",
17 | "@types/node": "^16.9.3",
18 | "@vitejs/plugin-vue": "^1.1.4",
19 | "@vue/compiler-sfc": "^3.0.5",
20 | "@vuedx/typecheck": "^0.6.0",
21 | "@vuedx/typescript-plugin-vue": "^0.6.0",
22 | "dom-to-image": "^2.6.0",
23 | "sass": "^1.32.7",
24 | "typescript": "^4.1.3",
25 | "vite": "^2.0.0"
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/public/favicon.ico
--------------------------------------------------------------------------------
/public/model.json:
--------------------------------------------------------------------------------
1 | {
2 | "modelTopology": {
3 | "class_name": "Sequential",
4 | "config": {
5 | "name": "sequential_1",
6 | "layers": [
7 | {
8 | "class_name": "Conv2D",
9 | "config": {
10 | "filters": 16,
11 | "kernel_initializer": {
12 | "class_name": "VarianceScaling",
13 | "config": {
14 | "scale": 1,
15 | "mode": "fan_avg",
16 | "distribution": "normal",
17 | "seed": null
18 | }
19 | },
20 | "kernel_regularizer": null,
21 | "kernel_constraint": null,
22 | "kernel_size": [
23 | 3,
24 | 3
25 | ],
26 | "strides": [
27 | 1,
28 | 1
29 | ],
30 | "padding": "valid",
31 | "data_format": "channels_last",
32 | "dilation_rate": [
33 | 1,
34 | 1
35 | ],
36 | "activation": "relu",
37 | "use_bias": true,
38 | "bias_initializer": {
39 | "class_name": "Zeros",
40 | "config": {}
41 | },
42 | "bias_regularizer": null,
43 | "activity_regularizer": null,
44 | "bias_constraint": null,
45 | "name": "conv2d_Conv2D1",
46 | "trainable": true,
47 | "batch_input_shape": [
48 | null,
49 | 28,
50 | 28,
51 | 1
52 | ],
53 | "dtype": "float32"
54 | }
55 | },
56 | {
57 | "class_name": "MaxPooling2D",
58 | "config": {
59 | "pool_size": [
60 | 2,
61 | 2
62 | ],
63 | "padding": "valid",
64 | "strides": [
65 | 2,
66 | 2
67 | ],
68 | "data_format": "channels_last",
69 | "name": "max_pooling2d_MaxPooling2D1",
70 | "trainable": true
71 | }
72 | },
73 | {
74 | "class_name": "Conv2D",
75 | "config": {
76 | "filters": 32,
77 | "kernel_initializer": {
78 | "class_name": "VarianceScaling",
79 | "config": {
80 | "scale": 1,
81 | "mode": "fan_avg",
82 | "distribution": "normal",
83 | "seed": null
84 | }
85 | },
86 | "kernel_regularizer": null,
87 | "kernel_constraint": null,
88 | "kernel_size": [
89 | 3,
90 | 3
91 | ],
92 | "strides": [
93 | 1,
94 | 1
95 | ],
96 | "padding": "valid",
97 | "data_format": "channels_last",
98 | "dilation_rate": [
99 | 1,
100 | 1
101 | ],
102 | "activation": "relu",
103 | "use_bias": true,
104 | "bias_initializer": {
105 | "class_name": "Zeros",
106 | "config": {}
107 | },
108 | "bias_regularizer": null,
109 | "activity_regularizer": null,
110 | "bias_constraint": null,
111 | "name": "conv2d_Conv2D2",
112 | "trainable": true
113 | }
114 | },
115 | {
116 | "class_name": "MaxPooling2D",
117 | "config": {
118 | "pool_size": [
119 | 2,
120 | 2
121 | ],
122 | "padding": "valid",
123 | "strides": [
124 | 2,
125 | 2
126 | ],
127 | "data_format": "channels_last",
128 | "name": "max_pooling2d_MaxPooling2D2",
129 | "trainable": true
130 | }
131 | },
132 | {
133 | "class_name": "Conv2D",
134 | "config": {
135 | "filters": 32,
136 | "kernel_initializer": {
137 | "class_name": "VarianceScaling",
138 | "config": {
139 | "scale": 1,
140 | "mode": "fan_avg",
141 | "distribution": "normal",
142 | "seed": null
143 | }
144 | },
145 | "kernel_regularizer": null,
146 | "kernel_constraint": null,
147 | "kernel_size": [
148 | 3,
149 | 3
150 | ],
151 | "strides": [
152 | 1,
153 | 1
154 | ],
155 | "padding": "valid",
156 | "data_format": "channels_last",
157 | "dilation_rate": [
158 | 1,
159 | 1
160 | ],
161 | "activation": "relu",
162 | "use_bias": true,
163 | "bias_initializer": {
164 | "class_name": "Zeros",
165 | "config": {}
166 | },
167 | "bias_regularizer": null,
168 | "activity_regularizer": null,
169 | "bias_constraint": null,
170 | "name": "conv2d_Conv2D3",
171 | "trainable": true
172 | }
173 | },
174 | {
175 | "class_name": "Flatten",
176 | "config": {
177 | "name": "flatten_Flatten1",
178 | "trainable": true
179 | }
180 | },
181 | {
182 | "class_name": "Dense",
183 | "config": {
184 | "units": 64,
185 | "activation": "relu",
186 | "use_bias": true,
187 | "kernel_initializer": {
188 | "class_name": "VarianceScaling",
189 | "config": {
190 | "scale": 1,
191 | "mode": "fan_avg",
192 | "distribution": "normal",
193 | "seed": null
194 | }
195 | },
196 | "bias_initializer": {
197 | "class_name": "Zeros",
198 | "config": {}
199 | },
200 | "kernel_regularizer": null,
201 | "bias_regularizer": null,
202 | "activity_regularizer": null,
203 | "kernel_constraint": null,
204 | "bias_constraint": null,
205 | "name": "dense_Dense1",
206 | "trainable": true
207 | }
208 | },
209 | {
210 | "class_name": "Dense",
211 | "config": {
212 | "units": 40,
213 | "activation": "softmax",
214 | "use_bias": true,
215 | "kernel_initializer": {
216 | "class_name": "VarianceScaling",
217 | "config": {
218 | "scale": 1,
219 | "mode": "fan_avg",
220 | "distribution": "normal",
221 | "seed": null
222 | }
223 | },
224 | "bias_initializer": {
225 | "class_name": "Zeros",
226 | "config": {}
227 | },
228 | "kernel_regularizer": null,
229 | "bias_regularizer": null,
230 | "activity_regularizer": null,
231 | "kernel_constraint": null,
232 | "bias_constraint": null,
233 | "name": "dense_Dense2",
234 | "trainable": true
235 | }
236 | }
237 | ]
238 | },
239 | "keras_version": "tfjs-layers 3.9.0",
240 | "backend": "tensor_flow.js"
241 | },
242 | "format": "layers-model",
243 | "generatedBy": "TensorFlow.js tfjs-layers v3.9.0",
244 | "convertedBy": null,
245 | "weightsManifest": [
246 | {
247 | "paths": [
248 | "./model.weights.bin"
249 | ],
250 | "weights": [
251 | {
252 | "name": "conv2d_Conv2D1/kernel",
253 | "shape": [
254 | 3,
255 | 3,
256 | 1,
257 | 16
258 | ],
259 | "dtype": "float32"
260 | },
261 | {
262 | "name": "conv2d_Conv2D1/bias",
263 | "shape": [
264 | 16
265 | ],
266 | "dtype": "float32"
267 | },
268 | {
269 | "name": "conv2d_Conv2D2/kernel",
270 | "shape": [
271 | 3,
272 | 3,
273 | 16,
274 | 32
275 | ],
276 | "dtype": "float32"
277 | },
278 | {
279 | "name": "conv2d_Conv2D2/bias",
280 | "shape": [
281 | 32
282 | ],
283 | "dtype": "float32"
284 | },
285 | {
286 | "name": "conv2d_Conv2D3/kernel",
287 | "shape": [
288 | 3,
289 | 3,
290 | 32,
291 | 32
292 | ],
293 | "dtype": "float32"
294 | },
295 | {
296 | "name": "conv2d_Conv2D3/bias",
297 | "shape": [
298 | 32
299 | ],
300 | "dtype": "float32"
301 | },
302 | {
303 | "name": "dense_Dense1/kernel",
304 | "shape": [
305 | 288,
306 | 64
307 | ],
308 | "dtype": "float32"
309 | },
310 | {
311 | "name": "dense_Dense1/bias",
312 | "shape": [
313 | 64
314 | ],
315 | "dtype": "float32"
316 | },
317 | {
318 | "name": "dense_Dense2/kernel",
319 | "shape": [
320 | 64,
321 | 40
322 | ],
323 | "dtype": "float32"
324 | },
325 | {
326 | "name": "dense_Dense2/bias",
327 | "shape": [
328 | 40
329 | ],
330 | "dtype": "float32"
331 | }
332 | ]
333 | }
334 | ]
335 | }
--------------------------------------------------------------------------------
/public/model.weights.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/public/model.weights.bin
--------------------------------------------------------------------------------
/src/App.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
84 |
85 |
86 |
87 |
93 |
98 |
99 |
100 |
101 |
183 |
184 |
345 |
--------------------------------------------------------------------------------
/src/assets/map.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/src/assets/map.jpeg
--------------------------------------------------------------------------------
/src/assets/map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/src/assets/map.png
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/0.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/1.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/2.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/3.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/4.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/5.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/6.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/7.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/8.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/9.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/a.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/b.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/c.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/d.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/e.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/exclam.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/f.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/g.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/h.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/hyphen.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/i.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/j.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/k.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/l.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/m.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/n.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/o.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/p.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/period.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/q.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/question.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/r.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/s.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/t.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/u.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/v.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/w.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/x.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/y.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/assets/sheikah-icon/z.svg:
--------------------------------------------------------------------------------
1 |
2 |
8 |
--------------------------------------------------------------------------------
/src/cnn/data.js:
--------------------------------------------------------------------------------
1 | import * as tf from '@tensorflow/tfjs';
2 | import TRAIN from '../data/train.json';
3 | import TEST from '../data/test.json';
4 | import trainBufferURL from '../data/train.buffer?url';
5 | import testBufferURL from '../data/test.buffer?url';
6 |
7 | TRAIN.url = trainBufferURL;
8 | TEST.url = testBufferURL;
9 |
10 | export const WORDS_COUNT = 40;
11 | export const IMAGE_H = 28;
12 | export const IMAGE_W = 28;
13 | const loadBuffer = async (data) => {
14 | const {
15 | count,
16 | width,
17 | height,
18 | url,
19 | indexs,
20 | } = data;
21 | const buffers = await fetch(url).then(res => res.arrayBuffer());
22 | const images = new Float32Array(new Uint8ClampedArray(buffers));
23 | for (let i = 0; i < images.length; i++) {
24 | images[i] = images[i] / 255;
25 | }
26 | const labels = new Int32Array(indexs);
27 | return {
28 | count,
29 | width,
30 | height,
31 | images,
32 | labels,
33 | };
34 | }
35 |
36 | class Dataset {
37 | constructor() {
38 | this.dataset = {};
39 | }
40 |
41 | async loadData() {
42 | const train = await loadBuffer(TRAIN);
43 | const test = await loadBuffer(TEST);
44 | this.dataset = {
45 | train,
46 | test,
47 | };
48 | }
49 |
50 | getData(key) {
51 | const target = this.dataset[key];
52 |         const imagesShape = [target.count, target.height, target.width, 1];
53 | return {
54 | images: tf.tensor4d(target.images, imagesShape),
55 | labels: tf.oneHot(tf.tensor1d(target.labels, 'int32'), WORDS_COUNT).toFloat(),
56 | };
57 | }
58 |
59 | getTrainData() {
60 | return this.getData('train');
61 | }
62 |
63 | getTestData(numExamples) {
64 | const res = this.getData('test');
65 | if (numExamples) {
66 | return {
67 | images: res.images.slice([0, 0, 0, 0], [numExamples, IMAGE_H, IMAGE_W, 1]),
68 | labels: res.labels.slice([0, 0], [numExamples, WORDS_COUNT]),
69 | };
70 | }
71 | return res;
72 | }
73 | }
74 |
75 | export const dataset = new Dataset();
76 |
--------------------------------------------------------------------------------
/src/cnn/index.js:
--------------------------------------------------------------------------------
1 | import * as tf from '@tensorflow/tfjs';
2 | import { dataset, IMAGE_H, IMAGE_W, WORDS_COUNT } from './data';
3 | import modelURL from '../data/model.json?url';
4 |
5 | const BATCH_SIZE = 400;
6 |
7 | function createModel() {
8 | const model = tf.sequential();
9 |     // conv2d layer: performs the convolution
10 |     model.add(tf.layers.conv2d({
11 |         inputShape: [IMAGE_H, IMAGE_W, 1],
12 |         kernelSize: 3,
13 |         filters: 16,
14 |         activation: 'relu'
15 |     }));
16 |     // pool after the convolution
17 |     model.add(tf.layers.maxPooling2d({ poolSize: 2, strides: 2 }));
18 |     // repeat convolution and pooling once more
19 |     model.add(tf.layers.conv2d({ kernelSize: 3, filters: 32, activation: 'relu' }));
20 |     model.add(tf.layers.maxPooling2d({ poolSize: 2, strides: 2 }));
21 |     model.add(tf.layers.conv2d({ kernelSize: 3, filters: 32, activation: 'relu' }));
22 |     // flatten the tensor
23 |     model.add(tf.layers.flatten({}));
24 |     // add a dense layer to increase the network's capacity
25 |     model.add(tf.layers.dense({ units: 64, activation: 'relu' }));
26 |     // softmax activation for the multi-class problem
27 |     model.add(tf.layers.dense({ units: WORDS_COUNT, activation: 'softmax' }));
28 | return model;
29 | }
30 |
31 | async function showPredictions(model, data) {
32 | const testExamples = 40;
33 | const examples = data.getTestData(testExamples);
34 | tf.tidy(() => {
35 | const output = model.predict(examples.images);
36 | const axis = 1;
37 | const labels = Array.from(examples.labels.argMax(axis).dataSync());
38 | const predictions = Array.from(output.argMax(axis).dataSync());
39 | const res = predictions.filter((it, index) => it === labels[index]);
40 |         console.log('prediction results', res.length, res);
41 | });
42 | }
43 |
44 | async function train({ model, data, onIteration }) {
45 | model.compile({
46 | optimizer: 'rmsprop',
47 | loss: 'categoricalCrossentropy',
48 | metrics: ['accuracy'],
49 | });
50 |
51 | const batchSize = BATCH_SIZE;
52 |
53 |     // validation split ratio
54 | const validationSplit = 0.15;
55 |
56 |     // number of training epochs
57 | const trainEpochs = 20;
58 |
59 | let trainBatchCount = 0;
60 | let trainEpochCount = 0;
61 |
62 | const trainData = data.getTrainData();
63 | const testData = data.getTestData();
64 |
65 | const totalNumBatches = Math.ceil(trainData.images.shape[0] * (1 - validationSplit) / batchSize) * trainEpochs;
66 |
67 | let valAcc;
68 | await model.fit(trainData.images, trainData.labels, {
69 | batchSize,
70 | validationSplit,
71 | epochs: trainEpochs,
72 | callbacks: {
73 |             // called at the end of every training batch
74 |             onBatchEnd: async (batch, logs) => {
75 |                 trainBatchCount++;
76 |                 console.log(`training progress: ${(trainBatchCount / totalNumBatches * 100).toFixed(1)}%`);
77 |                 console.log(`loss: ${logs.loss}`);
78 |                 console.log(`accuracy: ${logs.acc}`);
79 | if (onIteration && batch % 10 === 0) {
80 | onIteration('onBatchEnd', batch, logs);
81 | }
82 | await tf.nextFrame();
83 | },
84 |             // called at the end of every epoch
85 |             onEpochEnd: async (epoch, logs) => {
86 |                 trainEpochCount++;
87 |                 valAcc = logs.val_acc;
88 |                 console.log(`training progress: epoch ${trainEpochCount}`);
89 |                 console.log(`validation loss: ${logs.val_loss}`);
90 |                 console.log(`validation accuracy: ${logs.val_acc}`);
91 | if (onIteration) {
92 | onIteration('onEpochEnd', epoch, logs);
93 | }
94 | await tf.nextFrame();
95 | }
96 | }
97 | });
98 |
99 | const testResult = model.evaluate(testData.images, testData.labels);
100 | const testAccPercent = testResult[1].dataSync()[0] * 100;
101 | const finalValAccPercent = valAcc * 100;
102 |     console.log(`validation accuracy: ${finalValAccPercent.toFixed(1)}%`);
103 |     console.log(`test accuracy: ${testAccPercent.toFixed(1)}%`);
104 |
105 | const saveResults = await model.save('downloads://zelda-words-model');
106 |     console.log('model saved', saveResults);
107 | }
108 |
109 | async function run() {
110 |     console.log('loading training data...');
111 | await dataset.loadData();
112 | const model = createModel();
113 | train({
114 | model,
115 | data: dataset,
116 | onIteration: () => {
117 | showPredictions(model, dataset);
118 | },
119 | });
120 | };
121 |
122 | async function predict() {
123 | const model = await tf.loadLayersModel(modelURL);
124 | await dataset.loadData();
125 | const examples = dataset.getTestData(40);
126 | const output = model.predict(examples.images);
127 | const axis = 1;
128 | const labels = Array.from(examples.labels.argMax(axis).dataSync());
129 | const predictions = Array.from(output.argMax(axis).dataSync());
130 | const res = predictions.filter((it, index) => it === labels[index]);
131 |     console.log('prediction results', res.length, res);
132 | }
133 |
134 | window.addEventListener('load', () => {
135 | document.querySelector('.run').addEventListener('click', run, false);
136 | document.querySelector('.predict').addEventListener('click', predict, false);
137 | });
138 |
--------------------------------------------------------------------------------
/src/components/Download.vue:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
8 |
9 |
10 |
46 |
47 |
69 |
--------------------------------------------------------------------------------
/src/components/ParsePanel.vue:
--------------------------------------------------------------------------------
1 |
2 |
6 | ×
7 |
12 |
16 | Parsing image......
17 |
18 |
19 |
20 |
21 |
82 |
83 |
127 |
--------------------------------------------------------------------------------
/src/components/WordIcon/Main.vue:
--------------------------------------------------------------------------------
1 |
2 |
9 |
10 |
11 |
83 |
84 |
94 |
--------------------------------------------------------------------------------
/src/components/WordIcon/icon-map.ts:
--------------------------------------------------------------------------------
1 | export default {
2 | '0': '0',
3 | '1': '1',
4 | '2': '2',
5 | '3': '3',
6 | '4': '4',
7 | '5': '5',
8 | '6': '6',
9 | '7': '7',
10 | '8': '8',
11 | '9': '9',
12 | 'a': 'a',
13 | 'b': 'b',
14 | 'c': 'c',
15 | 'd': 'd',
16 | 'e': 'e',
17 | 'f': 'f',
18 | 'g': 'g',
19 | 'h': 'h',
20 | 'i': 'i',
21 | 'j': 'j',
22 | 'k': 'k',
23 | 'l': 'l',
24 | 'm': 'm',
25 | 'n': 'n',
26 | 'o': 'o',
27 | 'p': 'p',
28 | 'q': 'q',
29 | 'r': 'r',
30 | 's': 's',
31 | 't': 't',
32 | 'u': 'u',
33 | 'v': 'v',
34 | 'w': 'w',
35 | 'x': 'x',
36 | 'y': 'y',
37 | 'z': 'z',
38 | '.': 'period',
39 | '!': 'exclam',
40 | '?': 'question',
41 | '-': 'hyphen',
42 | };
43 |
--------------------------------------------------------------------------------
/src/components/WordIcon/regist-script.ts:
--------------------------------------------------------------------------------
1 | export default function registScript(url: string): void {
2 | const scriptName = 'ICON_FONT_SCRIPT';
3 | if (document.querySelector(`#${scriptName}`) || !url) return;
4 | const scriptElement: HTMLScriptElement = document.createElement('script');
5 | scriptElement.id = scriptName;
6 | document.body.appendChild(scriptElement);
7 | scriptElement.src = url;
8 | }
9 |
--------------------------------------------------------------------------------
/src/components/WordsPanel.vue:
--------------------------------------------------------------------------------
1 |
2 |
25 |
26 |
27 |
144 |
145 |
160 |
--------------------------------------------------------------------------------
/src/data/model.json:
--------------------------------------------------------------------------------
1 | {
2 | "modelTopology": {
3 | "class_name": "Sequential",
4 | "config": {
5 | "name": "sequential_1",
6 | "layers": [
7 | {
8 | "class_name": "Conv2D",
9 | "config": {
10 | "filters": 16,
11 | "kernel_initializer": {
12 | "class_name": "VarianceScaling",
13 | "config": {
14 | "scale": 1,
15 | "mode": "fan_avg",
16 | "distribution": "normal",
17 | "seed": null
18 | }
19 | },
20 | "kernel_regularizer": null,
21 | "kernel_constraint": null,
22 | "kernel_size": [
23 | 3,
24 | 3
25 | ],
26 | "strides": [
27 | 1,
28 | 1
29 | ],
30 | "padding": "valid",
31 | "data_format": "channels_last",
32 | "dilation_rate": [
33 | 1,
34 | 1
35 | ],
36 | "activation": "relu",
37 | "use_bias": true,
38 | "bias_initializer": {
39 | "class_name": "Zeros",
40 | "config": {}
41 | },
42 | "bias_regularizer": null,
43 | "activity_regularizer": null,
44 | "bias_constraint": null,
45 | "name": "conv2d_Conv2D1",
46 | "trainable": true,
47 | "batch_input_shape": [
48 | null,
49 | 28,
50 | 28,
51 | 1
52 | ],
53 | "dtype": "float32"
54 | }
55 | },
56 | {
57 | "class_name": "MaxPooling2D",
58 | "config": {
59 | "pool_size": [
60 | 2,
61 | 2
62 | ],
63 | "padding": "valid",
64 | "strides": [
65 | 2,
66 | 2
67 | ],
68 | "data_format": "channels_last",
69 | "name": "max_pooling2d_MaxPooling2D1",
70 | "trainable": true
71 | }
72 | },
73 | {
74 | "class_name": "Conv2D",
75 | "config": {
76 | "filters": 32,
77 | "kernel_initializer": {
78 | "class_name": "VarianceScaling",
79 | "config": {
80 | "scale": 1,
81 | "mode": "fan_avg",
82 | "distribution": "normal",
83 | "seed": null
84 | }
85 | },
86 | "kernel_regularizer": null,
87 | "kernel_constraint": null,
88 | "kernel_size": [
89 | 3,
90 | 3
91 | ],
92 | "strides": [
93 | 1,
94 | 1
95 | ],
96 | "padding": "valid",
97 | "data_format": "channels_last",
98 | "dilation_rate": [
99 | 1,
100 | 1
101 | ],
102 | "activation": "relu",
103 | "use_bias": true,
104 | "bias_initializer": {
105 | "class_name": "Zeros",
106 | "config": {}
107 | },
108 | "bias_regularizer": null,
109 | "activity_regularizer": null,
110 | "bias_constraint": null,
111 | "name": "conv2d_Conv2D2",
112 | "trainable": true
113 | }
114 | },
115 | {
116 | "class_name": "MaxPooling2D",
117 | "config": {
118 | "pool_size": [
119 | 2,
120 | 2
121 | ],
122 | "padding": "valid",
123 | "strides": [
124 | 2,
125 | 2
126 | ],
127 | "data_format": "channels_last",
128 | "name": "max_pooling2d_MaxPooling2D2",
129 | "trainable": true
130 | }
131 | },
132 | {
133 | "class_name": "Conv2D",
134 | "config": {
135 | "filters": 32,
136 | "kernel_initializer": {
137 | "class_name": "VarianceScaling",
138 | "config": {
139 | "scale": 1,
140 | "mode": "fan_avg",
141 | "distribution": "normal",
142 | "seed": null
143 | }
144 | },
145 | "kernel_regularizer": null,
146 | "kernel_constraint": null,
147 | "kernel_size": [
148 | 3,
149 | 3
150 | ],
151 | "strides": [
152 | 1,
153 | 1
154 | ],
155 | "padding": "valid",
156 | "data_format": "channels_last",
157 | "dilation_rate": [
158 | 1,
159 | 1
160 | ],
161 | "activation": "relu",
162 | "use_bias": true,
163 | "bias_initializer": {
164 | "class_name": "Zeros",
165 | "config": {}
166 | },
167 | "bias_regularizer": null,
168 | "activity_regularizer": null,
169 | "bias_constraint": null,
170 | "name": "conv2d_Conv2D3",
171 | "trainable": true
172 | }
173 | },
174 | {
175 | "class_name": "Flatten",
176 | "config": {
177 | "name": "flatten_Flatten1",
178 | "trainable": true
179 | }
180 | },
181 | {
182 | "class_name": "Dense",
183 | "config": {
184 | "units": 64,
185 | "activation": "relu",
186 | "use_bias": true,
187 | "kernel_initializer": {
188 | "class_name": "VarianceScaling",
189 | "config": {
190 | "scale": 1,
191 | "mode": "fan_avg",
192 | "distribution": "normal",
193 | "seed": null
194 | }
195 | },
196 | "bias_initializer": {
197 | "class_name": "Zeros",
198 | "config": {}
199 | },
200 | "kernel_regularizer": null,
201 | "bias_regularizer": null,
202 | "activity_regularizer": null,
203 | "kernel_constraint": null,
204 | "bias_constraint": null,
205 | "name": "dense_Dense1",
206 | "trainable": true
207 | }
208 | },
209 | {
210 | "class_name": "Dense",
211 | "config": {
212 | "units": 40,
213 | "activation": "softmax",
214 | "use_bias": true,
215 | "kernel_initializer": {
216 | "class_name": "VarianceScaling",
217 | "config": {
218 | "scale": 1,
219 | "mode": "fan_avg",
220 | "distribution": "normal",
221 | "seed": null
222 | }
223 | },
224 | "bias_initializer": {
225 | "class_name": "Zeros",
226 | "config": {}
227 | },
228 | "kernel_regularizer": null,
229 | "bias_regularizer": null,
230 | "activity_regularizer": null,
231 | "kernel_constraint": null,
232 | "bias_constraint": null,
233 | "name": "dense_Dense2",
234 | "trainable": true
235 | }
236 | }
237 | ]
238 | },
239 | "keras_version": "tfjs-layers 3.9.0",
240 | "backend": "tensor_flow.js"
241 | },
242 | "format": "layers-model",
243 | "generatedBy": "TensorFlow.js tfjs-layers v3.9.0",
244 | "convertedBy": null,
245 | "weightsManifest": [
246 | {
247 | "paths": [
248 | "./model.weights.bin"
249 | ],
250 | "weights": [
251 | {
252 | "name": "conv2d_Conv2D1/kernel",
253 | "shape": [
254 | 3,
255 | 3,
256 | 1,
257 | 16
258 | ],
259 | "dtype": "float32"
260 | },
261 | {
262 | "name": "conv2d_Conv2D1/bias",
263 | "shape": [
264 | 16
265 | ],
266 | "dtype": "float32"
267 | },
268 | {
269 | "name": "conv2d_Conv2D2/kernel",
270 | "shape": [
271 | 3,
272 | 3,
273 | 16,
274 | 32
275 | ],
276 | "dtype": "float32"
277 | },
278 | {
279 | "name": "conv2d_Conv2D2/bias",
280 | "shape": [
281 | 32
282 | ],
283 | "dtype": "float32"
284 | },
285 | {
286 | "name": "conv2d_Conv2D3/kernel",
287 | "shape": [
288 | 3,
289 | 3,
290 | 32,
291 | 32
292 | ],
293 | "dtype": "float32"
294 | },
295 | {
296 | "name": "conv2d_Conv2D3/bias",
297 | "shape": [
298 | 32
299 | ],
300 | "dtype": "float32"
301 | },
302 | {
303 | "name": "dense_Dense1/kernel",
304 | "shape": [
305 | 288,
306 | 64
307 | ],
308 | "dtype": "float32"
309 | },
310 | {
311 | "name": "dense_Dense1/bias",
312 | "shape": [
313 | 64
314 | ],
315 | "dtype": "float32"
316 | },
317 | {
318 | "name": "dense_Dense2/kernel",
319 | "shape": [
320 | 64,
321 | 40
322 | ],
323 | "dtype": "float32"
324 | },
325 | {
326 | "name": "dense_Dense2/bias",
327 | "shape": [
328 | 40
329 | ],
330 | "dtype": "float32"
331 | }
332 | ]
333 | }
334 | ]
335 | }
--------------------------------------------------------------------------------
/src/data/model.weights.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/src/data/model.weights.bin
--------------------------------------------------------------------------------
/src/data/test.buffer:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/src/data/test.buffer
--------------------------------------------------------------------------------
/src/data/test.json:
--------------------------------------------------------------------------------
1 | {"indexs":[33,36,12,22,39,14,8,28,30,35,5,25,11,16,0,3,6,26,4,27,19,31,2,15,10,34,20,9,21,13,37,7,17,1,29,18,38,32,23,24,19,18,21,13,7,0,36,35,26,1,15,33,17,28,9,37,14,29,34,2,16,22,11,3,38,24,30,23,31,32,25,27,5,4,12,39,10,6,20,8,37,8,11,28,16,10,23,24,0,27,32,22,29,20,19,38,12,25,17,2,31,33,35,5,15,3,13,26,39,30,6,9,21,36,4,7,1,14,18,34,28,6,27,30,0,3,31,1,8,20,10,22,34,35,16,11,32,39,23,15,38,29,13,33,19,5,18,26,21,2,24,9,14,7,12,25,37,4,17,36,25,14,2,38,15,6,28,34,33,27,1,31,26,16,18,17,10,39,7,21,23,11,29,36,8,20,3,19,37,0,30,24,12,13,4,35,22,9,5,32,33,10,34,2,25,18,23,17,5,27,9,6,26,20,36,30,4,22,28,12,15,13,32,14,37,38,0,21,1,3,39,24,7,8,16,11,31,29,35,19,29,5,14,3,35,0,15,12,4,6,1,21,25,30,24,34,13,18,7,9,11,23,20,32,36,10,19,26,8,28,31,2,16,38,27,37,17,39,22,33,9,31,12,21,20,27,35,1,22,11,33,18,8,39,32,7,15,28,24,19,0,25,29,10,38,14,5,13,26,4,3,16,6,2,37,34,30,23,36,17,13,10,34,36,22,38,0,1,21,20,27,7,16,9,5,24,2,30,29,14,11,15,17,8,26,37,31,3,28,33,25,18,23,32,6,19,4,35,12,39,7,19,0,27,12,29,15,6,20,13,28,31,9,21,25,34,5,32,33,4,17,24,8,39,1,37,2,22,38,30,26,3,23,10,36,18,14,35,16,11,9,12,1,18,32,34,28,39,6,26,38,25,31,0,23,19,24,11,27,3,35,36,37,8,2,20,4,13,15,30,5,21,17,14,7,10,29,22,33,16,18,21,9,28,27,20,26,2,39,17,19,12,24,8,7,3,13,16,11,32,22,30,29,5,37,36,25,34,38,15,6,33,4,1,14,0,10,23,35,31,3,19,36,37,31,5,9,7,15,1,16,23,14,39,33,35,12,26,34,13,25,32,17,20,24,11,2,38,18,29,4,8,6,30,28,27,0,10,21,22,25,4,32,36,1,7,12,27,31,9,5,2,14,21,26,15,3,18,28,23,22,19,11,37,35,16,24,6,17,30,13,20,29,0,39,8,38,33,34,10,14,13,5,27,38,6,15,4,19,32,8,16,9,26,30,35,3,25,21,10,17,1,33,11,22,20,36,18,7,34,12,2,29,23,31,39,28,0,37,24,10,36,22,14,11,25,7,33,4,35,19,15,0,27,2,37,18,39,12,29,1,8,3,9,17,32,38,28,24,34,13,6,26,20,31,23,30,16,21,5,31,36,7,38,4,35,15,0,11,19,3,29,23,30,32,37,2,39,21,33,8,9,5,34,22,27,26,28,20,17,12,24,13,25,6,14,16,18,10,1,28,38,25,1,35,22,39,13,12,14,21,0,2,7,18,24,3,11,36,4,27,8,32,33,16,17,26,20,5,15,34,6,30,31,9,29,37,23,10,19,21,28,12,1,8,13,37,35,24,9,5,15,30,3,29,32,34,22,26,10,36,2,23,18,39,17,19,31,6,7,0,11,25,38,4,33,20,16,14,27,32,0,13,11,39,12,14,18,17,31,23,7,6,36,5,26,2,29,3,10,15,35,1,8,33,37,25,19,30,9,38,4,16,22,27,20,24,34,21,28,35,12,32,26,17,4,1,16,38,18,25,34,9,15,28,7,3,20,2,30,22,23,13,14,21,36,11,31,0,39,37,27,19,6,10,33,5,8,24,29,35,38,14,33,17,5,27,22,19,24,39,15,34,21,12,7,10,30,6,25,11,31,8,37,18,2,20,4,29,23,28,36,3,26,16,1,13,32,9,0,38,39,8,19,27,17,26,13,31,36,32,3,12,7,20,4,2,37,1,25,34,9,15,16,22,14,0,29,30,6,35,23,18,5,33,24,28,21,11,10,29,9,21,36,18,4,13,10,16,23,26,39,2,3,24,30,27,14,11,38,31,12,19,1,17,22,35,33,37,6,28,32,8,20,25,15,5,34,0,7,12,0,36,37,28,4,19,3,23,27,35,26,10,21,11,1,8,14,2,30,34,24,16,15,13,17,39,31,32,5,9,22,25,33,20,18,38,29,6,7,6,15,24,3,19,26,5,1,33,35,21,31,39,22,25,12,34,27,23,8,30,28,38,9,2,17,0,14,16,29,32,36,18,4,7,10,37,11,13,20,0,5,36,22,2,28,15,14,37,38,26,21,35,11,33,9,1,20,18,19,17,32,25,12,31,30,13,24,7,10,29,27,3,16,8,6,23,4,39,34,7,1,11,2,34,38,17,14,16,15,30,21,4,8,12,26,31,27,39,29,36,18,10,9,33,22,28,32,3,24,5,37,25,13,19,0,35,23,20,6,28,18,27,2,25,0,23,21,32,5,39,10,20,11,30,33,36,17,34,4,6,8,37,1,26,14,31,15,29,16,12,7,22,24,13,19,9,3,38,35,28,22,36,17,1,13,4,12,14,23,21,0,11,30,26,6,18,19,7,31,15,32,33,34,25,20,5,8,39,38,16,3,2,35,9,27,10,29,24,37,32,23,8,36,3,38,13,19,14,20,1,39,17,18,22,11,10,31,4,7,27,35,21,24,30,0,5,2,15,16,26,12,25,33,6,34,29,28,37,9,12,29,31,4,10,9,20,8,21,18,0,11,7,32,16,28,34,1,19,30,25,37,33,36,26,24,14,17,2,5,22,13,15,39,35,38,23,3,6,27,32,8,19,16,31,25,13,
4,27,24,36,30,29,21,38,12,5,1,39,6,3,37,34,23,11,14,18,26,20,7,0,35,28,15,22,10,2,9,33,17,4,39,15,5,9,1,35,38,18,34,22,26,27,17,25,24,21,16,37,20,19,28,8,7,29,10,3,12,0,11,23,2,36,13,6,32,14,31,33,30,11,31,19,18,33,29,14,0,26,35,38,27,34,21,12,15,24,30,36,17,32,23,10,28,16,13,39,8,1,2,3,6,7,9,22,20,25,4,37,5,27,39,5,1,20,33,15,6,38,24,31,19,29,26,0,37,11,14,8,12,4,21,16,18,22,13,28,2,17,9,7,3,34,25,35,10,30,36,23,32,10,18,36,7,11,17,37,2,16,35,9,39,29,38,20,32,22,3,21,4,23,25,34,5,26,13,31,27,12,6,33,14,19,15,1,24,28,0,8,30,38,10,1,8,18,0,32,23,22,37,12,31,28,14,11,6,30,2,24,7,27,5,39,17,16,29,36,3,21,34,33,9,15,35,25,19,20,4,26,13,25,34,12,24,36,4,13,15,33,39,7,6,3,0,17,8,10,9,22,21,2,5,20,29,30,31,27,37,32,18,26,28,16,19,14,11,38,35,1,23,33,18,1,31,36,10,39,29,27,13,35,9,19,20,12,7,24,17,26,6,4,15,21,38,16,22,23,25,8,3,2,34,14,28,11,32,30,37,5,0],"count":1600,"width":28,"height":28,"buffer":"test.buffer"}
--------------------------------------------------------------------------------
/src/data/train.buffer:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kinglisky/zelda-words/2adc6e49eaf3446e19d6f5d5923155d323f77eaf/src/data/train.buffer
--------------------------------------------------------------------------------
/src/data/words.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "symbol": "a",
4 | "path": "sheikah-icon/a.svg"
5 | },
6 | {
7 | "symbol": "b",
8 | "path": "sheikah-icon/b.svg"
9 | },
10 | {
11 | "symbol": "c",
12 | "path": "sheikah-icon/c.svg"
13 | },
14 | {
15 | "symbol": "d",
16 | "path": "sheikah-icon/d.svg"
17 | },
18 | {
19 | "symbol": "e",
20 | "path": "sheikah-icon/e.svg"
21 | },
22 | {
23 | "symbol": "f",
24 | "path": "sheikah-icon/f.svg"
25 | },
26 | {
27 | "symbol": "g",
28 | "path": "sheikah-icon/g.svg"
29 | },
30 | {
31 | "symbol": "h",
32 | "path": "sheikah-icon/h.svg"
33 | },
34 | {
35 | "symbol": "i",
36 | "path": "sheikah-icon/i.svg"
37 | },
38 | {
39 | "symbol": "j",
40 | "path": "sheikah-icon/j.svg"
41 | },
42 | {
43 | "symbol": "k",
44 | "path": "sheikah-icon/k.svg"
45 | },
46 | {
47 | "symbol": "l",
48 | "path": "sheikah-icon/l.svg"
49 | },
50 | {
51 | "symbol": "m",
52 | "path": "sheikah-icon/m.svg"
53 | },
54 | {
55 | "symbol": "n",
56 | "path": "sheikah-icon/n.svg"
57 | },
58 | {
59 | "symbol": "o",
60 | "path": "sheikah-icon/o.svg"
61 | },
62 | {
63 | "symbol": "p",
64 | "path": "sheikah-icon/p.svg"
65 | },
66 | {
67 | "symbol": "q",
68 | "path": "sheikah-icon/q.svg"
69 | },
70 | {
71 | "symbol": "r",
72 | "path": "sheikah-icon/r.svg"
73 | },
74 | {
75 | "symbol": "s",
76 | "path": "sheikah-icon/s.svg"
77 | },
78 | {
79 | "symbol": "t",
80 | "path": "sheikah-icon/t.svg"
81 | },
82 | {
83 | "symbol": "u",
84 | "path": "sheikah-icon/u.svg"
85 | },
86 | {
87 | "symbol": "v",
88 | "path": "sheikah-icon/v.svg"
89 | },
90 | {
91 | "symbol": "w",
92 | "path": "sheikah-icon/w.svg"
93 | },
94 | {
95 | "symbol": "x",
96 | "path": "sheikah-icon/x.svg"
97 | },
98 | {
99 | "symbol": "y",
100 | "path": "sheikah-icon/y.svg"
101 | },
102 | {
103 | "symbol": "z",
104 | "path": "sheikah-icon/z.svg"
105 | },
106 | {
107 | "symbol": "0",
108 | "path": "sheikah-icon/0.svg"
109 | },
110 | {
111 | "symbol": "1",
112 | "path": "sheikah-icon/1.svg"
113 | },
114 | {
115 | "symbol": "2",
116 | "path": "sheikah-icon/2.svg"
117 | },
118 | {
119 | "symbol": "3",
120 | "path": "sheikah-icon/3.svg"
121 | },
122 | {
123 | "symbol": "4",
124 | "path": "sheikah-icon/4.svg"
125 | },
126 | {
127 | "symbol": "5",
128 | "path": "sheikah-icon/5.svg"
129 | },
130 | {
131 | "symbol": "6",
132 | "path": "sheikah-icon/6.svg"
133 | },
134 | {
135 | "symbol": "7",
136 | "path": "sheikah-icon/7.svg"
137 | },
138 | {
139 | "symbol": "8",
140 | "path": "sheikah-icon/8.svg"
141 | },
142 | {
143 | "symbol": "9",
144 | "path": "sheikah-icon/9.svg"
145 | },
146 | {
147 | "symbol": ".",
148 | "path": "sheikah-icon/period.svg"
149 | },
150 | {
151 | "symbol": "-",
152 | "path": "sheikah-icon/hyphen.svg"
153 | },
154 | {
155 | "symbol": "!",
156 | "path": "sheikah-icon/exclam.svg"
157 | },
158 | {
159 | "symbol": "?",
160 | "path": "sheikah-icon/question.svg"
161 | }
162 | ]
--------------------------------------------------------------------------------
/src/main.ts:
--------------------------------------------------------------------------------
1 | import { createApp } from 'vue';
2 | import App from './App.vue';
3 | import registScript from './components/WordIcon/regist-script';
4 |
5 | registScript('//at.alicdn.com/t/font_2375469_s4wmtifuqro.js');
6 |
7 | createApp(App).mount('#app');
8 |
--------------------------------------------------------------------------------
/src/shims-vue.d.ts:
--------------------------------------------------------------------------------
1 | declare module '*.vue' {
2 | import { DefineComponent } from 'vue'
3 | const component: DefineComponent<{}, {}, any>
4 | export default component
5 | }
6 |
--------------------------------------------------------------------------------
/src/utils/export-image.ts:
--------------------------------------------------------------------------------
1 | import domtoimage from 'dom-to-image';
2 |
3 | const IS_MOBILE = /Android|iPhone|webOS|BlackBerry|SymbianOS|Windows Phone|iPad|iPod/i.test(window.navigator.userAgent);
4 |
5 | // fix svg icon dependencies inside the node
6 | function fixSvgIconNode(node: HTMLBaseElement): boolean {
7 | if (node instanceof SVGElement) {
8 | const useNodes = Array.from(node.querySelectorAll('use') || []);
9 | useNodes.forEach((use: SVGUseElement) => {
10 | const id = use.getAttribute('xlink:href');
11 |             // inline the <symbol> nodes the svg depends on into the current svg node
12 | if (id && !node.querySelector(id)) {
13 | const symbolNode = document.querySelector(id);
14 | if (symbolNode) {
15 | node.insertBefore(
16 | symbolNode.cloneNode(true),
17 | node.children[0]
18 | );
19 | }
20 | }
21 | });
22 | }
23 | return true;
24 | }
25 | interface ExportOptions {
26 | size: number,
27 | width: number,
28 | height: number,
29 | message: string,
30 | vertical: boolean,
31 | fontColor: string,
32 | backgroundColor: string,
33 | }
34 |
35 | export default async function exportImage(node: HTMLBaseElement | null, options: ExportOptions): Promise {
36 | if (!node) {
37 | return Promise.resolve('');
38 | }
39 |
40 | const dataUrl = await domtoimage.toPng(node, {
41 | filter: (n: any) => fixSvgIconNode(n),
42 | quality: 1
43 | });
44 |
45 | if (!IS_MOBILE) {
46 | const link = document.createElement('a');
47 | link.download = `zelda-words-${Date.now()}.png`;
48 | link.href = dataUrl;
49 | link.click();
50 | return '';
51 | }
52 | return dataUrl;
53 | }
54 |
--------------------------------------------------------------------------------
/src/utils/image-info.ts:
--------------------------------------------------------------------------------
1 | const BIT_LENGTH = 24;
2 | const MAX_IOS_AREA_SIZE = 16777216 * 0.5;
3 | const MAX_CAVAS_AREA_SIZE = 16000 * 16000 * 0.5;
4 | const IS_IOS = !!window.navigator.userAgent.match(/\(i[^;]+;( U;)? CPU.+Mac OS X/);
5 |
6 | function paddingLfet(bits: string) {
7 | return ('00000000' + bits).slice(-8);
8 | }
9 |
10 | function getGcd(a: number, b: number): number {
11 | let max = Math.max(a, b);
12 | let min = Math.min(a, b);
13 | if (max % min === 0) {
14 | return min;
15 | } else {
16 | return getGcd(max % min, min);
17 | }
18 | }
19 |
20 | function getLcm(a: number, b: number) {
21 | return (a * b) / getGcd(a, b);
22 | }
23 |
24 | function colorOffset(hex: string, alpha: number, offset: number) {
25 | const result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
26 | if (result) {
27 | const c = [
28 | parseInt(result[1], 16),
29 | parseInt(result[2], 16),
30 | parseInt(result[3], 16),
31 | ].map(v => {
32 | const nv = v + offset;
33 | return nv % 255 !== nv ? v - offset : nv;
34 | }).join(',');
35 | return `rgba(${c},${alpha})`;
36 | }
37 | return '';
38 | }
39 |
40 | function loadImage(url: string): Promise<HTMLImageElement> {
41 | return new Promise((resolve, reject) => {
42 | const img = new Image();
43 | img.onload = () => resolve(img);
44 | img.onerror = (e) => reject(e);
45 | img.src = url;
46 | });
47 | };
48 |
49 | function createCavans(width: number, height: number) {
50 | const canvas = document.createElement('canvas');
51 | canvas.width = width;
52 | canvas.height = height;
53 | return canvas;
54 | }
55 |
56 | function putToCanvas(pixels: ImageData) {
57 | const canvas = document.createElement('canvas');
58 | canvas.width = pixels.width;
59 | canvas.height = pixels.height;
60 | const ctx = canvas.getContext('2d');
61 | ctx.putImageData(pixels, 0, 0);
62 | return canvas;
63 | }
64 |
65 | interface Options {
66 | size: number,
67 | width: number,
68 | height: number,
69 | message?: string,
70 | vertical: boolean,
71 | fontColor?: string,
72 | backgroundColor: string,
73 | }
74 |
75 | function createHeadMeta(options: Options) {
76 | console.log('createHeadMeta', options);
77 | const verticalBit = paddingLeft(Number(options.vertical).toString(2));
78 | const sizeBit = paddingLeft(options.size.toString(2));
79 | const sizeCount = Math.floor(options.width / options.size);
80 | if (sizeCount > 255) {
81 | throw new Error('Image is too large!');
82 | }
83 | const widthBit = paddingLeft(sizeCount.toString(2));
84 | const bits = verticalBit + sizeBit + widthBit;
85 | // take the least common multiple of the image width and 24 (BIT_LENGTH)
86 | const canvasWidth = getLcm(options.width, BIT_LENGTH);
87 | const canvasHeight = Math.floor(canvasWidth / options.width * options.size);
88 | const canvas = createCavans(canvasWidth, canvasHeight);
89 | const ctx = canvas.getContext('2d');
90 | const chunkWidth = canvasWidth / BIT_LENGTH;
91 | const rgba = colorOffset(options.backgroundColor, 1, 2);
92 | ctx.fillStyle = rgba;
93 | for (let i = 0; i < BIT_LENGTH; i++) {
94 | if (Number(bits[i])) {
95 | ctx.fillRect(i * chunkWidth, 0, chunkWidth, canvasHeight);
96 | }
97 | }
98 | return canvas;
99 | }
100 |
101 | export function writeMetaInfo(pixels: Uint8ClampedArray, options: Options) {
102 | const baseCanvas = putToCanvas(new ImageData(pixels, options.width, options.height));
103 | const headCanvas = createHeadMeta(options);
104 | const baseCtx = baseCanvas.getContext('2d');
105 | const dh = Math.floor(baseCanvas.width / headCanvas.width * headCanvas.height);
106 | baseCtx.drawImage(headCanvas, 0, 0, headCanvas.width, headCanvas.height, 0, 0, baseCanvas.width, dh);
107 | return baseCanvas;
108 | }
109 |
110 | function readHeadInfo(ctx: CanvasRenderingContext2D, width: number, ratio: number) {
111 | const chunkSize = Math.floor(width / BIT_LENGTH);
112 | const chunkValue = [];
113 | for (let i = 0; i < BIT_LENGTH; i++) {
114 | const imageData = ctx.getImageData(i * chunkSize, 0, chunkSize, 4);
115 | const x = Math.floor(chunkSize / 2);
116 | const y = 2;
117 | const index = (y * chunkSize + x) * 4;
118 | chunkValue.push(imageData.data[index] + imageData.data[index + 1] + imageData.data[index + 2]);
119 | }
120 | const [head] = chunkValue;
121 | const bits = chunkValue.map(v => v === head ? 0 : 1).join('');
122 | const vertical = parseInt(bits.slice(0, 8), 2);
123 | const size = parseInt(bits.slice(8, 16), 2);
124 | const sizeCount = parseInt(bits.slice(16), 2);
125 | return {
126 | vertical: !!vertical,
127 | size: Math.round(size * ratio),
128 | width: Math.round(size * sizeCount * ratio),
129 | };
130 | }
131 |
132 | const toGray = (data: ImageData) => {
133 | const calculateGray = (r: number, g: number, b: number) =>
134 | Math.floor(r * 0.299 + g * 0.587 + b * 0.114);
135 | for (let x = 0; x < data.width; x++) {
136 | for (let y = 0; y < data.height; y++) {
137 | const idx = (x + y * data.width) * 4;
138 | const r = data.data[idx + 0];
139 | const g = data.data[idx + 1];
140 | const b = data.data[idx + 2];
141 | const gray = calculateGray(r, g, b);
142 | data.data[idx + 0] = gray;
143 | data.data[idx + 1] = gray;
144 | data.data[idx + 2] = gray;
145 | data.data[idx + 3] = 255;
146 | }
147 | }
148 | return data;
149 | };
150 |
151 | // image threshold from the average pixel value
152 | const average = (data: Uint8ClampedArray) => {
153 | let sum = 0;
154 | // the image is grayscale, so reading the first channel is enough
155 | for (let i = 0; i < data.length - 1; i += 4) {
156 | sum += data[i];
157 | }
158 | return Math.round(sum / (data.length / 4));
159 | };
160 |
161 | // Otsu's method for choosing the image threshold
162 | const otsu = (data: Uint8ClampedArray) => {
163 | let ptr = 0;
164 | let histData = Array(256).fill(0); // histogram of the 256 gray levels (0-255), all counts start at 0
165 | let total = data.length;
166 |
167 | while (ptr < total) {
168 | let h = data[ptr++];
169 | histData[h]++;
170 | }
171 |
172 | let sum = 0; // weighted sum (gray level x count) over all pixels
173 | for (let i = 0; i < 256; i++) {
174 | sum += i * histData[i];
175 | }
176 |
177 | let wB = 0; // number of background pixels (below the threshold)
178 | let wF = 0; // number of foreground pixels (above the threshold)
179 | let sumB = 0; // weighted sum (gray level x count) of the background
180 | let varMax = 0; // largest between-class variance seen so far
181 | let threshold = 0; // the resulting threshold
182 |
183 | for (let t = 0; t < 256; t++) {
184 | wB += histData[t]; // accumulate the background (below-threshold) pixel count
185 | if (wB === 0) continue;
186 | wF = total - wB; // the remaining pixels are foreground (above threshold)
187 | if (wF === 0) break;
188 |
189 | sumB += t * histData[t]; // accumulate the background weighted sum (gray level x count)
190 |
191 | let mB = sumB / wB; // mean gray level of the background
192 | let mF = (sum - sumB) / wF; // mean gray level of the foreground
193 |
194 | let varBetween = wB * wF * (mB - mF) ** 2; // between-class variance
195 |
196 | if (varBetween > varMax) {
197 | varMax = varBetween;
198 | threshold = t;
199 | }
200 | }
201 |
202 | return threshold;
203 | };
204 |
205 | // normalize every image to white glyphs on a black background
206 | function unitizeImageData(imageData: ImageData) {
207 | const grayImageData = toGray(imageData)
208 | const { width, height, data } = grayImageData;
209 | const threshold = otsu(data);
210 | const colors = data[0] > threshold ? [0, 255] : [255, 0];
211 | for (let i = 0; i < width; i++) {
212 | for (let j = 0; j < height; j++) {
213 | const index = (j * width + i) * 4;
214 | const v = data[index] > threshold ? colors[0] : colors[1]
215 | data[index] = v;
216 | data[index + 1] = v;
217 | data[index + 2] = v;
218 | data[index + 3] = 255;
219 | }
220 | }
221 | return imageData;
222 | }
223 |
224 | function binaryzationOutput(imageData: ImageData) {
225 | const grayImageData = toGray(imageData);
226 | const { width, height, data } = grayImageData;
227 | const threshold = average(data);
228 | const value = data[0] > threshold ? [0, 1] : [1, 0];
229 | const hash = new Uint8Array(width * height);
230 | for (let i = 0; i < width; i++) {
231 | for (let j = 0; j < height; j++) {
232 | const index = (j * width + i);
233 | const v = data[index * 4] > threshold ? value[0] : value[1];
234 | hash.set([v], index);
235 | }
236 | }
237 | return hash;
238 | }
239 | interface SplitOptions {
240 | width: number,
241 | height: number,
242 | size: number,
243 | fingerprint: number,
244 | }
245 |
246 | function resizeImageData(imageData: ImageData, inputSize: number, outputSize: number) {
247 | const inputCanvas = createCavans(inputSize, inputSize);
248 | const inputCtx = inputCanvas.getContext('2d');
249 | inputCtx.putImageData(imageData, 0, 0);
250 | const outputCavans = createCavans(outputSize, outputSize);
251 | const outputCtx = outputCavans.getContext('2d');
252 | outputCtx.drawImage(inputCanvas, 0, 0, inputSize, inputSize, 0, 0, outputSize, outputSize);
253 | const outputImageData = outputCtx.getImageData(0, 0, outputSize, outputSize);
254 | return outputImageData;
255 | }
256 |
257 | function splitGrid(ctx: CanvasRenderingContext2D, options: SplitOptions) {
258 | const {
259 | width,
260 | height,
261 | size,
262 | fingerprint,
263 | } = options;
264 |
265 | const w = Math.floor(width / size);
266 | const h = Math.floor(height / size);
267 | console.log({ w, h });
268 | const grids = Array.from({ length: w * h }).fill(null);
269 | // scan cells 1 .. w-2 / 1 .. h-2 to skip the blank border cells
270 | for (let i = 1; i < w - 1; i++) {
271 | for (let j = 1; j < h - 1; j++) {
272 | const imageData = ctx.getImageData(i * size, j * size, size, size);
273 | const resizeData = resizeImageData(imageData, size, fingerprint);
274 | const outputHash = binaryzationOutput(resizeData);
275 | const index = j * w + i;
276 | grids[index] = outputHash;
277 | }
278 | }
279 | return {
280 | grids: grids.filter(it => !!it),
281 | row: h - 2,
282 | col: w - 2,
283 | };
284 | }
285 |
286 | async function getImageFingerprint(url: string) {
287 | const image = await loadImage(url);
288 | const { naturalWidth, naturalHeight } = image;
289 | let canvasWidth = getLcm(naturalWidth, 24);
290 | let ratio = canvasWidth / naturalWidth;
291 | let canvasHeight = Math.round(ratio * naturalHeight);
292 | const area = canvasWidth * canvasHeight;
293 | // fix ios canvas max area size
294 | if (IS_IOS && area > MAX_IOS_AREA_SIZE) {
295 | const resizeRatio = MAX_IOS_AREA_SIZE / area;
296 | canvasWidth = Math.floor(canvasWidth * resizeRatio);
297 | canvasHeight = Math.floor(canvasHeight * resizeRatio);
298 | ratio = canvasWidth / naturalWidth;
299 | }
300 |
301 | // desktop canvas area limit
302 | if (area > MAX_CAVAS_AREA_SIZE) {
303 | const resizeRatio = MAX_CAVAS_AREA_SIZE / area;
304 | canvasWidth = Math.floor(canvasWidth * resizeRatio);
305 | canvasHeight = Math.floor(canvasHeight * resizeRatio);
306 | ratio = canvasWidth / naturalWidth;
307 | }
308 |
309 | const canvas = createCavans(canvasWidth, canvasHeight);
310 | const ctx = canvas.getContext('2d');
311 | ctx.drawImage(image, 0, 0, naturalWidth, naturalHeight, 0, 0, canvasWidth, canvasHeight);
312 | const headMeta = readHeadInfo(ctx, canvasWidth, ratio);
313 | console.log('readHeadInfo', headMeta);
314 | const { grids, row, col } = splitGrid(ctx, {
315 | width: canvasWidth,
316 | height: canvasHeight,
317 | size: headMeta.size,
318 | fingerprint: 8,
319 | });
320 | return {
321 | hashList: grids,
322 | row,
323 | col,
324 | headMeta,
325 | };
326 | }
327 |
328 | const WORDS = 'abcdefghijklmnopqrstuvwxyz0123456789.-!?';
329 |
330 | function createSymbols(hashList: Array<Uint8Array>) {
331 | return hashList.map((hash, index) => {
332 | return {
333 | name: WORDS[index],
334 | value: hash,
335 | };
336 | });
337 | }
338 |
339 | const hammingDistance = (hash1: Uint8Array, hash2: Uint8Array) => {
340 | let count = 0;
341 | hash1.forEach((it, index) => {
342 | count += it ^ hash2[index];
343 | });
344 | return count;
345 | };
346 |
347 | function mapToSymbol(hashList: Array<Uint8Array>, symbols: Array<any>) {
348 | return hashList.map(hash => {
349 | const isEmpty = hash.every((v: number) => v === hash[0]);
350 | if (isEmpty) {
351 | return ' ';
352 | }
353 | let diff = Number.MAX_SAFE_INTEGER;
354 | let target = ' ';
355 | symbols.forEach(symbol => {
356 | const distance = hammingDistance(hash, symbol.value);
357 | // a hamming distance of 5 or more marks the similarity deviation as too large, so exclude it
358 | if (distance < diff && distance < 5) {
359 | diff = distance;
360 | target = symbol.name;
361 | }
362 | });
363 | return target;
364 | });
365 | }
366 |
367 | function printfSymbol(words: Array<string>, options: any) {
368 | const { row, col, vertical } = options;
369 | console.log(words);
370 | if (words.every(w => w === ' ')) {
371 | return 'The text and background colors are too similar to detect any characters!';
372 | }
373 | let message = '';
374 | if (vertical) {
375 | for (let i = 0; i < col; i++) {
376 | for (let j = 0; j < row; j++) {
377 | const index = j * col + i;
378 | message += words[index];
379 | }
380 | message += '\n';
381 | }
382 | } else {
383 | for (let i = 0; i < row; i++) {
384 | for (let j = 0; j < col; j++) {
385 | const index = i * col + j;
386 | message += words[index];
387 | }
388 | message += '\n';
389 | }
390 | }
391 | return message;
392 | }
393 |
394 | export async function readMetaInfo(imageUrl: string, mapUrl: string) {
395 | const mapFingerprint = await getImageFingerprint(mapUrl);
396 | const symbols = createSymbols(mapFingerprint.hashList);
397 | const imageFingerprint = await getImageFingerprint(imageUrl);
398 | const words = mapToSymbol(imageFingerprint.hashList, symbols);
399 | return printfSymbol(words, {
400 | row: imageFingerprint.row,
401 | col: imageFingerprint.col,
402 | vertical: imageFingerprint.headMeta.vertical,
403 | });
404 | }
--------------------------------------------------------------------------------
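To make the header scheme in `image-info.ts` concrete: `createHeadMeta` packs three 8-bit fields (vertical flag, font size, glyph cells per row) into 24 bits and paints the 1-bits as stripes whose color is offset by 2 from the background, and `readHeadInfo` recovers them by comparing each of the 24 chunks against the first chunk (the first bit is always 0, since the vertical flag never exceeds 1). A self-contained sketch of just the bit packing, with illustrative sample values:

```ts
// pad each field to 8 bits and concatenate, mirroring createHeadMeta above
const pad8 = (bits: string) => ('00000000' + bits).slice(-8);

function encodeHead(vertical: boolean, size: number, width: number): string {
    const sizeCount = Math.floor(width / size); // glyph cells per row, must fit in 8 bits
    return (
        pad8(Number(vertical).toString(2)) +
        pad8(size.toString(2)) +
        pad8(sizeCount.toString(2))
    );
}

function decodeHead(bits: string) {
    return {
        vertical: !!parseInt(bits.slice(0, 8), 2),
        size: parseInt(bits.slice(8, 16), 2),
        sizeCount: parseInt(bits.slice(16), 2),
    };
}

console.log(encodeHead(false, 40, 400));
// -> '000000000010100000001010'
console.log(decodeHead('000000000010100000001010'));
// -> { vertical: false, size: 40, sizeCount: 10 }
```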
/src/utils/image-ocr.ts:
--------------------------------------------------------------------------------
1 | import * as tf from '@tensorflow/tfjs';
2 | import words from '../data/words.json';
3 |
4 | function toGray(data: ImageData) {
5 | const calculateGray = (r: number, g: number, b: number) =>
6 | Math.floor(r * 0.299 + g * 0.587 + b * 0.114);
7 | const res = [];
8 | for (let x = 0; x < data.width; x++) {
9 | for (let y = 0; y < data.height; y++) {
10 | const idx = (x + y * data.width) * 4;
11 | const r = data.data[idx + 0];
12 | const g = data.data[idx + 1];
13 | const b = data.data[idx + 2];
14 | const gray = calculateGray(r, g, b);
15 | res.push(gray);
16 | }
17 | }
18 | return res;
19 | }
20 |
21 | function otsu(imgData: ImageData) {
22 | const grayData = toGray(imgData);
23 | let ptr = 0;
24 | let histData = Array(256).fill(0);
25 | let total = grayData.length;
26 |
27 | while (ptr < total) {
28 | let h = 0xff & grayData[ptr++];
29 | histData[h]++;
30 | }
31 |
32 | let sum = 0;
33 | for (let i = 0; i < 256; i++) {
34 | sum += i * histData[i];
35 | }
36 |
37 | let wB = 0;
38 | let wF = 0;
39 | let sumB = 0;
40 | let varMax = 0;
41 | let threshold = 0;
42 |
43 | for (let t = 0; t < 256; t++) {
44 | wB += histData[t];
45 | if (wB === 0) continue;
46 | wF = total - wB;
47 | if (wF === 0) break;
48 |
49 | sumB += t * histData[t];
50 |
51 | let mB = sumB / wB;
52 | let mF = (sum - sumB) / wF;
53 |
54 | let varBetween = wB * wF * (mB - mF) ** 2;
55 |
56 | if (varBetween > varMax) {
57 | varMax = varBetween;
58 | threshold = t;
59 | }
60 | }
61 |
62 | return threshold;
63 | }
64 |
65 | // normalize every image to white glyphs on a black background
66 | function unitizeImageData(imageData: ImageData) {
67 | const { width, height, data } = imageData;
68 | const threshold = otsu(imageData);
69 | const head = (data[0] + data[1] + data[2]) / 3 | 0;
70 | const colors = head > threshold ? [0, 255] : [255, 0];
71 | const output = new ImageData(width, height);
72 | for (let i = 0; i < width; i++) {
73 | for (let j = 0; j < height; j++) {
74 | const index = (j * width + i) * 4;
75 | const avg = (data[index] + data[index + 1] + data[index + 2]) / 3 | 0;
76 | const v = avg > threshold ? colors[0] : colors[1];
77 | output.data[index] = v;
78 | output.data[index + 1] = v;
79 | output.data[index + 2] = v;
80 | output.data[index + 3] = 255;
81 | }
82 | }
83 | return output;
84 | }
85 |
86 | function loadImage(url: string): Promise<HTMLImageElement> {
87 | return new Promise((resolve, reject) => {
88 | const img = new Image();
89 | img.crossOrigin = 'anonymous';
90 | img.onload = () => resolve(img);
91 | img.onerror = (e) => reject(e);
92 | img.src = url;
93 | });
94 | }
95 |
96 | function createCavans(width: number, height: number) {
97 | const canvas = document.createElement('canvas');
98 | canvas.width = width;
99 | canvas.height = height;
100 | return canvas;
101 | }
102 |
103 | function countPixel(imageData: ImageData, isRow: boolean = false) {
104 | const { width, height, data } = imageData;
105 | const offsets = [0, 1, 2];
106 | const head = offsets.map((i) => data[i]);
107 | const pixel = [];
108 | if (isRow) {
109 | for (let i = 0; i < height; i++) {
110 | let count = 0;
111 | for (let j = 0; j < width; j++) {
112 | const index = (i * width + j) * 4;
113 | const isEqual = offsets.every(
114 | (offset) => head[offset] === data[index + offset]
115 | );
116 | count += isEqual ? 0 : 1;
117 | }
118 | pixel.push(count);
119 | }
120 | } else {
121 | for (let i = 0; i < width; i++) {
122 | let count = 0;
123 | for (let j = 0; j < height; j++) {
124 | const index = (j * width + i) * 4;
125 | const isEqual = offsets.every(
126 | (offset) => head[offset] === data[index + offset]
127 | );
128 | count += isEqual ? 0 : 1;
129 | }
130 | pixel.push(count);
131 | }
132 | }
133 | return pixel;
134 | }
135 |
136 | type Rang = {
137 | foreground?: boolean;
138 | background?: boolean;
139 | value: number;
140 | };
141 |
142 | function countRanges(counts: Array<number>): Array<Rang> {
143 | const groups = [];
144 | let foreground = 0;
145 | let background = 0;
146 | counts.forEach((count) => {
147 | if (count) {
148 | foreground += 1;
149 | if (background) {
150 | groups.push({ background: true, value: background });
151 | background = 0;
152 | }
153 | } else {
154 | background += 1;
155 | if (foreground) {
156 | groups.push({ foreground: true, value: foreground });
157 | foreground = 0;
158 | }
159 | }
160 | });
161 | if (foreground) {
162 | groups.push({ foreground: true, value: foreground });
163 | }
164 | if (background) {
165 | groups.push({ background: true, value: background });
166 | }
167 | return groups;
168 | }
169 |
170 | function getMaxRange(data: Array<Rang>) {
171 | return data.reduce((max, it) => {
172 | if (it.foreground) {
173 | return Math.max(max, it.value);
174 | }
175 | return max;
176 | }, 0);
177 | }
178 |
179 | function mergeRanges(data: Array<Rang>, size: number): Array<Rang> {
180 | const merge: any[] = [];
181 | let chunks: any[] = [];
182 | data.forEach((item) => {
183 | if (chunks.length) {
184 | chunks.push(item);
185 | const value = chunks.reduce((sum, chunk) => sum + chunk.value, 0);
186 | if (value >= size || Math.pow(value - size, 2) < 4) {
187 | merge.push({
188 | foreground: true,
189 | value,
190 | });
191 | chunks = [];
192 | }
193 | return;
194 | }
195 | if (item.foreground && item.value < size) {
196 | chunks = [item];
197 | return;
198 | }
199 | merge.push(item);
200 | });
201 | return merge;
202 | }
203 |
204 | function createChunks(data: Array<Rang>): Array<any> {
205 | const chunks: any[] = [];
206 | let offset = 0;
207 | data.forEach((item) => {
208 | if (item.foreground) {
209 | chunks.push({
210 | offset,
211 | size: item.value,
212 | });
213 | }
214 | offset += item.value;
215 | });
216 | return chunks;
217 | }
218 |
219 | type Chunk = {
220 | x: number;
221 | y: number;
222 | width: number;
223 | height: number;
224 | canvas: HTMLCanvasElement;
225 | data?: Float32Array,
226 | };
227 |
228 | function splitImage(image: HTMLImageElement, log: boolean): Array<Chunk> {
229 | const { naturalWidth: width, naturalHeight: height } = image;
230 | const canvas = createCavans(width, height);
231 | const ctx = canvas.getContext('2d');
232 | ctx.drawImage(image, 0, 0);
233 | const imageData = unitizeImageData(ctx.getImageData(0, 0, width, height));
234 | const unitizeCanvas = createCavans(width, height);
235 | const unitizeCtx = unitizeCanvas.getContext('2d');
236 | unitizeCtx.putImageData(imageData, 0, 0);
237 |
238 | const rowsPixels = countPixel(imageData, true);
239 | const colsPixels = countPixel(imageData, false);
240 |
241 | if (log) {
242 | console.log('rowsPixels:', JSON.stringify(rowsPixels));
243 | console.log('colsPixels:', JSON.stringify(colsPixels));
244 | }
245 |
246 | // scan row by row
247 | const rowsRanges = countRanges(rowsPixels);
248 | // scan column by column
249 | const colsRanges = countRanges(colsPixels);
250 |
251 | // derive the glyph size from the row/column pixel distributions (glyphs occupy square regions)
252 | const fontRange = Math.max(
253 | getMaxRange(rowsRanges),
254 | getMaxRange(colsRanges)
255 | );
256 |
257 | const rowsChunks = createChunks(mergeRanges(rowsRanges, fontRange));
258 | const res: any[] = [];
259 | rowsChunks.forEach((row) => {
260 | const rowImageData = unitizeCtx.getImageData(
261 | 0,
262 | row.offset,
263 | width,
264 | row.size
265 | );
266 | const rowRanges = countRanges(countPixel(rowImageData, false));
267 | const rowChunks = createChunks(mergeRanges(rowRanges, fontRange));
268 | rowChunks.forEach((item) => {
269 | const itemCanvas = createCavans(item.size, row.size);
270 | const itemCtx = <CanvasRenderingContext2D>(
271 | itemCanvas.getContext('2d')
272 | );
273 | const itemImageData = unitizeCtx.getImageData(
274 | item.offset,
275 | row.offset,
276 | item.size,
277 | row.size
278 | );
279 | itemCtx.putImageData(itemImageData, 0, 0);
280 | res.push({
281 | x: item.offset,
282 | y: row.offset,
283 | width: item.size,
284 | height: row.size,
285 | canvas: itemCanvas,
286 | });
287 | });
288 | });
289 | return res;
290 | }
291 |
292 | function binaryzationOutput(imageData: ImageData) {
293 | const { width, height, data } = imageData;
294 | const threshold = otsu(imageData);
295 | const head = (data[0] + data[1] + data[2]) / 3 | 0;
296 | const value = head > threshold ? [0, 1] : [1, 0];
297 | const hash = new Uint8Array(width * height);
298 | for (let i = 0; i < width; i++) {
299 | for (let j = 0; j < height; j++) {
300 | const index = j * width + i;
301 | const v = data[index * 4] > threshold ? value[0] : value[1];
302 | hash.set([v], index);
303 | }
304 | }
305 | return hash;
306 | }
307 |
308 | function resizeCanvas(inputCanvas: HTMLCanvasElement, size: number) {
309 | const outputCavans = createCavans(size, size);
310 | const outputCtx = outputCavans.getContext('2d');
311 | outputCtx.drawImage(
312 | inputCanvas,
313 | 0,
314 | 0,
315 | inputCanvas.width,
316 | inputCanvas.height,
317 | 0,
318 | 0,
319 | size,
320 | size
321 | );
322 | return outputCtx.getImageData(0, 0, size, size);
323 | }
324 |
325 | async function createImageFingerprints(image: HTMLImageElement, log: boolean) {
326 | const contents = splitImage(image, log);
327 | return contents.map(({ canvas, ...args }) => {
328 | const imageData = resizeCanvas(canvas, 16);
329 | const hash = binaryzationOutput(imageData);
330 | return {
331 | ...args,
332 | hash,
333 | };
334 | });
335 | }
336 |
337 | function createSymbols(fingerprints: Array<any>) {
338 | const WORDS = 'abcdefghijklmnopqrstuvwxyz0123456789.-!?';
339 | return fingerprints.map((it, index) => {
340 | return {
341 | name: WORDS[index],
342 | value: it.hash,
343 | };
344 | });
345 | }
346 |
347 | function hammingDistance(hash1: Uint8Array, hash2: Uint8Array) {
348 | let count = 0;
349 | hash1.forEach((it, index) => {
350 | count += it ^ hash2[index];
351 | });
352 | return count;
353 | }
354 |
355 | function mapSymbols(fingerprints: Array<any>, symbols: Array<any>) {
356 | return fingerprints.map(({ hash, ...position }) => {
357 | const isEmpty = hash.every((v: number) => v === hash[0]);
358 | if (isEmpty) {
359 | return ' ';
360 | }
361 | let diff = Number.MAX_SAFE_INTEGER;
362 | let word = '*';
363 | symbols.forEach((symbol) => {
364 | const distance = hammingDistance(hash, symbol.value);
365 | // a smaller hamming distance means higher similarity; keep the closest symbol
366 | if (distance < diff) {
367 | diff = distance;
368 | word = symbol.name;
369 | }
370 | });
371 | return {
372 | ...position,
373 | word,
374 | diff,
375 | };
376 | });
377 | }
378 |
379 | function printfSymbols(
380 | results: Array<any>,
381 | width: number,
382 | height: number
383 | ): string {
384 | const canvas = createCavans(width, height);
385 | const ctx = canvas.getContext('2d');
386 | const head = results[0];
387 | ctx.fillStyle = '#000';
388 | ctx.strokeStyle = '#000';
389 | ctx.font = `${Math.floor(head.width)}px -apple-system, Arial, sans-serif`;
390 | ctx.textAlign = 'center';
391 | ctx.textBaseline = 'middle';
392 | results.forEach((item) => {
393 | ctx.fillText(
394 | item.word,
395 | item.x + Math.round(item.width / 2),
396 | item.y + Math.round(item.height / 2),
397 | item.width
398 | );
399 | });
400 | return canvas.toDataURL();
401 | }
402 |
403 | export async function readMetaInfo(imageUrl: string, mapUrl: string) {
404 | const mapImage = await loadImage(mapUrl);
405 | const mapImageFingerprints = await createImageFingerprints(mapImage, false);
406 | const symbols = createSymbols(mapImageFingerprints);
407 | const readImage = await loadImage(imageUrl);
408 | const readImageFingerprints = await createImageFingerprints(
409 | readImage,
410 | true,
411 | );
412 | const results = mapSymbols(readImageFingerprints, symbols);
413 | if (results.length) {
414 | return printfSymbols(
415 | results,
416 | readImage.naturalWidth,
417 | readImage.naturalHeight
418 | );
419 | }
420 | window.alert('Unable to parse the image');
421 | throw new Error('PARSE ERROR');
422 | }
423 |
424 |
425 | function convertToPredictData(images: Chunk[], imageSize: number) {
426 | images.forEach(it => {
427 | const imageData = resizeCanvas(it.canvas, imageSize);
428 | const pixs = new Float32Array(imageData.data.length / 4);
429 | let index = 0;
430 | // convert rgb to grayscale
431 | for (let i = 0; i < imageData.data.length; i += 4) {
432 | const r = imageData.data[i];
433 | const g = imageData.data[i + 1];
434 | const b = imageData.data[i + 2];
435 | pixs[index] = (r * 0.299 + g * 0.587 + b * 0.114) / 255;
436 | index += 1;
437 | }
438 | it.data = pixs;
439 | });
440 | const shape: [number, number, number, number] = [images.length, imageSize, imageSize, 1];
441 | const shapeSize = tf.util.sizeFromShape(shape);
442 | const concatData = new Float32Array(shapeSize);
443 | images.forEach((image, index) => {
444 | concatData.set(image.data as Float32Array, index * imageSize * imageSize);
445 | });
446 | return tf.tensor4d(concatData, shape);
447 | }
448 |
449 | export async function readMetaInfoByCnn(imageUrl: string) {
450 | const modelURL = import.meta.env.DEV ? 'model.json' : '/zelda-words/model.json';
451 | const imageSize = 28;
452 | const readImage = await loadImage(imageUrl);
453 | // split the individual Sheikah glyphs out of the image
454 | const images = splitImage(readImage, false);
455 | // convert them into the tensor format the model expects
456 | const predictData = convertToPredictData(images, imageSize);
457 | // load the trained model
458 | const model = await tf.loadLayersModel(modelURL);
459 | const output = model.predict(predictData) as tf.Tensor;
460 | const axis = 1;
461 | // take the index of the highest-probability class for each glyph
462 | const predictIndexs = Array.from(output.argMax(axis).dataSync());
463 | // map each index back to its target character
464 | const results = predictIndexs.map((predictIndex, index) => {
465 | const target = words[predictIndex];
466 | return {
467 | ...images[index],
468 | word: target.symbol,
469 | };
470 |
471 | });
472 | console.log('results', results);
473 | if (results.length) {
474 | return printfSymbols(
475 | results,
476 | readImage.naturalWidth,
477 | readImage.naturalHeight
478 | );
479 | }
480 | window.alert('Unable to parse the image');
481 | throw new Error('PARSE ERROR');
482 | }
483 |
--------------------------------------------------------------------------------
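The segmentation in `splitImage` above rests on projection profiles: `countPixel` counts the foreground pixels per row (then per column within each row band), `countRanges` turns those counts into alternating foreground/background runs, and `createChunks` emits the offset and size of each foreground run. A simplified, self-contained variant of that runs logic, with made-up sample data:

```ts
type Run = { foreground: boolean; value: number };

// collapse per-column pixel counts into alternating foreground/background runs
function toRuns(counts: number[]): Run[] {
    const runs: Run[] = [];
    counts.forEach((count) => {
        const foreground = count > 0;
        const last = runs[runs.length - 1];
        if (last && last.foreground === foreground) {
            last.value += 1;
        } else {
            runs.push({ foreground, value: 1 });
        }
    });
    return runs;
}

// foreground-pixel counts per column for a 12px-wide strip holding two 4px glyphs
console.log(toRuns([0, 0, 3, 4, 4, 2, 0, 0, 1, 4, 4, 3]));
// -> [ { foreground: false, value: 2 }, { foreground: true, value: 4 },
//      { foreground: false, value: 2 }, { foreground: true, value: 4 } ]
// i.e. glyph columns at offsets 2 and 8, each 4px wide, which is what createChunks reports
```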
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "esnext",
4 | "module": "esnext",
5 | "moduleResolution": "node",
6 | "strict": true,
7 | "jsx": "preserve",
8 | "sourceMap": true,
9 | "resolveJsonModule": true,
10 | "esModuleInterop": true,
11 | "lib": ["esnext", "dom"],
12 | "types": ["vite/client"]
13 | },
14 | "include": ["src/**/*.ts", "src/**/*.d.ts", "src/**/*.tsx", "src/**/*.vue", "src/components/WordIcon/icon-map.ts", "src/cnn/index.js"]
15 | }
16 |
--------------------------------------------------------------------------------
/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vite';
2 | import vue from '@vitejs/plugin-vue';
3 | import * as path from 'path';
4 | // https://vitejs.dev/config/
5 | export default defineConfig({
6 | base: '/zelda-words/',
7 |
8 | assetsInclude: 'buffer',
9 |
10 | build: {
11 | outDir: 'docs',
12 | rollupOptions: {
13 | input: {
14 | index: path.resolve(__dirname, 'index.html'),
15 | cnn: path.resolve(__dirname, 'cnn.html')
16 | }
17 | }
18 | },
19 | plugins: [vue()],
20 | });
21 |
--------------------------------------------------------------------------------