├── LICENSE
├── README.md
├── assets
├── cat_beard.png
├── face_pattern.jpg
├── rotate.jpg
├── sample.jpg
├── sample.mp4
└── sunglass.glb
├── index.html
├── package_image_tracker
├── camera
│ ├── camera.css
│ ├── camera.html
│ └── camera.js
├── photo
│ ├── photo.css
│ ├── photo.html
│ └── photo.js
└── utils
│ ├── ImageTracker.js
│ ├── imageBusiness.js
│ └── modelBusiness.js
├── package_opencv_tracker
├── assets
│ ├── opencv3.4.16.wasm
│ └── opencv_exec.js
├── camera
│ ├── camera.css
│ ├── camera.html
│ └── camera.js
├── photo
│ ├── photo.css
│ ├── photo.html
│ └── photo.js
└── utils
│ ├── imageTracker.js
│ └── modelBusiness.js
├── package_video_tracker
├── camera
│ ├── camera.css
│ ├── camera.html
│ └── camera.js
├── photo
│ ├── photo.css
│ ├── photo.html
│ └── photo.js
└── utils
│ ├── ImageTracker.js
│ └── imageBusiness.js
├── screenshot
├── 1-1.jpg
├── 1-2.jpg
├── 1-3.jpg
├── 4-1.jpg
├── 4-2.jpg
├── 5-1.jpg
├── 5-2.jpg
└── index.jpg
├── style
├── app.css
└── weui.css
└── third_party
├── GLTFLoader.js
├── jsfeat.js
├── three.js
├── tracking.js
└── vue.min.js
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2021, AR Fashion
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 1. [Chinese README](https://zhuanlan.zhihu.com/p/72617098)
2 |
3 | 2. [Chinese Source Code Analysis](https://zhuanlan.zhihu.com/p/74438078)
4 |
5 | ## Updated
6 |
7 | | Date | Update |
8 | | -- | -- |
 9 | | 2021-12-11 | New: Added an Image Tracker using OpenCV WebAssembly. Please see the "Image AR using OpenCV". |
10 | | 2021-08-15 | New: Added a video mask mode for image AR. This is a CSS 3D transformation, which does not require three.js. |
11 | | 2021-07-21 | New: This project is the same as https://github.com/sanyuered/WeChat-MiniProgram-WebAR, but it is a pure front-end website. |
12 |
13 | ## Visit a live example
14 |
15 | https://sanyuered.github.io/WebAR/
16 |
17 | ## Introduction of Web AR
18 |
19 | This is a Web AR Demo.
20 |
21 | We can create AR effects by using "tracking.js" and "jsfeat" library.
22 |
23 | The "tracking.js" brings computer vision algorithms and techniques into browser environment. The "jsfeat" is also a JavaScript computer vision library. We can do real-time image and face detection.
24 |
25 | [tracking.js](https://trackingjs.com/) and [JSFeat](https://inspirit.github.io/jsfeat/)
26 |
27 | Index Page of the WeChat Mini-program
28 |
29 | 
30 |
31 | ## Image AR and 3D Mask
32 |
33 | Use the demo to scan a pattern image below.
34 |
35 | 
36 |
37 | A cat beard is on the pattern image.
38 |
39 | 
40 |
41 | An effect of translating and scaling.
42 |
43 | 
44 |
45 | An effect of rotating.
46 |
47 | 
48 |
49 | ## Image AR using OpenCV
50 |
51 | This is the same as above. Supports image rotation.
52 |
53 | The image is rotated by 30 degrees.
54 |
55 | 
56 |
57 | The image is rotated by 90 degrees.
58 |
59 | 
60 |
61 | ## Image AR and Video Mask
62 |
63 | Use the demo to scan a rotating image below.
64 |
65 | 
66 |
67 | A video player is on and aligned with the image.
68 |
69 | 
70 |
71 | ## How to replace the 2D mask "cat beard"
72 |
73 | You may replace the default url of an image for the 2D mask.
74 |
75 | File: /package_image_tracker/photo/photo.js and
76 | /package_image_tracker/camera/camera.js
77 |
78 | ```javascript
79 | const modelUrl = '../../assets/cat_beard.png';
80 | ```
81 |
82 | ## How to replace the pattern image
83 |
84 | File: /package_image_tracker/utils/imageBusiness.js
85 |
86 | ```javascript
87 | const patternImageUrl = '../../assets/face_pattern.jpg';
88 | ```
89 |
90 | A pattern image is below.
91 |
92 | 
93 |
94 | ## How to put an image on another position
95 |
96 | Select a track point on a pattern image, the point is used to set the "cat beard" image.
97 |
98 | File: /package_image_tracker/utils/modelBusiness.js
99 |
100 | ```javascript
101 | // a index of a track point on a pattern image
102 | const trackPoint = {
103 | x: 186, // the width of the pattern image is 375
104 | y: 140, // the height of the pattern image is 375
105 | };
106 | ```
107 |
108 |
--------------------------------------------------------------------------------
/assets/cat_beard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/assets/cat_beard.png
--------------------------------------------------------------------------------
/assets/face_pattern.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/assets/face_pattern.jpg
--------------------------------------------------------------------------------
/assets/rotate.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/assets/rotate.jpg
--------------------------------------------------------------------------------
/assets/sample.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/assets/sample.jpg
--------------------------------------------------------------------------------
/assets/sample.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/assets/sample.mp4
--------------------------------------------------------------------------------
/assets/sunglass.glb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/assets/sunglass.glb
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
Image AR using OpenCV
18 |
19 |
20 |
This is a 2D Feature Tracking demo..
21 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
Image AR and 3D Mask
41 |
42 |
43 |
This is a NTF(Natural Feature Tracking) demo.
44 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
Image AR and Video Mask
64 |
65 |
66 |
This is a NTF(Natural Feature Tracking) demo.
67 |
79 |
80 |
81 |
82 |
83 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
--------------------------------------------------------------------------------
/package_image_tracker/camera/camera.css:
--------------------------------------------------------------------------------
1 | .page__bd_spacing{
2 | padding-top:0;
3 | padding-left: 0;
4 | padding-right: 0;
5 | }
6 |
7 | .page__bd{
8 | text-align: center;
9 | }
10 |
11 | .videoPlayer{
12 | width: 375px;
13 | height: 450px;
14 | margin: auto;
15 | }
16 |
17 | .camera{
18 | position: absolute;
19 | }
20 |
21 | .canvas2d {
22 | position:fixed;
23 | left:1000px;
24 | }
25 |
26 | .hiddenCanvas{
27 | position:fixed;
28 | left:1000px;
29 | }
30 |
31 |
--------------------------------------------------------------------------------
/package_image_tracker/camera/camera.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | Image Detecting and 3D Mask
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
Detect
28 |
29 |
Pattern Image:
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
Loading...
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
--------------------------------------------------------------------------------
/package_image_tracker/camera/camera.js:
--------------------------------------------------------------------------------
// Camera page of the image-tracker demo: grabs frames from the device
// camera, runs 2D feature detection on each frame, and poses a 3D mask
// (a textured plane) over the detected pattern image.
import * as image from '../utils/imageBusiness.js';
import * as model from '../utils/modelBusiness.js';
// DOM ids of the 2D capture canvas and the WebGL overlay canvas
const canvasId = 'canvas2d';
const canvasWebGLId = 'canvasWebGL';
// a url of a image used as the 2D mask texture
const modelUrl = '../../assets/cat_beard.png';
// delay between processed frames (ms); it should be more than detect time
const frameTimeout = 250;
const canvasWidth = 375;
const canvasHeight = 375;

var app = new Vue({
    el: '#app',
    data: {
        isShowLoadingToast: false,
        isButtonDisabled: false,
        // status line shown to the user
        notice: '',
    },
    methods: {
        // Detect the pattern in one video frame and update the 3D mask.
        async processVideo(frame) {
            var _that = this;
            const ctx = document.getElementById(canvasId).getContext('2d');
            // draw a video frame on a 2d canvas
            ctx.drawImage(frame, 0, 0, canvasWidth, canvasHeight);
            // get a video frame from a 2d canvas
            var res = ctx.getImageData(0, 0, canvasWidth, canvasHeight)

            // process start
            image.detect(res.data,
                canvasWidth,
                canvasHeight,
                function (event) {
                    var result = event.data;

                    if (result && result.prediction) {
                        // set the rotation and position of the 3d model.
                        model.setModel(result.prediction,
                            canvasWidth,
                            canvasHeight);
                        _that.notice = "detect: " + result.prediction.goodMatch + " points, " + result.end + ' ms.';
                    } else {
                        // set the default position
                        model.setModelOnDefaultposition();
                        var message = 'No results.';
                        _that.notice = message;
                        console.log('detect:', message);
                    }
                });
            // process end
        },
        // Open the rear camera and start the frame-processing loop.
        async takePhoto() {
            if (!navigator.mediaDevices) {
                var msg = 'No navigator.mediaDevices, needs a https site.';
                this.notice = msg;
                console.log('takePhoto', msg);
                return
            }

            if (this.isButtonDisabled) {
                return
            }

            const stream = await navigator.mediaDevices.getUserMedia({
                video: {
                    width: canvasWidth,
                    height: canvasHeight,
                    // prefer the rear-facing camera
                    facingMode: 'environment',
                }
            });
            var inputData = document.getElementById("inputData");
            inputData.srcObject = stream;
            await this.onVideoPlay();

        },
        // Frame loop: process the current frame (when the video is
        // actually playing), then reschedule itself.
        async onVideoPlay() {
            var inputData = document.getElementById("inputData");
            // check the state of the video player
            if (!inputData.paused && !inputData.ended) {
                await this.processVideo(inputData);
            }

            setTimeout(this.onVideoPlay, frameTimeout);
        },
        // Load the 3D mask and initialize the pattern tracker.
        load() {
            this.isButtonDisabled = true;

            // load 3d model
            model.initThree(canvasWebGLId,
                modelUrl,
                canvasWidth,
                canvasHeight);
            image.initTracker();

            this.isButtonDisabled = false;
        },
    },
    mounted: function () {
        this.load();
    },
})
101 |
102 |
--------------------------------------------------------------------------------
/package_image_tracker/photo/photo.css:
--------------------------------------------------------------------------------
1 |
2 | .page__bd_spacing{
3 | padding-top:0;
4 | padding-left: 0;
5 | padding-right: 0;
6 | }
7 |
8 | .page__bd{
9 | text-align: center;
10 | }
11 |
12 | .imagePlayer{
13 | width: 375px;
14 | height: 450px;
15 | margin: auto;
16 | }
17 |
18 | .camera{
19 | width: 375px;
20 | position: absolute;
21 | }
22 |
23 | .canvasWebGL{
24 | position: absolute;
25 | }
26 |
27 | .canvas2d {
28 | position:fixed;
29 | left:1000px;
30 | }
31 |
32 | .hiddenCanvas{
33 | position:fixed;
34 | left:1000px;
35 | }
36 |
--------------------------------------------------------------------------------
/package_image_tracker/photo/photo.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | Image Detecting and 3D Mask
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
A input image:
28 |
29 |
Detect
32 |
33 |
Pattern Image:
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
Loading...
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
--------------------------------------------------------------------------------
/package_image_tracker/photo/photo.js:
--------------------------------------------------------------------------------
// Photo page of the image-tracker demo: the user uploads a still image,
// the pattern is detected in it, and a 3D mask is posed over it.
import * as image from '../utils/imageBusiness.js';
import * as model from '../utils/modelBusiness.js';
const canvasId = 'canvas2d';
const canvasWebGLId = 'canvasWebGL';
// input images wider than this are scaled down before detection
const maxCanvasWidth = 375;
// a url of a image used as the 2D mask texture
const modelUrl = '../../assets/cat_beard.png';

var app = new Vue({
    el: '#app',
    data: {
        isShowLoadingToast: false,
        isButtonDisabled: false,
        // status line shown to the user
        notice: '',
    },
    methods: {
        // Detect the pattern in the uploaded photo and update the mask.
        processPhoto(photo, imageWidth, imageHeight) {
            var _that = this;
            const ctx = document.getElementById(canvasId).getContext('2d');
            // clamp the working width, preserving aspect ratio below
            var canvasWidth = imageWidth;
            if (canvasWidth > maxCanvasWidth) {
                canvasWidth = maxCanvasWidth;
            }
            // canvas Height
            var canvasHeight = Math.floor(canvasWidth * (imageHeight / imageWidth));

            // draw image on canvas
            ctx.drawImage(photo,
                0, 0, canvasWidth, canvasHeight);
            var res = ctx.getImageData(0, 0, canvasWidth, canvasHeight)

            // process start
            image.detect(res.data,
                canvasWidth,
                canvasHeight,
                function (event) {
                    var result = event.data;

                    if (result && result.prediction) {
                        // set the rotation and position of the 3d model.
                        model.setModel(result.prediction,
                            canvasWidth,
                            canvasHeight);

                        _that.notice = "detect: " + result.prediction.goodMatch + " points, " + result.end + ' ms.';

                    } else {
                        // set the default position
                        model.setModelOnDefaultposition();
                        var message = 'No results.';
                        _that.notice = message;
                        console.log('detect:', message);
                    }
                });
            // process end
        },
        // Run detection on the currently displayed input image.
        takePhoto() {
            if (this.isButtonDisabled) {
                return
            }

            const inputData = document.getElementById('inputData');
            this.processPhoto(inputData,
                inputData.width,
                inputData.height);
        },
        // Load the 3D mask and initialize the pattern tracker.
        load() {
            this.isButtonDisabled = true;
            const inputData = document.getElementById("inputData");
            // load 3d model
            model.initThree(canvasWebGLId,
                modelUrl,
                inputData.width,
                inputData.height);
            image.initTracker();

            this.isButtonDisabled = false;
        },
    },
    mounted: function () {
        this.load();
    },
})

// change input image: show the chosen file in the input <img> element
// via an object URL so takePhoto() can process it.
document.getElementById("uploaderInput").addEventListener("change", function (e) {
    var files = e.target.files;
    if (files.length == 0) {
        return
    }
    var url = window.URL || window.webkitURL;
    var src;
    if (url) {
        src = url.createObjectURL(files[0]);
    }
    var inputData = document.getElementById("inputData");
    inputData.src = src;
});
99 |
--------------------------------------------------------------------------------
/package_image_tracker/utils/ImageTracker.js:
--------------------------------------------------------------------------------
// FAST corner-detector threshold (higher = fewer, stronger corners).
const fastThreshold = 20;
// blur radius applied before feature extraction
const blurRadius = 2;
// BRIEF descriptor length in bits
const descriptorLength = 128;
// minimum RANSAC inliers required to accept a detection
const minMatchNumber = 15;

// custom tracker built on tracking.js: matches a pyramid of resampled
// pattern images against camera frames.
var ImageTracker = function (patternImageArray) {
    ImageTracker.base(this, 'constructor');
    if (patternImageArray) {
        this.setPattern(patternImageArray);
    }
}

tracking.inherits(ImageTracker, tracking.Tracker);
/**
 * Run one detection pass over a frame.
 * @param {Uint8ClampedArray} pixels - RGBA frame data.
 * @param {number} width - frame width in pixels.
 * @param {number} height - frame height in pixels.
 * @returns {{data: Object}|undefined} detection results, or undefined
 *     when no pattern has been configured yet.
 */
ImageTracker.prototype.track = function (pixels, width, height) {
    const patterns = this.getPattern();
    if (!patterns) {
        console.log('Pattern not specified.');
        return;
    }

    // time the whole matching pass and attach the elapsed time
    const startTime = new Date();
    const results = this.trackImage_(patterns, pixels, width, height);
    const elapsed = new Date() - startTime;
    console.log('detect', elapsed, 'ms');
    results.end = elapsed;

    // optional: also notify any 'track' listeners
    this.emit('track', {
        data: results,
    });

    return {
        data: results,
    };
};
Object.assign(ImageTracker.prototype, {
    // Store the array of resampled pattern images to match against.
    setPattern(patternImageArray) {
        this.pattern = patternImageArray;
    },
    // Return the currently configured pattern images (or undefined).
    getPattern() {
        return this.pattern;
    },
});
/**
 * Estimate the pattern-to-frame homography from matched keypoints
 * using RANSAC, then refit on the inliers only.
 * @param {Array} matches - tracking.js reciprocal matches
 *     ({keypoint1, keypoint2}; keypoint1 is in the resampled pattern
 *     level, keypoint2 is in the camera frame).
 * @param {number} patternWidth - width of the resampled pattern level.
 * @param {number} patternHeight - height of the resampled pattern level.
 * @param {number} originPatternWidth - width of the original pattern.
 * @param {number} originPatternHeight - height of the original pattern.
 * @returns {{transform: Object, goodMatch: number, landmarks: Array}}
 *     transform: jsfeat 3x3 matrix; goodMatch: inlier count;
 *     landmarks: inlier points in frame coordinates.
 */
ImageTracker.prototype.calcTransform = function (matches,
    patternWidth,
    patternHeight,
    originPatternWidth,
    originPatternHeight) {
    var ransac = jsfeat.motion_estimator.ransac;
    var homography2d_kernel = new jsfeat.motion_model.homography2d();
    var transform = new jsfeat.matrix_t(3, 3, jsfeat.F32_t | jsfeat.C1_t);
    var from = [];
    var to = [];
    var newFrom = [];
    var newTo = [];
    var count = matches.length;

    // Both originPatternWidth and originPatternHeight are the origin size of the pattern image.
    // Keypoints found on a resampled level are mapped back to the
    // original pattern's coordinate system before estimation.
    var widthRatio = originPatternWidth / patternWidth;
    var heightRatio = originPatternHeight / patternHeight;

    for (var i = 0; i < count; i++) {
        var match = matches[i];
        from[i] = {
            x: match.keypoint1[0] * widthRatio,
            y: match.keypoint1[1] * heightRatio,
        };
        to[i] = {
            x: match.keypoint2[0],
            y: match.keypoint2[1]
        };
    }

    // per-point inlier mask, filled in by RANSAC
    var mask = new jsfeat.matrix_t(count, 1, jsfeat.U8_t | jsfeat.C1_t);
    // minimum points to estimate motion
    var model_size = 4;
    // max error to classify as inlier
    var thresh = 3;
    // max outliers ratio
    var eps = 0.5;
    // probability of success
    var prob = 0.99;
    var params = new jsfeat.ransac_params_t(model_size, thresh, eps, prob);
    var max_iters = 1000;
    var goodMatch = 0;
    var isOK = ransac(params, homography2d_kernel, from, to, count, transform, mask, max_iters);

    if (isOK) {
        // keep only the inliers for the final refit
        newFrom = from.filter(function (item, index) {
            return mask.data[index];
        });
        newTo = to.filter(function (item, index) {
            return mask.data[index];
        });
        goodMatch = newFrom.length;
    }
    // NOTE(review): when RANSAC fails this still refits with empty point
    // lists (goodMatch === 0); callers filter on goodMatch afterwards,
    // but confirm jsfeat tolerates a zero-point run.
    homography2d_kernel.run(newFrom, newTo, transform, goodMatch);

    return {
        transform: transform,
        goodMatch: goodMatch,
        landmarks: newTo,
    };

};
109 |
/**
 * Match every resampled pattern level against the camera frame and
 * return the first level that produces enough RANSAC inliers.
 * @param {Array} patterns - pyramid of {pixels, width, height} levels;
 *     patterns[0] is the original size.
 * @param {Uint8ClampedArray} pixels - RGBA frame data.
 * @param {number} width - frame width.
 * @param {number} height - frame height.
 * @returns {{prediction: ?Object}} prediction is null when no level
 *     reaches minMatchNumber inliers.
 */
ImageTracker.prototype.trackImage_ = function (patterns, pixels, width, height) {
    tracking.Brief.N = descriptorLength;
    tracking.Fast.THRESHOLD = fastThreshold;
    var transformDataArray = [];
    // camera frame: blur -> grayscale -> FAST corners -> BRIEF descriptors
    var blur2 = tracking.Image.blur(pixels, width, height, blurRadius);
    var gray2 = tracking.Image.grayscale(blur2, width, height);
    var corners2 = tracking.Fast.findCorners(gray2, width, height);
    var descriptors2 = tracking.Brief.getDescriptors(gray2, width, corners2);
    var goodIndex = -1;

    // try each pyramid level of the pattern image, largest first
    for (var i = 0; i < patterns.length; i++) {
        var pattern = patterns[i];
        // blur
        var blur1 = tracking.Image.blur(pattern.pixels, pattern.width, pattern.height, blurRadius);
        // grayscale
        var gray1 = tracking.Image.grayscale(blur1, pattern.width, pattern.height);
        // find corners
        var corners1 = tracking.Fast.findCorners(gray1, pattern.width, pattern.height);
        // get descriptors
        var descriptors1 = tracking.Brief.getDescriptors(gray1, pattern.width, corners1);
        // match corners between this pattern level and the frame
        var matches = tracking.Brief.reciprocalMatch(corners1, descriptors1, corners2, descriptors2);
        // estimate the homography; keypoints are rescaled to the
        // original (level 0) pattern size inside calcTransform
        var transformData = this.calcTransform(matches,
            // scale pattern image
            pattern.width,
            pattern.height,
            // origin pattern image
            patterns[0].width,
            patterns[0].height);

        // save data
        transformDataArray.push(transformData);

        // accept the first level with enough inliers
        if (transformDataArray[i].goodMatch >= minMatchNumber) {
            goodIndex = i;
            break;
        }
    }

    if (goodIndex === -1) {
        return {
            prediction: null
        }
    } else {
        var properTransform = transformDataArray[goodIndex];
        return {
            prediction: {
                goodMatch: properTransform.goodMatch,
                transform: properTransform.transform,
                // BUGFIX: was `transformData.landmarks`, which only
                // worked because the loop variable leaked the matching
                // level; read from the selected result instead.
                landmarks: properTransform.landmarks,
            }
        };
    }

};
169 |
--------------------------------------------------------------------------------
/package_image_tracker/utils/imageBusiness.js:
--------------------------------------------------------------------------------
// id of the off-screen canvas used to rasterize the pattern pyramid
const hiddenCanvasId = 'hiddenCanvas';
// pattern image resample levels (depth of the image pyramid)
const resampleLevels = 4;
// pattern image width (after optional downscale)
var patternFrameWidth;
// pattern image height (after optional downscale)
var patternFrameHeight;
// pattern image max width; larger patterns are scaled down
const patternFrameMaxWidth = 375;
// image tracker, created once the pattern pyramid is ready
var tracker = null;
// pattern Image Array: one {pixels, width, height} entry per pyramid level
var patternImageArray = [];
// downscale factor between consecutive pyramid levels
const sc_inc = Math.sqrt(2.0);
// pattern image url: relative url,temp url and network url.
const patternImageUrl = '../../assets/face_pattern.jpg';
18 |
/**
 * Detect the pattern inside one RGBA frame.
 * A no-op (beyond logging) while the tracker is still initializing.
 * @param {Uint8ClampedArray} frame - RGBA pixel data.
 * @param {number} width - frame width.
 * @param {number} height - frame height.
 * @param {Function} [callback] - invoked with the tracker result.
 */
function detect(frame, width, height, callback) {
    if (tracker === null) {
        console.log('detect:', 'waiting for the tracker initing to complete.');
        return;
    }

    const result = tracker.track(frame, width, height);
    if (callback) {
        callback(result);
    }

}
30 |
// Read each pyramid level back from the hidden canvas and build the
// ImageTracker from the collected pixel buffers.
function drawPatternImageCallback(ctx) {
    var imageX = 0;
    var newWidth = patternFrameWidth;
    var newHeight = patternFrameHeight;
    // init
    patternImageArray = [];

    for (var i = 0; i < resampleLevels; i++) {
        // levels were drawn side by side, so read at the running x offset
        var canvasRes = ctx.getImageData(imageX, 0, newWidth, newHeight);

        console.log('resample pattern image', canvasRes.width, canvasRes.height);
        patternImageArray.push({
            pixels: canvasRes.data,
            width: canvasRes.width,
            height: canvasRes.height,
        });

        // resample: next level is 1/sqrt(2) the size of this one
        imageX += newWidth;
        newWidth = Math.round(newWidth / sc_inc);
        newHeight = Math.round(newHeight / sc_inc);
    }

    // init ImageTracker
    tracker = new ImageTracker(patternImageArray);

}
58 |
// Draw all pyramid levels of the pattern image side by side on the
// hidden canvas, then hand the context over for pixel extraction.
function initTrackerCallback(patternImage, newWidth, newHeight) {
    const ctx = document.getElementById(hiddenCanvasId).getContext('2d');

    var imageX = 0;

    for (var i = 0; i < resampleLevels; i++) {
        // draw image on canvas at the running x offset
        ctx.drawImage(patternImage, imageX, 0, newWidth, newHeight);
        // resample: shrink by 1/sqrt(2) for the next level
        imageX += newWidth;
        newWidth = Math.round(newWidth / sc_inc);
        newHeight = Math.round(newHeight / sc_inc);
    }

    drawPatternImageCallback(ctx);
}
75 |
// get pattern image: load it into the hidden <img> element and build
// the tracker once the image has finished loading.
function initTracker() {
    // set pattern image
    var patternImage = document.getElementById("patternImage");
    patternImage.src = patternImageUrl;
    patternImage.addEventListener("load", function () {
        // pattern image width
        patternFrameWidth = patternImage.width;
        // pattern image height
        patternFrameHeight = patternImage.height;

        // reduce image size to increase image process speed
        if (patternFrameWidth > patternFrameMaxWidth) {
            patternFrameWidth = patternFrameMaxWidth;
            // keep the aspect ratio when clamping the width
            patternFrameHeight = (patternImage.height / patternImage.width) * patternFrameMaxWidth;
        }
        // resample width and height
        var newWidth = patternFrameWidth;
        var newHeight = patternFrameHeight;
        initTrackerCallback(patternImage, newWidth, newHeight);


    });
}
100 |
101 | export {
102 | initTracker,
103 | detect,
104 | };
105 |
--------------------------------------------------------------------------------
/package_image_tracker/utils/modelBusiness.js:
--------------------------------------------------------------------------------
// the scale of the model image (plane size in scene units)
const initScale = 300;
// a index of a track point on a pattern image: where the mask is anchored
const trackPoint = {
    x: 185, // the width of the pattern image is 375
    y: 224, // the height of the pattern image is 375
};

// three.js state shared by every function in this module
var camera, scene, renderer;
// the textured plane that acts as the mask
var mainModel;
// current render size in CSS pixels
var canvasWidth, canvasHeight;

// Create the scene on the given WebGL canvas and load the mask texture.
function initThree(canvasWebGLId,
    modelUrl,
    _canvasWidth,
    _canvasHeight) {
    canvasWidth = _canvasWidth;
    canvasHeight = _canvasHeight;

    var canvas_webgl = document.getElementById(canvasWebGLId);
    initScene(canvas_webgl);
    loadModel(modelUrl);
}
24 |
// Build the orthographic camera, lights, and WebGL renderer, then
// start the render loop.
function initScene(canvas_webgl) {
    // frustum bounds are placeholders; setSize() sets the real ones
    camera = new THREE.OrthographicCamera(1, 1, 1, 1, -1000, 1000);
    setSize();
    scene = new THREE.Scene();
    // ambient light
    scene.add(new THREE.AmbientLight(0xffffff));
    // direction light
    var directionallight = new THREE.DirectionalLight(0xffffff, 1);
    directionallight.position.set(0, 0, 1000);
    scene.add(directionallight);

    // init render: alpha keeps the canvas transparent over the video
    renderer = new THREE.WebGLRenderer({
        canvas: canvas_webgl,
        antialias: true,
        alpha: true,
    });
    const devicePixelRatio = window.devicePixelRatio;
    console.log('device pixel ratio', devicePixelRatio);
    renderer.setPixelRatio(devicePixelRatio);
    renderer.setSize(canvasWidth, canvasHeight);

    animate();
}
49 |
// Create the mask: a unit plane textured with the model image, scaled
// to initScale, and add it to the scene as mainModel.
function loadModel(modelUrl) {
    const texture1 = new THREE.TextureLoader().load(modelUrl);
    // transparent keeps the PNG's alpha channel visible
    const material1 = new THREE.MeshBasicMaterial({ map: texture1, transparent: true });
    const geometry1 = new THREE.PlaneGeometry(1, 1);
    const plane1 = new THREE.Mesh(geometry1, material1);
    plane1.scale.setScalar(initScale);
    mainModel = plane1;
    scene.add(mainModel);
    console.log('loadModel', 'success');
}
60 |
/**
 * Replace the current mask with a new textured plane.
 * Behavior is unchanged for callers (mainModel is swapped in the
 * scene); additionally the old model's GPU resources are now released,
 * since three.js does not free geometries/materials/textures on
 * Scene.remove().
 * @param {string} modelUrl - url of the new mask image.
 */
function updateModel(modelUrl) {
    const texture1 = new THREE.TextureLoader().load(modelUrl);
    const material1 = new THREE.MeshBasicMaterial({ map: texture1, transparent: true });
    const geometry1 = new THREE.PlaneGeometry(1, 1);
    const plane1 = new THREE.Mesh(geometry1, material1);
    plane1.scale.setScalar(initScale);

    // remove old model and dispose its GPU resources
    if (mainModel) {
        scene.remove(mainModel);
        mainModel.geometry.dispose();
        if (mainModel.material.map) {
            mainModel.material.map.dispose();
        }
        mainModel.material.dispose();
    }

    // save new model
    mainModel = plane1;
    // add new model
    scene.add(mainModel);
    console.log('updateModel', 'success');
}
75 |
// Fit the orthographic camera frustum to the current canvas size,
// centered on the origin.
function setSize() {
    const halfWidth = 0.5 * canvasWidth;
    const halfHeight = 0.5 * canvasHeight;
    camera.left = -halfWidth;
    camera.right = halfWidth;
    camera.top = halfHeight;
    camera.bottom = -halfHeight;
    camera.updateProjectionMatrix();
}
85 |
/**
 * Pose the mask from a detection result.
 * @param {Object} prediction - {transform, goodMatch, landmarks} from
 *     the image tracker; transform is a 3x3 homography.
 * @param {number} _canvasWidth - current frame width.
 * @param {number} _canvasHeight - current frame height.
 */
function setModel(prediction,
    _canvasWidth,
    _canvasHeight) {

    // resize the camera frustum if the frame size changed
    if (_canvasWidth !== canvasWidth) {
        canvasWidth = _canvasWidth;
        canvasHeight = _canvasHeight;
        setSize();
    }

    console.log('prediction', prediction);

    if (!mainModel) {
        console.log('setModel', '3d model is not loaded.');
        return;
    }

    var transform = prediction.transform.data;
    // position: project the configured track point through the
    // homography, then convert canvas coords (origin top-left, y down)
    // to scene coords (origin center, y up)
    var target = getTranslation(transform,
        trackPoint.x,
        trackPoint.y);
    mainModel.position.set(target._x - canvasWidth / 2,canvasHeight / 2 - target._y, 0);

    // rotation
    var r = getRotationAndScale(transform);
    // convert array to rotation matrix
    var rotationMatrix = new THREE.Matrix4();
    rotationMatrix.fromArray(r.rotation);
    mainModel.rotation.setFromRotationMatrix(rotationMatrix);

    // scale
    mainModel.scale.setScalar(initScale * r.scale);
}
120 |
// Park the mask at the canvas center with a neutral pose; used when no
// detection result is available.
function setModelOnDefaultposition() {
    if (!mainModel) {
        console.log('setModel', '3d model is not loaded.');
        return;
    }

    mainModel.position.set(0, 0, 0);             // position: canvas center
    mainModel.material.rotation = 0;             // rotation: reset texture spin
    mainModel.scale.setScalar(initScale * 0.65); // scale: slightly reduced
}
134 |
/**
 * Apply a 3x3 homography (row-major, 9 elements) to the point (x, y).
 * @param {number[]} td - homography matrix data, row-major.
 * @param {number} x - point x in pattern coordinates.
 * @param {number} y - point y in pattern coordinates.
 * @returns {{_x: number, _y: number}} the transformed point.
 */
function getTranslation(td, x, y) {
    // homogeneous coordinates: [x', y', w'] = H * [x, y, 1]
    const xh = td[0] * x + td[1] * y + td[2];
    const yh = td[3] * x + td[4] * y + td[5];
    const wh = td[6] * x + td[7] * y + td[8];

    // perspective divide
    const invW = 1.0 / wh;
    const _x = xh * invW;
    const _y = yh * invW;

    // console.log('translation', _x, _y);
    return { _x, _y };
}
149 |
/**
 * Decompose a 3x3 homography into an approximate rotation matrix and a
 * scalar scale. The matrix is first normalized so its first column has
 * unit length; the third rotation column is the cross product of the
 * first two.
 * @param {number[]} td - homography matrix data, row-major, 9 elements.
 * @returns {{scale: number, rotation: number[]}} rotation is a flat
 *     4x4 array suitable for THREE.Matrix4.fromArray().
 */
function getRotationAndScale(td) {
    // length of the first column of the homography
    const norm = Math.sqrt(td[0] * td[0] + td[3] * td[3] + td[6] * td[6]);
    // normalize the whole matrix by that length
    const H = td.map((item) => item / norm);

    const [m00, m01, m02,
           m10, m11, m12,
           m20, m21, m22] = H;

    // first two rotation columns come straight from the homography
    const c1 = [m00, m10, m20];
    const c2 = [m01, m11, m21];
    // third column: cross product c1 x c2
    const c3 = [
        m21 * m10 - m20 * m11,
        m20 * m01 - m21 * m00,
        m00 * m11 - m10 * m01];

    const scale = 1 / m22;
    // convert 3x3 to 4x4 (homogeneous), columns packed row by row
    const rotation =
        [c1[0], c2[0], c3[0], 0,
        c1[1], c2[1], c3[1], 0,
        c1[2], c2[2], c3[2], 0,
        0, 0, 0, 1];

    // console.log('scale', scale);
    // console.log('rotation', rotation);
    return {
        scale,
        rotation
    };
}
193 |
194 |
// Render loop: re-render the scene on every animation frame.
function animate() {
    window.requestAnimationFrame(animate);
    renderer.render(scene, camera);
}
199 |
200 |
201 | export {
202 | initThree,
203 | updateModel,
204 | setModel,
205 | setModelOnDefaultposition,
206 | getTranslation,
207 | getRotationAndScale,
208 | }
--------------------------------------------------------------------------------
/package_opencv_tracker/assets/opencv3.4.16.wasm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/package_opencv_tracker/assets/opencv3.4.16.wasm
--------------------------------------------------------------------------------
/package_opencv_tracker/camera/camera.css:
--------------------------------------------------------------------------------
/* Styles for the OpenCV camera tracking page. */

/* Remove the page's default padding so content can span the full width. */
.page__bd_spacing{
    padding-top:0;
    padding-left: 0;
    padding-right: 0;
}

/* Center the page body content. */
.page__bd{
    text-align: center;
}

/* Container for the camera video player. */
.videoPlayer{
    width: 375px;
    height: 500px;
    margin: auto;
}

/* Overlay the camera element inside its container. */
.camera{
    position: absolute;
}

/* Working canvas parked off-screen; used only as a pixel source. */
.canvas2d {
    position:fixed;
    left:1000px;
}

/* Hidden helper canvas, also parked off-screen. */
.hiddenCanvas{
    position:fixed;
    left:1000px;
}
30 |
31 |
--------------------------------------------------------------------------------
/package_opencv_tracker/camera/camera.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | Image Detecting using OpenCV
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
Detect
28 |
29 |
Pattern Image:
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
Loading...
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
--------------------------------------------------------------------------------
/package_opencv_tracker/camera/camera.js:
--------------------------------------------------------------------------------
1 | import * as imageTracker from '../utils/imageTracker.js?v3';
2 | import * as model from '../utils/modelBusiness.js';
3 | const canvasId = 'canvas2d';
4 | const canvasWebGLId = 'canvasWebGL';
5 | const maxCanvasWidth = 375;
6 | // a url of a image
7 | const modelUrl = '../../assets/cat_beard.png';
8 | // it should be more than detect time
9 | const frameTimeout = 200;
10 | const canvasWidth = 375;
11 | const canvasHeight = 375;
12 |
var app = new Vue({
    el: '#app',
    data: {
        isShowLoadingToast: false,
        isButtonDisabled: false,
        notice: '',
        patternImageUrl: '../../assets/face_pattern.jpg',
    },
    methods: {
        // Grab one video frame, run the OpenCV tracker on it, and pose the
        // 3d model according to the detected homography (or reset it).
        async processVideo(frame) {
            var _that = this;
            // was looked up twice; fetch the canvas once and reuse it
            var canvas1 = document.getElementById(canvasId);
            var ctx = canvas1.getContext('2d');
            // draw a video frame on a 2d canvas
            ctx.drawImage(frame, 0, 0, canvasWidth, canvasHeight);

            // process start
            var startTime = new Date();
            var result = imageTracker.detect(canvas1);
            var end = new Date() - startTime

            if (result && result.prediction) {
                // set the rotation and position of the 3d model.
                model.setModel(result.prediction,
                    canvasWidth,
                    canvasHeight);

                _that.notice = "detect: " + end + ' ms.';
            } else {
                // set the default position
                model.setModelOnDefaultposition();
                var message = 'No results.';
                _that.notice = message;
                console.log('detect:', message);
            }
            // process end
        },
        // Open the camera and start the per-frame processing loop.
        async takePhoto() {
            if (!navigator.mediaDevices) {
                var msg = 'No navigator.mediaDevices, needs a https site.';
                this.notice = msg;
                console.log('takePhoto', msg);
                return
            }

            if (this.isButtonDisabled) {
                return
            }

            var stream;
            try {
                stream = await navigator.mediaDevices.getUserMedia({
                    video: {
                        width: canvasWidth,
                        height: canvasHeight,
                        facingMode: 'environment',
                    }
                });
            } catch (err) {
                // e.g. permission denied or no camera; the original let
                // this rejection escape unhandled
                this.notice = 'getUserMedia failed: ' + err.message;
                console.log('takePhoto', err);
                return
            }
            var inputData = document.getElementById("inputData");
            inputData.srcObject = stream;
            await this.onVideoPlay();
        },
        // Poll the video element; process a frame whenever it is playing
        // and detection is enabled, then reschedule.
        async onVideoPlay() {
            var inputData = document.getElementById("inputData");
            // check the state of the video player
            if (!inputData.paused && !inputData.ended) {
                if (!this.isButtonDisabled) {
                    await this.processVideo(inputData);
                }
            }

            setTimeout(this.onVideoPlay, frameTimeout);
        },
        // Load the 3d model and extract features from the pattern image.
        load() {
            var _that = this;
            _that.isButtonDisabled = true;
            // load 3d model
            model.initThree(canvasWebGLId,
                modelUrl,
                canvasWidth,
                canvasHeight);

            // look the element up explicitly instead of relying on the
            // implicit window.<id> global the original used
            var patternImage = document.getElementById('patternImage');
            patternImage.addEventListener("load", function () {
                // waiting for OpenCV to be ready
                setTimeout(function () {
                    imageTracker.initTemplateImage('patternImage')
                    _that.isButtonDisabled = false;
                }, 1200)
            })
        },
    },
    mounted: function () {
        this.load();
    },
})
106 |
107 |
--------------------------------------------------------------------------------
/package_opencv_tracker/photo/photo.css:
--------------------------------------------------------------------------------
1 |
/* Styles for the OpenCV photo tracking page. */

/* Remove the page's default padding so content can span the full width. */
.page__bd_spacing{
    padding-top:0;
    padding-left: 0;
    padding-right: 0;
}

/* Center the page body content. */
.page__bd{
    text-align: center;
}

/* Container for the still-image player. */
.imagePlayer{
    width: 375px;
    height: 500px;
    margin: auto;
}

/* Overlay the image element; fixed width drives the detection size. */
.camera{
    width: 375px;
    position: absolute;
}

/* Working canvas parked off-screen; used only as a pixel source. */
.canvas2d {
    position:fixed;
    left:1000px;
}

/* Hidden helper canvas, also parked off-screen. */
.hiddenCanvas{
    position:fixed;
    left:1000px;
}
32 |
--------------------------------------------------------------------------------
/package_opencv_tracker/photo/photo.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | Image Detecting using OpenCV
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
The Input Image:
28 |
29 |
Detect
31 |
32 |
The Pattern Image:
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
Loading...
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
--------------------------------------------------------------------------------
/package_opencv_tracker/photo/photo.js:
--------------------------------------------------------------------------------
1 | import * as imageTracker from '../utils/imageTracker.js?v3';
2 | import * as model from '../utils/modelBusiness.js?v3';
3 | const canvasId = 'canvas2d';
4 | const canvasWebGLId = 'canvasWebGL';
5 | const maxCanvasWidth = 375;
6 | // a url of a image
7 | const modelUrl = '../../assets/cat_beard.png';
8 |
var app = new Vue({
    el: '#app',
    data: {
        isShowLoadingToast: false,
        isButtonDisabled: false,
        notice: '',
        patternImageUrl: '../../assets/face_pattern.jpg',
    },
    methods: {
        // Run the OpenCV tracker on a still image and pose the 3d model
        // from the detected homography (or reset it to the default pose).
        processPhoto(inputData, imageWidth, imageHeight) {
            var _that = this;

            var canvasWidth = imageWidth;
            var canvasHeight = imageHeight;

            // process start
            var startTime = new Date();
            var result = imageTracker.detect(inputData);
            var end = new Date() - startTime

            if (result && result.prediction) {
                // set the rotation and position of the 3d model.
                model.setModel(result.prediction,
                    canvasWidth,
                    canvasHeight);

                _that.notice = "detect: " + end + ' ms.';

            } else {
                // set the default position
                model.setModelOnDefaultposition();
                var message = 'No results.';
                _that.notice = message;
                console.log('detect:', message);
            }
            // process end
        },
        // Run one detection pass on the currently displayed input image.
        takePhoto() {
            if (this.isButtonDisabled) {
                return
            }
            // The stylesheet fixes the width of #inputData to 375px.
            const inputData = document.getElementById('inputData');
            this.processPhoto(inputData,
                inputData.width,
                inputData.height);
        },
        // Load the 3d model and extract features from the pattern image.
        load() {
            var _that = this;
            _that.isButtonDisabled = true;
            const inputData = document.getElementById("inputData");
            // load 3d model
            model.initThree(canvasWebGLId,
                modelUrl,
                inputData.width,
                inputData.height);

            // look the element up explicitly instead of relying on the
            // implicit window.<id> global the original used
            const patternImage = document.getElementById('patternImage');
            patternImage.addEventListener("load", function () {
                // waiting for OpenCV to be ready
                setTimeout(function () {
                    imageTracker.initTemplateImage('patternImage')
                    _that.isButtonDisabled = false;
                }, 1200)
            })
        },
    },
    mounted: function () {
        this.load();
    },
})
79 |
// change input image: show the newly selected file in #inputData.
document.getElementById("uploaderInput").addEventListener("change", function (e) {
    var files = e.target.files;
    if (files.length == 0) {
        return
    }
    var url = window.URL || window.webkitURL;
    if (!url) {
        // original assigned src = undefined here; bail out instead
        return
    }
    var inputData = document.getElementById("inputData");
    // revoke the previous object URL, otherwise each selection leaks a blob
    if (inputData.dataset.objectUrl) {
        url.revokeObjectURL(inputData.dataset.objectUrl);
    }
    var src = url.createObjectURL(files[0]);
    inputData.dataset.objectUrl = src;
    inputData.src = src;
});
94 |
--------------------------------------------------------------------------------
/package_opencv_tracker/utils/imageTracker.js:
--------------------------------------------------------------------------------
1 | // 有效的特征点数目
2 | const ValidPointTotal = 15
3 | // 模糊的size
4 | const BlurSize = 4
5 | // 识别图的特征点
6 | var template_keypoints_vector;
7 | // 识别图的特征点的描述因子
8 | var template_descriptors;
9 | // 单应性矩阵
10 | var homography_transform;
11 | // 查看opencv.js包含的方法
12 | console.log('cv', cv)
13 |
// Extract ORB keypoints and descriptors from the pattern (template) image.
// templateImageData: an element id (or element) readable by cv.imread.
// Stores the results in the module-level template_* variables.
function initTemplateImage(templateImageData) {
    var src = cv.imread(templateImageData);
    // grayscale
    cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
    // blur to reduce noise before feature detection
    let ksize = new cv.Size(BlurSize, BlurSize);
    let anchor = new cv.Point(-1, -1);
    cv.blur(src, src, ksize, anchor, cv.BORDER_DEFAULT);
    // free the previous template data if this is called more than once;
    // OpenCV.js Mats live in the wasm heap and are not garbage collected
    if (template_keypoints_vector) {
        template_keypoints_vector.delete()
    }
    if (template_descriptors) {
        template_descriptors.delete()
    }
    // keypoints of the pattern image
    template_keypoints_vector = new cv.KeyPointVector();
    // descriptors of those keypoints
    template_descriptors = new cv.Mat();
    // placeholder "no mask" argument
    var noArray = new cv.Mat();
    // ORB feature detector
    var orb = new cv.ORB();
    // detect and describe keypoints
    orb.detectAndCompute(src, noArray, template_keypoints_vector, template_descriptors)
    // free OpenCV objects
    src.delete()
    noArray.delete()
    orb.delete()
}
38 |
// Detect ORB features in a camera frame, match them against the template
// with a ratio test, and estimate a homography when enough good matches
// survive. Returns { prediction } where prediction is the 3x3 homography
// as a plain array of 9 numbers, or null.
function detectAndCompute(keyFrameImageData) {
    // read the image
    var src = cv.imread(keyFrameImageData);

    // grayscale
    cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);

    // blur
    let ksize = new cv.Size(BlurSize, BlurSize);
    let anchor = new cv.Point(-1, -1);
    cv.blur(src, src, ksize, anchor, cv.BORDER_DEFAULT);

    // keypoints of the frame
    var frame_keypoints_vector = new cv.KeyPointVector();
    // descriptors
    var frame_descriptors = new cv.Mat();
    // ORB feature detector
    var orb = new cv.ORB();
    // placeholder "no mask" argument
    var noArray = new cv.Mat();
    // detect keypoints
    orb.detectAndCompute(src, noArray, frame_keypoints_vector, frame_descriptors)

    var knnMatches = new cv.DMatchVectorVector();
    // brute-force matcher, 2 nearest neighbours per descriptor
    var matcher = new cv.BFMatcher();

    matcher.knnMatch(frame_descriptors, template_descriptors, knnMatches, 2)

    // matched keypoints in the camera frame
    var frame_keypoints = [];
    // matched keypoints in the template
    var template_keypoints = [];

    // keep only matches that pass Lowe's ratio test
    var matchTotal = knnMatches.size()
    for (var i = 0; i < matchTotal; i++) {
        var pair = knnMatches.get(i)
        // a descriptor can have fewer than 2 neighbours; the original
        // crashed on point2.distance in that case
        if (pair.size() < 2) {
            continue
        }
        var point = pair.get(0)
        var point2 = pair.get(1)

        if (point.distance < 0.7 * point2.distance) {
            // camera frame
            var frame_point = frame_keypoints_vector.get(point.queryIdx).pt
            frame_keypoints.push(frame_point)
            // template
            var template_point = template_keypoints_vector.get(point.trainIdx).pt
            template_keypoints.push(template_point)
        }
    }

    // convert the js arrays into cv.Mat objects
    var frameMat = new cv.Mat(frame_keypoints.length, 1, cv.CV_32FC2);
    var templateMat = new cv.Mat(template_keypoints.length, 1, cv.CV_32FC2);

    for (let i = 0; i < template_keypoints.length; i++) {
        // camera frame
        frameMat.data32F[i * 2] = frame_keypoints[i].x;
        frameMat.data32F[i * 2 + 1] = frame_keypoints[i].y;
        // template
        templateMat.data32F[i * 2] = template_keypoints[i].x;
        templateMat.data32F[i * 2 + 1] = template_keypoints[i].y;
    }

    // estimate the homography when there are enough good matches
    if (template_keypoints.length >= ValidPointTotal) {
        var homography = cv.findHomography(templateMat, frameMat, cv.RANSAC)
        // copy the values out of the wasm heap, then free the Mat; the
        // original kept homography.data64F and never deleted the Mat,
        // leaking it on every successful detection
        homography_transform = Array.from(homography.data64F)
        homography.delete()
    } else {
        homography_transform = null
    }

    // free OpenCV objects
    noArray.delete()
    orb.delete()
    frame_keypoints_vector.delete()
    frame_descriptors.delete()
    knnMatches.delete()
    matcher.delete()
    templateMat.delete()
    frameMat.delete()
    src.delete()
    frame_keypoints = null
    template_keypoints = null

    return {
        prediction: homography_transform,
    }
}
128 |
129 |
// Run one detection pass over the given image and log its duration.
function detect(imageData) {
    const startTime = new Date();
    const result = detectAndCompute(imageData);
    console.log('detectAndCompute:', new Date() - startTime, 'ms');
    return result;
}
140 |
// Release the template data held by this module.
function dispose() {
    // keypoints of the pattern image
    if (template_keypoints_vector) {
        template_keypoints_vector.delete()
        template_keypoints_vector = null
    }
    // descriptors of the pattern image
    if (template_descriptors) {
        template_descriptors.delete()
        template_descriptors = null
    }
    // homography matrix
    homography_transform = null
    // NOTE: the original assigned `isValidKeyFrame = false` — an undeclared
    // name, which throws a ReferenceError in a strict-mode ES module — and
    // checked freshly declared (always-undefined) `lastFrame`/`lastFrameMat`
    // locals. Both the broken assignment and the dead branches are removed.
}
169 |
170 | export {
171 | initTemplateImage,
172 | detect,
173 | dispose,
174 | }
--------------------------------------------------------------------------------
/package_opencv_tracker/utils/modelBusiness.js:
--------------------------------------------------------------------------------
1 | // the scale of the model image
2 | const initScale = 300;
3 | // a index of a track point on a pattern image
4 | const trackPoint = {
5 | x: 185, // the width of the pattern image is 375
6 | y: 224, // the height of the pattern image is 375
7 | }
8 |
9 | var camera, scene, renderer;
10 | var mainModel;
11 | var canvasWidth, canvasHeight;
12 |
// Set up the three.js scene on the given canvas and load the model image.
// canvasWebGLId: id of the WebGL canvas element; modelUrl: model image url;
// _canvasWidth/_canvasHeight: render size in CSS pixels.
function initThree(canvasWebGLId,
    modelUrl,
    _canvasWidth,
    _canvasHeight) {
    canvasWidth = _canvasWidth;
    canvasHeight = _canvasHeight;

    const webglCanvas = document.getElementById(canvasWebGLId);
    initScene(webglCanvas);
    loadModel(modelUrl);
}
24 |
// Create the camera, lights and WebGL renderer, then start the render loop.
function initScene(canvas_webgl) {
    // the frustum arguments are placeholders; setSize() fixes them below
    camera = new THREE.OrthographicCamera(1, 1, 1, 1, -1000, 1000);
    setSize();

    scene = new THREE.Scene();
    // ambient light
    scene.add(new THREE.AmbientLight(0xffffff));
    // directional light shining down the z axis
    const dirLight = new THREE.DirectionalLight(0xffffff, 1);
    dirLight.position.set(0, 0, 1000);
    scene.add(dirLight);

    // transparent renderer drawing onto the supplied canvas
    renderer = new THREE.WebGLRenderer({
        canvas: canvas_webgl,
        antialias: true,
        alpha: true,
    });
    const pixelRatio = window.devicePixelRatio;
    console.log('device pixel ratio', pixelRatio);
    renderer.setPixelRatio(pixelRatio);
    renderer.setSize(canvasWidth, canvasHeight);

    animate();
}
49 |
// Build a textured plane from the model image and add it to the scene.
function loadModel(modelUrl) {
    const texture = new THREE.TextureLoader().load(modelUrl);
    const material = new THREE.MeshBasicMaterial({ map: texture, transparent: true });
    const plane = new THREE.Mesh(new THREE.PlaneGeometry(1, 1), material);
    plane.scale.setScalar(initScale);

    mainModel = plane;
    scene.add(mainModel);
    console.log('loadModel', 'success');
}
61 |
// Replace the current model plane with one textured from a new image url.
function updateModel(modelUrl) {
    const texture1 = new THREE.TextureLoader().load(modelUrl);
    const material1 = new THREE.MeshBasicMaterial({ map: texture1, transparent: true });
    const geometry1 = new THREE.PlaneGeometry(1, 1);
    const plane1 = new THREE.Mesh(geometry1, material1);
    plane1.scale.setScalar(initScale);
    // remove the old model AND free its GPU resources; three.js does not
    // release geometries/materials/textures automatically, so the original
    // leaked them on every update
    if (mainModel) {
        scene.remove(mainModel);
        mainModel.geometry.dispose();
        if (mainModel.material.map) {
            mainModel.material.map.dispose();
        }
        mainModel.material.dispose();
    }
    // save new model
    mainModel = plane1;
    // add new model
    scene.add(mainModel);
    console.log('updateModel', 'success');
}
78 |
// Fit the orthographic frustum to the current canvas size, origin centered.
function setSize() {
    const halfW = 0.5 * canvasWidth;
    const halfH = 0.5 * canvasHeight;
    camera.left = -halfW;
    camera.right = halfW;
    camera.top = halfH;
    camera.bottom = -halfH;
    camera.updateProjectionMatrix();
}
88 |
// Apply a detected homography to the model: position, rotation and scale.
// transform: row-major 3x3 homography (array of 9 numbers);
// _canvasWidth/_canvasHeight: size of the frame the homography refers to.
function setModel(transform,
    _canvasWidth,
    _canvasHeight) {

    // re-fit the camera when the frame size changed; the original only
    // compared the width, missing height-only changes
    if (_canvasWidth !== canvasWidth || _canvasHeight !== canvasHeight) {
        canvasWidth = _canvasWidth;
        canvasHeight = _canvasHeight;
        setSize();
    }

    console.log('prediction', transform);

    if (!mainModel) {
        console.log('setModel', '3d model is not loaded.');
        return;
    }

    // position: project the track point through the homography, then
    // convert canvas coordinates (origin top-left, y down) to scene
    // coordinates (origin center, y up)
    var target = getTranslation(transform,
        trackPoint.x,
        trackPoint.y);

    mainModel.position.set(target._x - canvasWidth / 2,
        canvasHeight / 2 - target._y, 0);

    // rotation
    var r = getRotationAndScale(transform);
    var rotationMatrix = new THREE.Matrix4();
    rotationMatrix.fromArray(r.rotation);
    mainModel.rotation.setFromRotationMatrix(rotationMatrix);

    // scale
    mainModel.scale.setScalar(initScale * r.scale);
}
124 |
// Reset the model to a neutral pose in the center of the canvas.
function setModelOnDefaultposition() {
    if (!mainModel) {
        // original logged under the wrong tag 'setModel'
        console.log('setModelOnDefaultposition', '3d model is not loaded.');
        return;
    }

    // position
    mainModel.position.set(0, 0, 0);
    // NOTE(review): this resets material.rotation, not the mesh rotation
    // that setModel() changes — confirm whether mainModel.rotation should
    // be reset here as well.
    mainModel.material.rotation = 0;
    // scale
    mainModel.scale.setScalar(initScale * 0.65);
}
138 |
// Apply the homography `td` (row-major 3x3, array of 9 numbers) to the
// point (x, y) and return the dehomogenized coordinates { _x, _y }.
function getTranslation(td, x, y) {
    const xh = td[0] * x + td[1] * y + td[2];
    const yh = td[3] * x + td[4] * y + td[5];
    const w = td[6] * x + td[7] * y + td[8];

    // dehomogenize by the projective coordinate
    const invW = 1.0 / w;
    const _x = xh * invW;
    const _y = yh * invW;

    console.log('translation', _x, _y);

    return { _x, _y };
}
155 |
// Decompose the homography `td` (row-major 3x3) into an approximate scale
// and a 4x4 rotation matrix whose third column is the cross product of the
// first two. Returns { scale, rotation }.
function getRotationAndScale(td) {
    // normalize so the first column has unit length
    const norm = Math.sqrt(td[0] * td[0] + td[3] * td[3] + td[6] * td[6]);
    const H = td.map(function (v) {
        return v / norm;
    });

    const m00 = H[0], m01 = H[1],
        m10 = H[3], m11 = H[4],
        m20 = H[6], m21 = H[7],
        m22 = H[8];

    // rotation columns; c3 = c1 x c2
    const c1 = [m00, m10, m20];
    const c2 = [m01, m11, m21];
    const c3 = [
        m21 * m10 - m20 * m11,
        m20 * m01 - m21 * m00,
        m00 * m11 - m10 * m01];

    const scale = 1 / m22;
    // convert 3x3 to 4x4
    const rotation = [
        c1[0], c2[0], c3[0], 0,
        c1[1], c2[1], c3[1], 0,
        c1[2], c2[2], c3[2], 0,
        0, 0, 0, 1];

    console.log('scale', scale);
    console.log('rotation', rotation);

    return {
        scale,
        rotation
    };
}
201 |
202 |
// Use a raw RGBA frame ({ data, width, height }) as the scene background.
function setSceneBackground(frame) {
    const bgTexture = new THREE.DataTexture(frame.data,
        frame.width,
        frame.height,
        THREE.RGBAFormat);
    // canvas pixel data is top-down; flip to match texture orientation
    bgTexture.flipY = true;
    bgTexture.needsUpdate = true;
    scene.background = bgTexture;
}
212 |
// Drop the background texture from the scene.
function clearSceneBackground() {
    scene.background = null;
}
216 |
// Render loop: schedule the next frame via rAF, then draw the scene
// with the module-level renderer/scene/camera. Runs until page unload.
function animate() {
    window.requestAnimationFrame(animate);
    renderer.render(scene, camera);
}
221 |
// Drop this module's references so they can be garbage collected.
function dispose() {
    camera = null;
    scene = null;
    renderer = null;
    mainModel = null;
    canvasWidth = null;
    canvasHeight = null;
    // NOTE: the original also assigned to `canvas`, `requestId` (neither is
    // declared anywhere in this module — in a strict-mode ES module that
    // throws a ReferenceError, so dispose() never completed) and nulled the
    // shared global `THREE`, which would break every other user of three.js.
    // Those assignments were removed.
}
233 |
234 | export {
235 | initThree,
236 | updateModel,
237 | setModel,
238 | setModelOnDefaultposition,
239 | setSceneBackground,
240 | clearSceneBackground,
241 | dispose,
242 | }
--------------------------------------------------------------------------------
/package_video_tracker/camera/camera.css:
--------------------------------------------------------------------------------
/* Styles for the video-mask camera tracking page. */

/* Remove the page's default padding so content can span the full width. */
.page__bd_spacing{
    padding-top:0;
    padding-left: 0;
    padding-right: 0;
}

/* Center the page body content. */
.page__bd{
    text-align: center;
}

/* Container stacking the camera feed and the video mask on top of each other. */
.videoPlayer{
    width: 375px;
    height: 375px;
    margin: auto;
    position: relative;
    display: flex;
    justify-content: center;
    align-items: center;
}

/* Overlay the camera element inside the container. */
.camera{
    position: absolute;
}

/* Overlay video whose transform is animated to follow the detected pattern. */
.videoMask{
    position: absolute;
    transition: transform 1s;
}

/* Working canvas parked off-screen; used only as a pixel source. */
.canvas2d {
    position:fixed;
    left:1000px;
}

/* Hidden helper canvas, also parked off-screen. */
.hiddenCanvas{
    position:fixed;
    left:1000px;
}
39 |
40 |
--------------------------------------------------------------------------------
/package_video_tracker/camera/camera.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | Image Detecting and Video Mask
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
Detect
29 |
30 |
Pattern Image:
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
Loading...
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
--------------------------------------------------------------------------------
/package_video_tracker/camera/camera.js:
--------------------------------------------------------------------------------
1 | import * as image from '../utils/imageBusiness.js';
2 | const canvasId = 'canvas2d';
3 | // a url of a video
4 | const videoUrl = '../../assets/sample.mp4';
5 | const videoMaskId = "videoMask";
6 | const videoMaskSourceId = "videoMaskSource";
7 | // mask image
8 | const trackPoint = {
9 | x: 187, // the width of the pattern image is 375
10 | y: 187, // the height of the pattern image is 375
11 | };
12 | // pattern image
13 | const patternFrame = {
14 | w: '375px',
15 | h: '375px',
16 | }
17 | // it should be more than detect time
18 | const frameTimeout = 100;
19 | const canvasWidth = 375;
20 | const canvasHeight = 375;
21 |
var app = new Vue({
    el: '#app',
    data: {
        isShowLoadingToast: false,
        isButtonDisabled: false,
        notice: '',
    },
    methods: {
        // Grab one camera frame, run the tracker, and move the mask video
        // onto the detected pattern (or reset it to the default position).
        async processVideo(frame) {
            var _that = this;
            const ctx = document.getElementById(canvasId).getContext('2d');
            // draw a video frame on a 2d canvas
            ctx.drawImage(frame, 0, 0, canvasWidth, canvasHeight);
            // get a video frame from a 2d canvas
            var res = ctx.getImageData(0, 0, canvasWidth, canvasHeight)

            // process start
            image.detect(res.data,
                canvasWidth,
                canvasHeight,
                function (event) {
                    var result = event.data;

                    if (result && result.prediction) {
                        // set the position
                        image.updateMaskVideoPosition(result.prediction,
                            videoMaskId,
                            trackPoint,
                            canvasWidth,
                            canvasHeight)
                        _that.notice = "detect: " + result.prediction.goodMatch + " points, " + result.end + ' ms.';
                    } else {
                        // set the default position
                        image.setMaskVideoDefaultPosition(videoMaskId);
                        var message = 'No results.';
                        _that.notice = message;
                        console.log('detect:', message);
                    }
                });
            // process end
        },
        // Start the mask video sized to the pattern frame.
        playMaskVideo() {
            var videoMaskSource = document.getElementById(videoMaskSourceId);
            videoMaskSource.src = videoUrl;
            var videoMask = document.getElementById(videoMaskId);
            videoMask.style.width = patternFrame.w;
            videoMask.style.height = patternFrame.h;
            videoMask.load();
            videoMask.play();
        },
        // Place the mask video according to a detected homography.
        updateMaskVideoPosition(prediction) {
            console.log('prediction', prediction)
            var t = prediction.transform.data;
            // BUG FIX: the original called model.getTranslation, but this
            // file imports no `model` module — that was a ReferenceError.
            // getTranslation is defined on this component; call it via this.
            var target = this.getTranslation(t, trackPoint.x, trackPoint.y)
            var x = target._x - canvasWidth / 2;
            var y = target._y - canvasHeight / 2;
            // convert 3x3 to 4x4
            var t_array = [t[0], t[3], 0, t[6],
                t[1], t[4], 0, t[7],
                0, 0, 1, 0,
                x, y, 0, t[8]];
            var t_matrix = 'matrix3d(' + t_array.join(',') + ')';

            var videoMask = document.getElementById(videoMaskId);
            videoMask.style.transform = t_matrix;
        },
        // Park the mask video at a slightly shrunken default position.
        setMaskVideoDefaultPosition() {
            var videoMask = document.getElementById(videoMaskId);
            var t_matrix = 'matrix3d(0.65, 0, 0, 0, 0, 0.65, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)';
            videoMask.style.transform = t_matrix;
        },
        // Apply the 3x3 homography td to (x, y) and dehomogenize.
        getTranslation(td, x, y) {
            var m00 = td[0], m01 = td[1], m02 = td[2],
                m10 = td[3], m11 = td[4], m12 = td[5],
                m20 = td[6], m21 = td[7], m22 = td[8];
            var x2 = m00 * x + m01 * y + m02;
            var y2 = m10 * x + m11 * y + m12;
            var ws = m20 * x + m21 * y + m22;
            var sc = 1.0 / ws;
            var _x = x2 * sc;
            var _y = y2 * sc;

            // console.log('translation', _x, _y);
            return { _x, _y };
        },
        // Open the camera, start the mask video and the processing loop.
        async takePhoto() {
            if (!navigator.mediaDevices) {
                var msg = 'No navigator.mediaDevices, needs a https site.';
                this.notice = msg;
                console.log('takePhoto', msg);
                return
            }

            if (this.isButtonDisabled) {
                return
            }

            this.playMaskVideo();

            const stream = await navigator.mediaDevices.getUserMedia({
                video: {
                    width: canvasWidth,
                    height: canvasHeight,
                    facingMode: 'environment',
                }
            });
            var inputData = document.getElementById("inputData");
            inputData.srcObject = stream;
            await this.onVideoPlay();

        },
        // Poll the video element and process frames while it is playing.
        async onVideoPlay() {
            var inputData = document.getElementById("inputData");
            // check the state of the video player
            if (!inputData.paused && !inputData.ended) {
                await this.processVideo(inputData);
            }

            setTimeout(this.onVideoPlay, frameTimeout);
        },
        // Initialize the tracker once when the page is mounted.
        load() {
            this.isButtonDisabled = true;
            image.initTracker();
            this.isButtonDisabled = false;
        },
    },
    mounted: function () {
        this.load();
    },
})
152 |
153 |
--------------------------------------------------------------------------------
/package_video_tracker/photo/photo.css:
--------------------------------------------------------------------------------
1 |
/* Styles for the video-mask photo tracking page. */

/* Remove the page's default padding so content can span the full width. */
.page__bd_spacing{
    padding-top:0;
    padding-left: 0;
    padding-right: 0;
}

/* Center the page body content. */
.page__bd{
    text-align: center;
}

/* Container stacking the input image and the video mask on top of each other. */
.imagePlayer{
    width: 375px;
    height: 450px;
    margin: auto;
    position: relative;
    display: flex;
    justify-content: center;
    align-items: center;
}

/* Overlay the image element; fixed width drives the detection size. */
.camera{
    width: 375px;
    position: absolute;
}

/* Overlay video whose transform is animated to follow the detected pattern. */
.videoMask{
    position: absolute;
    transition: transform 1s;
}

/* Working canvas parked off-screen; used only as a pixel source. */
.canvas2d {
    position:fixed;
    left:1000px;
}

/* Hidden helper canvas, also parked off-screen. */
.hiddenCanvas{
    position:fixed;
    left:1000px;
}
41 |
--------------------------------------------------------------------------------
/package_video_tracker/photo/photo.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | Image Detecting and Video Mask
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
27 | An input image:
28 |
29 |
Detect
31 |
32 |
Pattern Image:
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
Loading...
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
--------------------------------------------------------------------------------
/package_video_tracker/photo/photo.js:
--------------------------------------------------------------------------------
1 | import * as image from '../utils/imageBusiness.js';
2 | const canvasId = 'canvas2d';
3 | const maxCanvasWidth = 375;
4 | // a url of a video
5 | const videoUrl = '../../assets/sample.mp4';
6 | const videoMaskId = "videoMask";
7 | const videoMaskSourceId = "videoMaskSource";
8 | // mask image
9 | const trackPoint = {
10 | x: 187, // the width of the pattern image is 375
11 | y: 187, // the height of the pattern image is 375
12 | };
13 | // pattern image
14 | const patternFrame = {
15 | w: '375px',
16 | h: '375px',
17 | }
18 |
19 | var canvasWidth;
20 | var canvasHeight;
21 |
var app = new Vue({
    el: '#app',
    data: {
        isShowLoadingToast: false,
        isButtonDisabled: false,
        notice: '',
    },
    methods: {
        // Scale the photo onto the working canvas, run the tracker on its
        // pixels, and position the mask video from the detection result.
        processPhoto(photo, imageWidth, imageHeight) {
            const self = this;
            const ctx = document.getElementById(canvasId).getContext('2d');
            // clamp the working width to the maximum canvas width
            canvasWidth = Math.min(imageWidth, maxCanvasWidth);
            // preserve the aspect ratio of the source image
            canvasHeight = Math.floor(canvasWidth * (imageHeight / imageWidth));

            // draw the image on the canvas and read the pixels back
            ctx.drawImage(photo, 0, 0, canvasWidth, canvasHeight);
            const res = ctx.getImageData(0, 0, canvasWidth, canvasHeight);

            // hand the frame to the tracker; the result arrives in a callback
            image.detect(res.data, canvasWidth, canvasHeight, function (event) {
                const result = event.data;

                if (result && result.prediction) {
                    // move the mask video onto the detected pattern
                    image.updateMaskVideoPosition(result.prediction,
                        videoMaskId,
                        trackPoint,
                        canvasWidth,
                        canvasHeight)
                    self.notice = "detect: " + result.prediction.goodMatch + " points, " + result.end + ' ms.';
                } else {
                    // no detection: park the mask at its default position
                    image.setMaskVideoDefaultPosition(videoMaskId);
                    const message = 'No results.';
                    self.notice = message;
                    console.log('detect:', message);
                }
            });
        },
        // Start the mask video sized to the pattern frame.
        playMaskVideo() {
            const maskSource = document.getElementById(videoMaskSourceId);
            maskSource.src = videoUrl;
            const mask = document.getElementById(videoMaskId);
            mask.style.width = patternFrame.w;
            mask.style.height = patternFrame.h;
            mask.load();
            mask.play();
        },
        // Run a detection pass on the currently displayed photo.
        takePhoto() {
            if (this.isButtonDisabled) {
                return
            }

            this.playMaskVideo();
            const inputData = document.getElementById('inputData');
            this.processPhoto(inputData, inputData.width, inputData.height);
        },
        // Initialize the tracker once when the page is mounted.
        load() {
            this.isButtonDisabled = true;
            image.initTracker();
            this.isButtonDisabled = false;
        },
    },
    mounted: function () {
        this.load();
    },
})
100 |
// change input image: show the newly selected file in #inputData.
document.getElementById("uploaderInput").addEventListener("change", function (e) {
    var files = e.target.files;
    if (files.length == 0) {
        return
    }
    var url = window.URL || window.webkitURL;
    if (!url) {
        // original assigned src = undefined here; bail out instead
        return
    }
    var inputData = document.getElementById("inputData");
    // revoke the previous object URL, otherwise each selection leaks a blob
    if (inputData.dataset.objectUrl) {
        url.revokeObjectURL(inputData.dataset.objectUrl);
    }
    var src = url.createObjectURL(files[0]);
    inputData.dataset.objectUrl = src;
    inputData.src = src;
});
115 |
116 |
--------------------------------------------------------------------------------
/package_video_tracker/utils/ImageTracker.js:
--------------------------------------------------------------------------------
1 | const fastThreshold = 20;
2 | const blurRadius = 2;
3 | const descriptorLength = 128;
4 | const minMatchNumber = 15;
5 |
// Custom tracker built on tracking.js.
// patternImageArray: optional pattern data, stored via setPattern when given
// (presumably the pattern image at several scales — confirm with callers).
var ImageTracker = function (patternImageArray) {
    // invoke the tracking.Tracker base constructor
    ImageTracker.base(this, 'constructor');
    if (patternImageArray) {
        this.setPattern(patternImageArray);
    }
}
13 |
14 | tracking.inherits(ImageTracker, tracking.Tracker);
// Run one detection pass over a raw RGBA frame and emit/return the results.
// pixels: frame pixel data; width/height: frame size in pixels.
ImageTracker.prototype.track = function (pixels, width, height) {
    const patterns = this.getPattern();

    if (!patterns) {
        console.log('Pattern not specified.');
        return;
    }

    // time the detection pass
    const start = new Date();
    const results = this.trackImage_(patterns, pixels, width, height);
    const end = new Date() - start;
    console.log('detect', end, 'ms');
    results.end = end;

    // notify any 'track' listeners (optional)
    this.emit('track', {
        data: results,
    });

    return {
        data: results,
    };
}
// Store the pattern data used for matching.
ImageTracker.prototype.setPattern = function (patternImageArray) {
    this.pattern = patternImageArray;
}
// Return the stored pattern data (undefined if none was set).
ImageTracker.prototype.getPattern = function () {
    return this.pattern;
};
// Estimate a homography from keypoint matches using jsfeat RANSAC.
// matches: [{keypoint1, keypoint2}] with keypoint1 in the matched pattern
// level's coordinates and keypoint2 in frame coordinates.
// patternWidth/patternHeight: size of the matched pattern level;
// originPatternWidth/originPatternHeight: origin size of the pattern image.
// Returns { transform, goodMatch, landmarks }.
ImageTracker.prototype.calcTransform = function (matches,
    patternWidth,
    patternHeight,
    originPatternWidth,
    originPatternHeight) {
    var ransac = jsfeat.motion_estimator.ransac;
    var homography2d_kernel = new jsfeat.motion_model.homography2d();
    var transform = new jsfeat.matrix_t(3, 3, jsfeat.F32_t | jsfeat.C1_t);
    var from = [];
    var to = [];
    var newFrom = [];
    var newTo = [];
    var count = matches.length;

    // rescale pattern keypoints back to the origin pattern size
    var widthRatio = originPatternWidth / patternWidth;
    var heightRatio = originPatternHeight / patternHeight;

    for (var i = 0; i < count; i++) {
        var match = matches[i];
        from[i] = {
            x: match.keypoint1[0] * widthRatio,
            y: match.keypoint1[1] * heightRatio,
        };
        to[i] = {
            x: match.keypoint2[0],
            y: match.keypoint2[1]
        };
    }

    var mask = new jsfeat.matrix_t(count, 1, jsfeat.U8_t | jsfeat.C1_t);
    // minimum points to estimate motion
    var model_size = 4;
    // max error to classify as inlier
    var thresh = 3;
    // max outliers ratio
    var eps = 0.5;
    // probability of success
    var prob = 0.99;
    var params = new jsfeat.ransac_params_t(model_size, thresh, eps, prob);
    var max_iters = 1000;
    var goodMatch = 0;
    var isOK = ransac(params, homography2d_kernel, from, to, count, transform, mask, max_iters);

    if (isOK) {
        // keep only the inliers selected by RANSAC
        newFrom = from.filter(function (item, index) {
            return mask.data[index];
        });
        newTo = to.filter(function (item, index) {
            return mask.data[index];
        });
        goodMatch = newFrom.length;
    }
    // refine the homography on the inliers only; the original ran this
    // unconditionally, fitting an empty point set whenever RANSAC failed
    if (goodMatch >= model_size) {
        homography2d_kernel.run(newFrom, newTo, transform, goodMatch);
    }

    return {
        transform: transform,
        goodMatch: goodMatch,
        landmarks: newTo,
    };
};
109 |
/**
 * Matches each pattern resample level against the camera frame and returns
 * the first level whose RANSAC inlier count reaches minMatchNumber.
 *
 * @param {Array} patterns - resampled pattern levels ({pixels, width, height});
 *     patterns[0] is the original (largest) level.
 * @param {Uint8ClampedArray} pixels - RGBA camera frame pixels.
 * @param {number} width - camera frame width.
 * @param {number} height - camera frame height.
 * @returns {Object} {prediction: null} when no level matches well enough,
 *     otherwise {prediction: {goodMatch, transform, landmarks}}.
 */
ImageTracker.prototype.trackImage_ = function (patterns, pixels, width, height) {
    tracking.Brief.N = descriptorLength;
    tracking.Fast.THRESHOLD = fastThreshold;
    var transformDataArray = [];
    // Preprocess the camera frame once: blur -> grayscale -> corners -> descriptors.
    var blur2 = tracking.Image.blur(pixels, width, height, blurRadius);
    var gray2 = tracking.Image.grayscale(blur2, width, height);
    var corners2 = tracking.Fast.findCorners(gray2, width, height);
    var descriptors2 = tracking.Brief.getDescriptors(gray2, width, corners2);
    var goodIndex = -1;

    // Try each pattern resample level until one matches well enough.
    for (var i = 0; i < patterns.length; i++) {
        var pattern = patterns[i];
        // blur
        var blur1 = tracking.Image.blur(pattern.pixels, pattern.width, pattern.height, blurRadius);
        // grayscale
        var gray1 = tracking.Image.grayscale(blur1, pattern.width, pattern.height);
        // find corners
        var corners1 = tracking.Fast.findCorners(gray1, pattern.width, pattern.height);
        // get descriptors
        var descriptors1 = tracking.Brief.getDescriptors(gray1, pattern.width, corners1);
        // keep only matches that agree in both directions
        var matches = tracking.Brief.reciprocalMatch(corners1, descriptors1, corners2, descriptors2);
        // Estimate the homography; keypoints are rescaled from this level
        // back to the original pattern size (patterns[0]).
        var transformData = this.calcTransform(matches,
            pattern.width,
            pattern.height,
            patterns[0].width,
            patterns[0].height);

        // save data
        transformDataArray.push(transformData);

        if (transformData.goodMatch >= minMatchNumber) {
            goodIndex = i;
            break;
        }
    }

    if (goodIndex === -1) {
        return {
            prediction: null
        };
    }

    var properTransform = transformDataArray[goodIndex];
    return {
        prediction: {
            goodMatch: properTransform.goodMatch,
            transform: properTransform.transform,
            // Fix: read landmarks from the matched level's result rather than
            // the var-hoisted loop variable `transformData`, which only
            // happened to hold the right value because the loop breaks
            // immediately after a match.
            landmarks: properTransform.landmarks,
        }
    };
};
169 |
--------------------------------------------------------------------------------
/package_video_tracker/utils/imageBusiness.js:
--------------------------------------------------------------------------------
// Id of the off-screen canvas used to rasterize the pattern image.
const hiddenCanvasId = 'hiddenCanvas';
// Number of progressively smaller copies (resample levels) of the pattern image.
const resampleLevels = 4;
// Pattern image width (possibly scaled down; see initTracker).
var patternFrameWidth;
// Pattern image height (possibly scaled down; see initTracker).
var patternFrameHeight;
// Maximum pattern image width; larger images are scaled down for speed.
const patternFrameMaxWidth = 375;
// The ImageTracker instance; null until initTracker completes.
var tracker = null;
// Resampled pattern levels ({pixels, width, height}) fed to ImageTracker.
var patternImageArray = [];
// Scale factor between consecutive resample levels (sqrt(2)).
const sc_inc = Math.sqrt(2.0);
// Pattern image url: a relative url, temp url, or network url.
const patternImageUrl = '../../assets/face_pattern.jpg';
18 |
/**
 * Runs one detection pass over a camera frame.
 * @param {Uint8ClampedArray} frame - RGBA frame pixels.
 * @param {number} width - frame width in pixels.
 * @param {number} height - frame height in pixels.
 * @param {Function} [callback] - receives the tracker result; not invoked
 *     while the tracker is still initializing.
 */
function detect(frame, width, height, callback) {
    if (!tracker) {
        // Tracker is created asynchronously by initTracker.
        console.log('detect:', 'waiting for the tracker initing to complete.');
        return;
    }
    var result = tracker.track(frame, width, height);
    if (callback) {
        callback(result);
    }
}
30 |
/**
 * Reads the resampled pattern images back from the hidden canvas and builds
 * the ImageTracker from their pixel data. The levels sit side by side on the
 * canvas, each 1/sqrt(2) the size of the previous one (see initTrackerCallback).
 * @param {CanvasRenderingContext2D} ctx - context of the hidden canvas.
 */
function drawPatternImageCallback(ctx) {
    var offsetX = 0;
    var levelWidth = patternFrameWidth;
    var levelHeight = patternFrameHeight;
    // discard any previously collected levels
    patternImageArray = [];

    for (var level = 0; level < resampleLevels; level++) {
        var imageData = ctx.getImageData(offsetX, 0, levelWidth, levelHeight);

        console.log('resample pattern image', imageData.width, imageData.height);
        patternImageArray.push({
            pixels: imageData.data,
            width: imageData.width,
            height: imageData.height,
        });

        // advance to the next, smaller level
        offsetX += levelWidth;
        levelWidth = Math.round(levelWidth / sc_inc);
        levelHeight = Math.round(levelHeight / sc_inc);
    }

    // create the tracker from the collected levels
    tracker = new ImageTracker(patternImageArray);
}
58 |
/**
 * Draws resampleLevels progressively smaller copies of the pattern image
 * side by side onto the hidden canvas, then hands the context to
 * drawPatternImageCallback to extract their pixels.
 * @param {HTMLImageElement} patternImage - the loaded pattern image.
 * @param {number} newWidth - width of the first (largest) level.
 * @param {number} newHeight - height of the first (largest) level.
 */
function initTrackerCallback(patternImage, newWidth, newHeight) {
    const ctx = document.getElementById(hiddenCanvasId).getContext('2d');

    var drawX = 0;

    for (var level = 0; level < resampleLevels; level++) {
        // draw this level next to the previous one
        ctx.drawImage(patternImage, drawX, 0, newWidth, newHeight);
        // each level is 1/sqrt(2) the size of the previous one
        drawX += newWidth;
        newWidth = Math.round(newWidth / sc_inc);
        newHeight = Math.round(newHeight / sc_inc);
    }

    drawPatternImageCallback(ctx);
}
75 |
/**
 * Loads the pattern image and initializes the tracker once it is ready.
 * The image is capped at patternFrameMaxWidth pixels wide (aspect ratio
 * preserved) to speed up image processing.
 */
function initTracker() {
    // set pattern image
    var patternImage = document.getElementById("patternImage");
    patternImage.src = patternImageUrl;
    patternImage.addEventListener("load", function () {
        patternFrameWidth = patternImage.width;
        patternFrameHeight = patternImage.height;

        // reduce image size to increase image process speed
        if (patternFrameWidth > patternFrameMaxWidth) {
            patternFrameWidth = patternFrameMaxWidth;
            patternFrameHeight = (patternImage.height / patternImage.width) * patternFrameMaxWidth;
        }

        initTrackerCallback(patternImage, patternFrameWidth, patternFrameHeight);
    });
}
100 |
/**
 * Applies a 3x3 homography (row-major, flattened to 9 elements) to the
 * point (x, y) and returns the dehomogenized result.
 * @param {Array|Float32Array} td - homography elements in row-major order.
 * @param {number} x - source x coordinate.
 * @param {number} y - source y coordinate.
 * @returns {{_x: number, _y: number}} the transformed point.
 */
function getTranslation(td, x, y) {
    // homogeneous coordinates: [hx, hy, w] = H * [x, y, 1]
    var hx = td[0] * x + td[1] * y + td[2];
    var hy = td[3] * x + td[4] * y + td[5];
    var w = td[6] * x + td[7] * y + td[8];
    // dehomogenize (multiply by the reciprocal, as the original did)
    var invW = 1.0 / w;
    return { _x: hx * invW, _y: hy * invW };
}
115 |
/**
 * Positions the mask video over the tracked pattern by converting the 3x3
 * homography into a CSS matrix3d transform.
 * @param {Object} prediction - tracker result containing `transform`.
 * @param {string} videoMaskId - DOM id of the mask video element.
 * @param {{x: number, y: number}} trackPoint - anchor point in pattern coordinates.
 * @param {number} canvasWidth - tracking canvas width.
 * @param {number} canvasHeight - tracking canvas height.
 */
function updateMaskVideoPosition(prediction,
    videoMaskId,
    trackPoint,
    canvasWidth,
    canvasHeight) {
    console.log('prediction', prediction)
    var t = prediction.transform.data;
    // where the anchor point lands in the camera frame
    var target = getTranslation(t, trackPoint.x, trackPoint.y)
    // translate relative to the element's centered origin
    var x = target._x - canvasWidth / 2;
    var y = target._y - canvasHeight / 2;
    // expand the 3x3 homography into a column-major 4x4 CSS matrix
    var matrixValues = [t[0], t[3], 0, t[6],
        t[1], t[4], 0, t[7],
        0, 0, 1, 0,
        x, y, 0, t[8]];
    var cssTransform = 'matrix3d(' + matrixValues.join(',') + ')';

    var videoMask = document.getElementById(videoMaskId);
    videoMask.style.transform = cssTransform;
}
136 |
/**
 * Resets the mask video to its default pose (no offset, 0.65 scale).
 * @param {string} videoMaskId - DOM id of the mask video element.
 */
function setMaskVideoDefaultPosition(videoMaskId) {
    var defaultMatrix = 'matrix3d(0.65, 0, 0, 0, 0, 0.65, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)';
    document.getElementById(videoMaskId).style.transform = defaultMatrix;
}
142 |
143 |
// Public API of the image-tracking business module.
export {
    initTracker,
    detect,
    updateMaskVideoPosition,
    setMaskVideoDefaultPosition,
    getTranslation,
};
151 |
--------------------------------------------------------------------------------
/screenshot/1-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/screenshot/1-1.jpg
--------------------------------------------------------------------------------
/screenshot/1-2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/screenshot/1-2.jpg
--------------------------------------------------------------------------------
/screenshot/1-3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/screenshot/1-3.jpg
--------------------------------------------------------------------------------
/screenshot/4-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/screenshot/4-1.jpg
--------------------------------------------------------------------------------
/screenshot/4-2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/screenshot/4-2.jpg
--------------------------------------------------------------------------------
/screenshot/5-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/screenshot/5-1.jpg
--------------------------------------------------------------------------------
/screenshot/5-2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/screenshot/5-2.jpg
--------------------------------------------------------------------------------
/screenshot/index.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanyuered/WebAR/4f8da79bb6a05c4710ed4952381f652859bd3f2d/screenshot/index.jpg
--------------------------------------------------------------------------------
/style/app.css:
--------------------------------------------------------------------------------
/**app.wxss**/
/* Page container. */
.page {
  background-color: #F8F8F8;
  font-size: 16px;
}
/* Page header. */
.page__hd {
  padding: 40px;
}
/* Page body. */
.page__bd {
  padding-bottom: 40px;
}
/* Horizontal spacing variant for the page body. */
.page__bd_spacing {
  padding-left: 15px;
  padding-right: 15px;
}

/* Page footer. */
.page__ft{
  padding-bottom: 10px;
  text-align: center;
}

/* Page title inside the header. */
.page__title {
  text-align: left;
  font-size: 20px;
  font-weight: 400;
}

/* Secondary description text under the title. */
.page__desc {
  margin-top: 5px;
  color: #888888;
  text-align: left;
  font-size: 14px;
}

/* Utility: block element with 10px top margin. */
.marginTop10{
  display: block;
  margin-top: 10px;
}
--------------------------------------------------------------------------------
/third_party/vue.min.js:
--------------------------------------------------------------------------------
1 | /*!
2 | * Vue.js v2.5.16
3 | * (c) 2014-2018 Evan You
4 | * Released under the MIT License.
5 | */
6 | !function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):e.Vue=t()}(this,function(){"use strict";var y=Object.freeze({});function M(e){return null==e}function D(e){return null!=e}function S(e){return!0===e}function T(e){return"string"==typeof e||"number"==typeof e||"symbol"==typeof e||"boolean"==typeof e}function P(e){return null!==e&&"object"==typeof e}var r=Object.prototype.toString;function l(e){return"[object Object]"===r.call(e)}function i(e){var t=parseFloat(String(e));return 0<=t&&Math.floor(t)===t&&isFinite(e)}function t(e){return null==e?"":"object"==typeof e?JSON.stringify(e,null,2):String(e)}function F(e){var t=parseFloat(e);return isNaN(t)?e:t}function s(e,t){for(var n=Object.create(null),r=e.split(","),i=0;ie.id;)n--;bt.splice(n+1,0,e)}else bt.push(e);Ct||(Ct=!0,Ze(At))}}(this)},St.prototype.run=function(){if(this.active){var e=this.get();if(e!==this.value||P(e)||this.deep){var t=this.value;if(this.value=e,this.user)try{this.cb.call(this.vm,e,t)}catch(e){Fe(e,this.vm,'callback for watcher "'+this.expression+'"')}else this.cb.call(this.vm,e,t)}}},St.prototype.evaluate=function(){this.value=this.get(),this.dirty=!1},St.prototype.depend=function(){for(var e=this.deps.length;e--;)this.deps[e].depend()},St.prototype.teardown=function(){if(this.active){this.vm._isBeingDestroyed||f(this.vm._watchers,this);for(var e=this.deps.length;e--;)this.deps[e].removeSub(this);this.active=!1}};var Tt={enumerable:!0,configurable:!0,get:$,set:$};function Et(e,t,n){Tt.get=function(){return this[t][n]},Tt.set=function(e){this[t][n]=e},Object.defineProperty(e,n,Tt)}function jt(e){e._watchers=[];var t=e.$options;t.props&&function(n,r){var i=n.$options.propsData||{},o=n._props={},a=n.$options._propKeys=[];n.$parent&&ge(!1);var e=function(e){a.push(e);var t=Ie(e,r,i,n);Ce(o,e,t),e in n||Et(n,"_props",e)};for(var t in r)e(t);ge(!0)}(e,t.props),t.methods&&function(e,t){e.$options.props;for(var n in 
t)e[n]=null==t[n]?$:v(t[n],e)}(e,t.methods),t.data?function(e){var t=e.$options.data;l(t=e._data="function"==typeof t?function(e,t){se();try{return e.call(t,t)}catch(e){return Fe(e,t,"data()"),{}}finally{ce()}}(t,e):t||{})||(t={});var n=Object.keys(t),r=e.$options.props,i=(e.$options.methods,n.length);for(;i--;){var o=n[i];r&&p(r,o)||(void 0,36!==(a=(o+"").charCodeAt(0))&&95!==a&&Et(e,"_data",o))}var a;we(t,!0)}(e):we(e._data={},!0),t.computed&&function(e,t){var n=e._computedWatchers=Object.create(null),r=Y();for(var i in t){var o=t[i],a="function"==typeof o?o:o.get;r||(n[i]=new St(e,a||$,$,Nt)),i in e||Lt(e,i,o)}}(e,t.computed),t.watch&&t.watch!==G&&function(e,t){for(var n in t){var r=t[n];if(Array.isArray(r))for(var i=0;iparseInt(this.max)&&bn(a,s[0],s,this._vnode)),t.data.keepAlive=!0}return t||e&&e[0]}}};$n=hn,Cn={get:function(){return j}},Object.defineProperty($n,"config",Cn),$n.util={warn:re,extend:m,mergeOptions:Ne,defineReactive:Ce},$n.set=xe,$n.delete=ke,$n.nextTick=Ze,$n.options=Object.create(null),k.forEach(function(e){$n.options[e+"s"]=Object.create(null)}),m(($n.options._base=$n).options.components,kn),$n.use=function(e){var t=this._installedPlugins||(this._installedPlugins=[]);if(-1=a&&l()};setTimeout(function(){c\/=]+)(?:\s*(=)\s*(?:"([^"]*)"+|'([^']*)'+|([^\s"'=<>`]+)))?/,oo="[a-zA-Z_][\\w\\-\\.]*",ao="((?:"+oo+"\\:)?"+oo+")",so=new RegExp("^<"+ao),co=/^\s*(\/?)>/,lo=new RegExp("^<\\/"+ao+"[^>]*>"),uo=/^]+>/i,fo=/^",""":'"',"&":"&","
":"\n"," ":"\t"},go=/&(?:lt|gt|quot|amp);/g,_o=/&(?:lt|gt|quot|amp|#10|#9);/g,bo=s("pre,textarea",!0),$o=function(e,t){return e&&bo(e)&&"\n"===t[0]};var wo,Co,xo,ko,Ao,Oo,So,To,Eo=/^@|^v-on:/,jo=/^v-|^@|^:/,No=/([^]*?)\s+(?:in|of)\s+([^]*)/,Lo=/,([^,\}\]]*)(?:,([^,\}\]]*))?$/,Io=/^\(|\)$/g,Mo=/:(.*)$/,Do=/^:|^v-bind:/,Po=/\.[^.]+/g,Fo=e(eo);function Ro(e,t,n){return{type:1,tag:e,attrsList:t,attrsMap:function(e){for(var t={},n=0,r=e.length;n]*>)","i")),n=i.replace(t,function(e,t,n){return r=n.length,ho(o)||"noscript"===o||(t=t.replace(//g,"$1").replace(//g,"$1")),$o(o,t)&&(t=t.slice(1)),d.chars&&d.chars(t),""});a+=i.length-n.length,i=n,A(o,a-r,a)}else{var s=i.indexOf("<");if(0===s){if(fo.test(i)){var c=i.indexOf("--\x3e");if(0<=c){d.shouldKeepComment&&d.comment(i.substring(4,c)),C(c+3);continue}}if(po.test(i)){var l=i.indexOf("]>");if(0<=l){C(l+2);continue}}var u=i.match(uo);if(u){C(u[0].length);continue}var f=i.match(lo);if(f){var p=a;C(f[0].length),A(f[1],p,a);continue}var _=x();if(_){k(_),$o(v,i)&&C(1);continue}}var b=void 0,$=void 0,w=void 0;if(0<=s){for($=i.slice(s);!(lo.test($)||so.test($)||fo.test($)||po.test($)||(w=$.indexOf("<",1))<0);)s+=w,$=i.slice(s);b=i.substring(0,s),C(s)}s<0&&(b=i,i=""),d.chars&&b&&d.chars(b)}if(i===e){d.chars&&d.chars(i);break}}function C(e){a+=e,i=i.substring(e)}function x(){var e=i.match(so);if(e){var t,n,r={tagName:e[1],attrs:[],start:a};for(C(e[0].length);!(t=i.match(co))&&(n=i.match(io));)C(n[0].length),r.attrs.push(n);if(t)return r.unarySlash=t[1],C(t[0].length),r.end=a,r}}function k(e){var t=e.tagName,n=e.unarySlash;m&&("p"===v&&ro(t)&&A(v),g(t)&&v===t&&A(t));for(var r,i,o,a=y(t)||!!n,s=e.attrs.length,c=new Array(s),l=0;l-1"+("true"===d?":("+l+")":":_q("+l+","+d+")")),Ar(c,"change","var $$a="+l+",$$el=$event.target,$$c=$$el.checked?("+d+"):("+v+");if(Array.isArray($$a)){var 
$$v="+(f?"_n("+p+")":p)+",$$i=_i($$a,$$v);if($$el.checked){$$i<0&&("+Er(l,"$$a.concat([$$v])")+")}else{$$i>-1&&("+Er(l,"$$a.slice(0,$$i).concat($$a.slice($$i+1))")+")}}else{"+Er(l,"$$c")+"}",null,!0);else if("input"===$&&"radio"===w)r=e,i=_,a=(o=b)&&o.number,s=Or(r,"value")||"null",Cr(r,"checked","_q("+i+","+(s=a?"_n("+s+")":s)+")"),Ar(r,"change",Er(i,s),null,!0);else if("input"===$||"textarea"===$)!function(e,t,n){var r=e.attrsMap.type,i=n||{},o=i.lazy,a=i.number,s=i.trim,c=!o&&"range"!==r,l=o?"change":"range"===r?Pr:"input",u="$event.target.value";s&&(u="$event.target.value.trim()"),a&&(u="_n("+u+")");var f=Er(t,u);c&&(f="if($event.target.composing)return;"+f),Cr(e,"value","("+t+")"),Ar(e,l,f,null,!0),(s||a)&&Ar(e,"blur","$forceUpdate()")}(e,_,b);else if(!j.isReservedTag($))return Tr(e,_,b),!1;return!0},text:function(e,t){t.value&&Cr(e,"textContent","_s("+t.value+")")},html:function(e,t){t.value&&Cr(e,"innerHTML","_s("+t.value+")")}},isPreTag:function(e){return"pre"===e},isUnaryTag:to,mustUseProp:Sn,canBeLeftOpenTag:no,isReservedTag:Un,getTagNamespace:Vn,staticKeys:(Go=Wo,Go.reduce(function(e,t){return e.concat(t.staticKeys||[])},[]).join(","))},Qo=e(function(e){return s("type,tag,attrsList,attrsMap,plain,parent,children,attrs"+(e?","+e:""))});function ea(e,t){e&&(Zo=Qo(t.staticKeys||""),Xo=t.isReservedTag||O,function e(t){t.static=function(e){if(2===e.type)return!1;if(3===e.type)return!0;return!(!e.pre&&(e.hasBindings||e.if||e.for||c(e.tag)||!Xo(e.tag)||function(e){for(;e.parent;){if("template"!==(e=e.parent).tag)return!1;if(e.for)return!0}return!1}(e)||!Object.keys(e).every(Zo)))}(t);if(1===t.type){if(!Xo(t.tag)&&"slot"!==t.tag&&null==t.attrsMap["inline-template"])return;for(var n=0,r=t.children.length;n|^function\s*\(/,na=/^[A-Za-z_$][\w$]*(?:\.[A-Za-z_$][\w$]*|\['[^']*?']|\["[^"]*?"]|\[\d+]|\[[A-Za-z_$][\w$]*])*$/,ra={esc:27,tab:9,enter:13,space:32,up:38,left:37,right:39,down:40,delete:[8,46]},ia={esc:"Escape",tab:"Tab",enter:"Enter",space:" 
",up:["Up","ArrowUp"],left:["Left","ArrowLeft"],right:["Right","ArrowRight"],down:["Down","ArrowDown"],delete:["Backspace","Delete"]},oa=function(e){return"if("+e+")return null;"},aa={stop:"$event.stopPropagation();",prevent:"$event.preventDefault();",self:oa("$event.target !== $event.currentTarget"),ctrl:oa("!$event.ctrlKey"),shift:oa("!$event.shiftKey"),alt:oa("!$event.altKey"),meta:oa("!$event.metaKey"),left:oa("'button' in $event && $event.button !== 0"),middle:oa("'button' in $event && $event.button !== 1"),right:oa("'button' in $event && $event.button !== 2")};function sa(e,t,n){var r=t?"nativeOn:{":"on:{";for(var i in e)r+='"'+i+'":'+ca(i,e[i])+",";return r.slice(0,-1)+"}"}function ca(t,e){if(!e)return"function(){}";if(Array.isArray(e))return"["+e.map(function(e){return ca(t,e)}).join(",")+"]";var n=na.test(e.value),r=ta.test(e.value);if(e.modifiers){var i="",o="",a=[];for(var s in e.modifiers)if(aa[s])o+=aa[s],ra[s]&&a.push(s);else if("exact"===s){var c=e.modifiers;o+=oa(["ctrl","shift","alt","meta"].filter(function(e){return!c[e]}).map(function(e){return"$event."+e+"Key"}).join("||"))}else a.push(s);return a.length&&(i+="if(!('button' in $event)&&"+a.map(la).join("&&")+")return null;"),o&&(i+=o),"function($event){"+i+(n?"return "+e.value+"($event)":r?"return ("+e.value+")($event)":e.value)+"}"}return n||r?e.value:"function($event){"+e.value+"}"}function la(e){var t=parseInt(e,10);if(t)return"$event.keyCode!=="+t;var n=ra[e],r=ia[e];return"_k($event.keyCode,"+JSON.stringify(e)+","+JSON.stringify(n)+",$event.key,"+JSON.stringify(r)+")"}var 
ua={on:function(e,t){e.wrapListeners=function(e){return"_g("+e+","+t.value+")"}},bind:function(t,n){t.wrapData=function(e){return"_b("+e+",'"+t.tag+"',"+n.value+","+(n.modifiers&&n.modifiers.prop?"true":"false")+(n.modifiers&&n.modifiers.sync?",true":"")+")"}},cloak:$},fa=function(e){this.options=e,this.warn=e.warn||$r,this.transforms=wr(e.modules,"transformCode"),this.dataGenFns=wr(e.modules,"genData"),this.directives=m(m({},ua),e.directives);var t=e.isReservedTag||O;this.maybeComponent=function(e){return!t(e.tag)},this.onceId=0,this.staticRenderFns=[]};function pa(e,t){var n=new fa(t);return{render:"with(this){return "+(e?da(e,n):'_c("div")')+"}",staticRenderFns:n.staticRenderFns}}function da(e,t){if(e.staticRoot&&!e.staticProcessed)return va(e,t);if(e.once&&!e.onceProcessed)return ha(e,t);if(e.for&&!e.forProcessed)return f=t,v=(u=e).for,h=u.alias,m=u.iterator1?","+u.iterator1:"",y=u.iterator2?","+u.iterator2:"",u.forProcessed=!0,(d||"_l")+"(("+v+"),function("+h+m+y+"){return "+(p||da)(u,f)+"})";if(e.if&&!e.ifProcessed)return ma(e,t);if("template"!==e.tag||e.slotTarget){if("slot"===e.tag)return function(e,t){var n=e.slotName||'"default"',r=_a(e,t),i="_t("+n+(r?","+r:""),o=e.attrs&&"{"+e.attrs.map(function(e){return g(e.name)+":"+e.value}).join(",")+"}",a=e.attrsMap["v-bind"];!o&&!a||r||(i+=",null");o&&(i+=","+o);a&&(i+=(o?"":",null")+","+a);return i+")"}(e,t);var n;if(e.component)a=e.component,c=t,l=(s=e).inlineTemplate?null:_a(s,c,!0),n="_c("+a+","+ya(s,c)+(l?","+l:"")+")";else{var r=e.plain?void 0:ya(e,t),i=e.inlineTemplate?null:_a(e,t,!0);n="_c('"+e.tag+"'"+(r?","+r:"")+(i?","+i:"")+")"}for(var o=0;o ':'
',0