├── .gitignore
├── Gruntfile.js
├── README.md
├── headtrackr.js
├── headtrackr.min.js
├── package.json
└── src
├── camshift.js
├── cascade.js
├── ccv.js
├── controllers.js
├── facetrackr.js
├── footer.js.txt
├── header.js.txt
├── headposition.js
├── license.js
├── main.js
├── smoother.js
├── ui.js
└── whitebalance.js
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 |
--------------------------------------------------------------------------------
/Gruntfile.js:
--------------------------------------------------------------------------------
1 | module.exports = function(grunt) {
2 | grunt.loadNpmTasks('grunt-contrib-concat');
3 | grunt.loadNpmTasks('grunt-contrib-uglify');
4 |
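// concat stitches the sources together (license first, the rest wrapped in the
// UMD header/footer fragments) into headtrackr.js; uglify then minifies that into headtrackr.min.js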
5 | grunt.initConfig({
6 | concat: {
7 | dist: {
8 | src: [
9 | 'src/license.js',
10 | 'src/header.js.txt',
11 | 'src/main.js',
12 | 'src/ccv.js',
13 | 'src/cascade.js',
14 | 'src/whitebalance.js',
15 | 'src/smoother.js',
16 | 'src/camshift.js',
17 | 'src/facetrackr.js',
18 | 'src/ui.js',
19 | 'src/headposition.js',
20 | 'src/controllers.js',
21 | 'src/footer.js.txt'],
22 | dest: './headtrackr.js'
23 | }
24 | },
25 | uglify: {
26 | options: {
27 | report: 'gzip',
28 | preserveComments: false,
29 | mangle: {
30 | except: ['headtrackr']
31 | }
32 | },
33 | dist: {
34 | src: ['./headtrackr.js'],
35 | dest: './headtrackr.min.js'
36 | }
37 | }
38 | });
39 |
40 | // Default task.
41 | grunt.registerTask('default', [
42 | 'concat',
43 | 'uglify'
44 | ]);
45 | };
46 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | headtrackr
2 | ==========
3 |
4 | **headtrackr** is a JavaScript library for real-time *face tracking* and *head tracking*, tracking the position of a user's head in relation to the computer screen, via a web camera and the [webRTC](http://www.webrtc.org/)/[getUserMedia](http://dev.w3.org/2011/webrtc/editor/getusermedia.html) standard.
5 |
6 | For a demonstration see [this video](https://vimeo.com/44049736), or try out some of the examples on a laptop that has a camera, using a browser that supports webRTC/getUserMedia. For an overview of browsers supporting the getUserMedia standard see [http://caniuse.com/stream](http://caniuse.com/stream).
7 |
8 | [Reference](http://auduno.github.com/headtrackr/documentation/reference.html) - [Overview](http://auduno.tumblr.com/post/25125149521/head-tracking-with-webrtc)
9 |
10 | ### Examples ###
11 |
12 | [facetracking example](http://auduno.github.com/headtrackr/examples/facetracking.html)
13 | [sprites canvas example](http://auduno.github.com/headtrackr/examples/sprites_canvas.html)
14 | [facekat](http://www.shinydemos.com/facekat/)
15 | [targets example](http://auduno.github.com/headtrackr/examples/targets.html)
16 |
17 | ### Usage ###
18 |
19 | Download the library [headtrackr.js](https://github.com/auduno/headtrackr/raw/master/headtrackr.js) and include it in your webpage.
20 |
21 | ```html
22 | <script src="headtrackr.js"></script>
23 | ```
24 |
25 | The following code initializes headtrackr with a video element, which will receive the mediastream, and a canvas element the video frames will be copied to.
26 |
27 | ```html
28 | <canvas id="inputCanvas" width="320" height="240" style="display:none"></canvas>
29 | <video id="inputVideo" autoplay loop></video>
30 | <script type="text/javascript">
31 |   var videoInput = document.getElementById('inputVideo');
32 |   var canvasInput = document.getElementById('inputCanvas');
33 |
34 |   var htracker = new headtrackr.Tracker();
35 |   htracker.init(videoInput, canvasInput);
36 |   htracker.start();
37 | </script>
38 | ```
39 |
40 | Once the headtracker is started, it will regularly generate the events *headtrackingEvent* and *facetrackingEvent* on the document. The event *headtrackingEvent* has the attributes *x*, *y* and *z*, which give the estimated position of the user's head in relation to the center of the screen, in centimeters. The event *facetrackingEvent* has the attributes *x*, *y*, *width*, *height* and *angle*, which give the estimated position and size of the face on the video.
41 |
42 | You can now either create an event listener to handle these events (a minimal sketch follows below), or, if you're using [three.js](https://github.com/mrdoob/three.js/), use one of the pre-packaged controllers in this library to create pseudo-3D, aka [head-coupled perspective](http://en.wikipedia.org/wiki/Head-coupled_perspective), effects.
43 |
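A minimal sketch of such a listener, using just the event attributes documented above:

```javascript
document.addEventListener('headtrackingEvent', function(event) {
  // event.x, event.y, event.z : estimated position of the head,
  // in cm relative to the center of the screen
  console.log('head at', event.x, event.y, event.z);
}, false);
```
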
44 | For more ideas about usage, look at the source code of the examples above, [this overview](http://auduno.tumblr.com/post/25125149521/head-tracking-with-webrtc), or [the reference](http://auduno.github.com/headtrackr/documentation/reference.html).
45 |
46 | ### Projects that have used headtrackr ###
47 |
48 | [Movember Games](http://movembergames.com)
49 | [street-facing](https://github.com/alexhancock/street-facing)
50 | [real-time responsive typography](http://webdesign.maratz.com/lab/responsivetypography/realtime/)
51 | [Snake](http://nicolas-beauvais.com/Snake/)
52 |
53 | ### Building from source ###
54 |
55 | Make sure you have [grunt](http://gruntjs.com/) and [node](http://nodejs.org/download/) installed.
56 | To install the development dependencies, run ```npm install```, and to build, run ```grunt``` in the root directory.
57 |
58 | ### License ###
59 |
60 | Headtrackr.js is distributed under the [MIT License](http://www.opensource.org/licenses/MIT), and includes some code bits (courtesy [Liu Liu](https://github.com/liuliu) and Benjamin Jung) that are under the [BSD-3 License](http://www.opensource.org/licenses/BSD-3-Clause) and the [MIT License](http://www.opensource.org/licenses/MIT) respectively.
61 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "headtrackr",
3 | "description": "headtracking via webcam and WebRTC/getUserMedia",
4 | "keywords": ["webcam", "headtracking", "computer vision", "facetracking"],
5 | "version": "1.0.2",
6 | "author": "Audun Mathias Øygard ",
7 | "repository": {
8 | "type": "git",
9 | "url": "https://github.com/auduno/headtrackr.git"
10 | },
11 | "bugs": "https://github.com/auduno/headtrackr/issues",
12 | "main": "headtrackr.js",
13 | "devDependencies": {
14 | "grunt": "~0.4.2",
15 | "grunt-contrib-concat": "~0.3.0",
16 | "grunt-contrib-uglify": "~0.2.7"
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/src/camshift.js:
--------------------------------------------------------------------------------
1 | /**
2 | * camshift object tracker
3 | *
4 | * ported with some optimizations from actionscript3 library FaceIt:
5 | * http://www.mukimuki.fr/flashblog/2009/06/18/camshift-going-to-the-source/
6 | * http://www.libspark.org/browser/as3/FaceIt
7 | * some explanation of algorithm here :
8 | * http://www.cognotics.com/opencv/servo_2007_series/part_3/sidebar.html
9 | *
10 | * usage:
11 | * // create a new tracker
12 | * var cstracker = new headtrackr.camshift.Tracker();
13 | * // initialize it with a canvas, and a rectangle around the object on the canvas we'd like to track
14 | * cstracker.initTracker(some_canvas, new headtrackr.camshift.Rectangle(x,y,w,h));
15 | * // find object in same or some other canvas
16 | * cstracker.track(some_canvas);
17 | * // get position of found object
18 | * var currentPos = cstracker.getTrackObj();
19 | * currentPos.x // x-coordinate of center of object on canvas
20 | * currentPos.y // y-coordinate of center of object on canvas
21 | * currentPos.width // width of object
22 | * currentPos.height // height of object
23 | * currentPos.angle // angle of object in radians
24 | *
25 | * @author Benjamin Jung / jungbenj@gmail.com
26 | * @author auduno / github.com/auduno
27 | *
28 | * License of original actionscript code:
29 | *
30 | * Copyright (C)2009 Benjamin Jung
31 | *
32 | * Licensed under the MIT License
33 | *
34 | * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
35 | *
36 | * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
37 | *
38 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 | *
40 | */
41 |
42 | headtrackr.camshift = {};
43 |
44 | /**
45 | * RGB histogram
46 | *
47 | * @constructor
48 | */
49 | headtrackr.camshift.Histogram = function(imgdata) {
50 |
51 | this.size = 4096;
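// 4096 = 16^3 bins: r, g and b are each quantized to 16 levels below,
// so the bin index is 256*r + 16*g + b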
52 |
53 | var bins = [];
54 | var i, x, r, g, b, il;
55 |
56 | //initialize bins
57 | for (i = 0; i < this.size; i++) {
58 | bins.push(0);
59 | }
60 |
61 | //add histogram data
62 | for (x = 0, il = imgdata.length;x < il; x += 4) {
63 | r = imgdata[x+0] >> 4; // round down to bins of 16
64 | g = imgdata[x+1] >> 4;
65 | b = imgdata[x+2] >> 4;
66 | bins[256 * r + 16 * g + b] += 1;
67 | }
68 |
69 | this.getBin = function( index ) {
70 | return bins[index];
71 | }
72 | };
73 |
74 | /**
75 | * moments object
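* computes the raw image moments (m00, m01, m10 and, when second is set, m11, m02, m20)
* over a region of a probability matrix, plus the centroid (xc, yc) and, when second is set,
* the second-order central moments (mu20, mu02, mu11)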
76 | *
77 | * @constructor
78 | */
79 | headtrackr.camshift.Moments = function(data, x, y, w, h, second) {
80 |
81 | this.m00 = 0;
82 | this.m01 = 0;
83 | this.m10 = 0;
84 | this.m11 = 0;
85 | this.m02 = 0;
86 | this.m20 = 0;
87 |
88 | var i, j, val, vx, vy;
89 | var a = [];
90 | for (i = x; i < w; i++) {
91 | a = data[i];
92 | vx = i-x;
93 |
94 | for (j = y; j < h; j++) {
95 | val = a[j];
96 |
97 | vy = j-y;
98 | this.m00 += val;
99 | this.m01 += vy * val;
100 | this.m10 += vx * val;
101 | if (second) {
102 | this.m11 += vx * vy * val;
103 | this.m02 += vy * vy * val;
104 | this.m20 += vx * vx * val;
105 | }
106 | }
107 | }
108 |
109 | this.invM00 = 1 / this.m00;
110 | this.xc = this.m10 * this.invM00;
111 | this.yc = this.m01 * this.invM00;
112 | this.mu00 = this.m00;
113 | this.mu01 = 0;
114 | this.mu10 = 0;
115 | if (second) {
116 | this.mu20 = this.m20 - this.m10 * this.xc;
117 | this.mu02 = this.m02 - this.m01 * this.yc;
118 | this.mu11 = this.m11 - this.m01 * this.xc;
119 | }
120 | };
121 |
122 | /**
123 | * rectangle object
124 | *
125 | * @constructor
126 | */
127 | headtrackr.camshift.Rectangle = function(x,y,w,h) {
128 | this.x = x;
129 | this.y = y;
130 | this.width = w;
131 | this.height = h;
132 |
133 | this.clone = function() {
134 | var c = new headtrackr.camshift.Rectangle();
135 | c.height = this.height;
136 | c.width = this.width;
137 | c.x = this.x;
138 | c.y = this.y;
139 | return c;
140 | }
141 | };
142 |
143 | /**
144 | * Tracker object
145 | *
146 | * @constructor
147 | */
148 | headtrackr.camshift.Tracker = function(params) {
149 |
150 | if (params === undefined) params = {};
151 | if (params.calcAngles === undefined) params.calcAngles = true;
152 |
153 | var _modelHist,
154 | _curHist, //current histogram
155 | _pdf, // pixel probability data for current searchwindow
156 | _searchWindow, // rectangle where we are searching
157 | _trackObj, // object holding data about where current tracked object is
158 | _canvasCtx, // canvas context for initial canvas
159 | _canvasw, // canvas width for tracking canvas
160 | _canvash; // canvas height for tracking canvas
161 |
162 | this.getSearchWindow = function() {
163 | // return the search window used by the camshift algorithm in the current analysed image
164 | return _searchWindow.clone();
165 | }
166 |
167 | this.getTrackObj = function() {
168 | // return a trackobj with the size and orientation of the tracked object in the current analysed image
169 | return _trackObj.clone();
170 | }
171 |
172 | this.getPdf = function() {
173 | // returns a nested array representing the color probability of each pixel in the analysed image
174 | return _pdf;
175 | }
176 |
177 | this.getBackProjectionImg = function() {
178 | // return imgData representing pixel color probabilities, which can then be put into canvas
179 | var weights = _pdf;
180 | var w = _canvasw;
181 | var h = _canvash;
182 | var img = _canvasCtx.createImageData(w, h);
183 | var imgData = img.data;
184 | var x, y, val, pos;
185 | for (x = 0; x < w; x++) {
186 | for (y = 0; y < h; y++) {
187 | val = Math.floor(255 * weights[x][y]);
188 | pos = ((y*w)+x)*4;
189 | imgData[pos] = val;
190 | imgData[pos+1] = val;
191 | imgData[pos+2] = val;
192 | imgData[pos+3] = 255;
193 | }
194 | }
195 | return img;
196 | }
197 |
198 | this.initTracker = function(canvas, trackedArea) {
199 | // initialize the tracker with canvas and the area of interest as a rectangle
200 |
201 | _canvasCtx = canvas.getContext("2d");
202 | var taw = trackedArea.width;
203 | var tah = trackedArea.height;
204 | var tax = trackedArea.x;
205 | var tay = trackedArea.y;
206 | var trackedImg = _canvasCtx.getImageData(tax, tay, taw, tah);
207 |
208 | _modelHist = new headtrackr.camshift.Histogram(trackedImg.data);
209 | _searchWindow = trackedArea.clone();
210 | _trackObj = new headtrackr.camshift.TrackObj();
211 | }
212 |
213 | this.track = function(canvas) {
214 | // search the tracked object by camshift
215 | var canvasCtx = canvas.getContext("2d");
216 | _canvash = canvas.height;
217 | _canvasw = canvas.width;
218 | var imgData = canvasCtx.getImageData(0, 0, canvas.width, canvas.height);
219 | if (imgData.width != 0 && imgData.height != 0) camShift(imgData);
220 | }
221 |
222 | function camShift(frame) {
223 |
224 | var w = frame.width;
225 | var h = frame.height;
226 |
227 | // search location
228 | var m = meanShift(frame);
229 |
230 | var a = m.mu20 * m.invM00;
231 | var c = m.mu02 * m.invM00;
232 |
233 | if (params.calcAngles) {
234 | // use moments to find size and orientation
235 | var b = m.mu11 * m.invM00;
236 | var d = a + c;
237 | var e = Math.sqrt((4*b * b) + ((a - c) * (a - c)));
238 |
239 | // update object position
240 | _trackObj.width = Math.sqrt((d - e)*0.5) << 2;
241 | _trackObj.height = Math.sqrt((d + e)*0.5) << 2;
242 | _trackObj.angle = Math.atan2(2 * b, a - c + e);
243 |
244 | // to have a positive counter clockwise angle
245 | if (_trackObj.angle < 0) _trackObj.angle = _trackObj.angle + Math.PI;
246 | } else {
247 | _trackObj.width = Math.sqrt(a) << 2;
248 | _trackObj.height = Math.sqrt(c) << 2;
249 | _trackObj.angle = Math.PI/2;
250 | }
251 |
252 | // clamp the position of the tracked object to within the frame
253 | _trackObj.x = Math.floor(Math.max(0, Math.min(_searchWindow.x + _searchWindow.width/2, w)));
254 | _trackObj.y = Math.floor(Math.max(0, Math.min(_searchWindow.y + _searchWindow.height/2, h)));
255 |
256 | // new search window size
257 | _searchWindow.width = Math.floor(1.1 * _trackObj.width);
258 | _searchWindow.height = Math.floor(1.1 * _trackObj.height);
259 | }
260 |
261 | function meanShift(frame) {
262 | // mean-shift algorithm on frame
263 |
264 | var w = frame.width;
265 | var h = frame.height;
266 | var imgData = frame.data;
267 |
268 | var curHist = new headtrackr.camshift.Histogram(imgData);
269 |
270 | var weights = getWeights(_modelHist, curHist);
271 |
272 | // Color probabilities distributions
273 | _pdf = getBackProjectionData(imgData, frame.width, frame.height, weights);
274 |
275 | var m, x, y, i, wadx, wady, wadw, wadh;
276 |
277 | var meanShiftIterations = 10; // maximum number of iterations
278 |
279 | // store initial searchwindow
280 | var prevx = _searchWindow.x;
281 | var prevy = _searchWindow.y;
282 |
283 | // iteratively locate the maximum of density in the probability distribution
284 | for (i = 0;i < meanShiftIterations; i++) {
285 | // get searchwindow from _pdf:
286 | wadx = Math.max(_searchWindow.x,0);
287 | wady = Math.max(_searchWindow.y,0);
288 | wadw = Math.min(wadx + _searchWindow.width,w);
289 | wadh = Math.min(wady + _searchWindow.height,h);
290 |
291 | m = new headtrackr.camshift.Moments(_pdf, wadx, wady, wadw, wadh, (i == meanShiftIterations -1));
292 | x = m.xc;
293 | y = m.yc;
294 |
295 | _searchWindow.x += ((x - _searchWindow.width/2) >> 0);
296 | _searchWindow.y += ((y - _searchWindow.height/2) >> 0);
297 |
298 | // if we have reached maximum density, get second moments and stop iterations
299 | if (_searchWindow.x == prevx && _searchWindow.y == prevy) {
300 | m = new headtrackr.camshift.Moments(_pdf, wadx, wady, wadw, wadh, true);
301 | break;
302 | } else {
303 | prevx = _searchWindow.x;
304 | prevy = _searchWindow.y;
305 | }
306 | }
307 |
308 | _searchWindow.x = Math.max(0, Math.min(_searchWindow.x, w));
309 | _searchWindow.y = Math.max(0, Math.min(_searchWindow.y, h));
310 |
311 | return m;
312 | }
313 |
314 | function getWeights(mh, ch) {
315 | // return an array with the probability of each histogram color bin
316 | var weights = [];
317 | var p;
318 |
319 | // iterate over the entire histogram and compare
320 | for (var i = 0; i < 4096; i++) {
321 | if (ch.getBin(i) != 0) {
322 | p = Math.min(mh.getBin(i)/ch.getBin(i), 1);
323 | } else {
324 | p = 0;
325 | }
326 | weights.push(p);
327 | }
328 |
329 | return weights;
330 | }
331 |
332 | function getBackProjectionData(imgData, idw, idh, weights, hsMap) {
333 | // Return a matrix representing pixel color probabilities
334 | var data = [];
335 | var x,y,r,g,b,pos;
336 | var a = [];
337 |
338 | // TODO : we could use typed arrays here
339 | // but we should then do a compatibilitycheck
340 |
341 | for (x = 0; x < idw; x++) {
342 | a = [];
343 | for (y = 0; y < idh; y++) {
344 | pos = ((y*idw)+x)*4;
345 | r = imgData[pos] >> 4;
346 | g = imgData[pos+1] >> 4;
347 | b = imgData[pos+2] >> 4;
348 | a.push(weights[256 * r + 16 * g + b]);
349 | }
350 | data[x] = a;
351 | }
352 | return data;
353 | }
354 | };
355 |
356 | /**
357 | * Object returned by tracker
358 | * note that x,y is the point of the center of the tracker
359 | *
360 | * @constructor
361 | */
362 | headtrackr.camshift.TrackObj = function() {
363 | this.height = 0;
364 | this.width = 0;
365 | this.angle = 0;
366 | this.x = 0;
367 | this.y = 0;
368 |
369 | this.clone = function() {
370 | var c = new headtrackr.camshift.TrackObj();
371 | c.height = this.height;
372 | c.width = this.width;
373 | c.angle = this.angle;
374 | c.x = this.x;
375 | c.y = this.y;
376 | return c;
377 | }
378 | };
--------------------------------------------------------------------------------
/src/ccv.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Viola-Jones-like face detection algorithm
3 | * Some explanation here: http://liuliu.me/eyes/javascript-face-detection-explained/
4 | *
5 | * @author Liu Liu / github.com/liuliu
6 | *
7 | * Copyright (c) 2010, Liu Liu
8 | * All rights reserved.
9 | *
10 | * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
11 | *
12 | * * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
13 | * * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
14 | * * Neither the name of the authors nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
15 | *
16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
17 | *
18 | */
19 |
20 | headtrackr.ccv = {};
21 |
22 | headtrackr.ccv.grayscale = function (canvas) {
23 | /* detect_objects requires gray-scale image */
24 | var ctx = canvas.getContext("2d");
25 | var imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
26 | var data = imageData.data;
27 | var pix1, pix2, pix = canvas.width * canvas.height * 4;
28 | while (pix > 0)
29 | data[pix -= 4] = data[pix1 = pix + 1] = data[pix2 = pix + 2] = (data[pix] * 0.3 + data[pix1] * 0.59 + data[pix2] * 0.11);
30 | ctx.putImageData(imageData, 0, 0);
31 | return canvas;
32 | };
33 |
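// groups similar rectangles via union-find (union by rank, with path compression);
// gfunc(r1, r2) decides whether two rectangles belong to the same group;
// returns a group index per input rectangle ("index") and the number of groups ("cat")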
34 | headtrackr.ccv.array_group = function (seq, gfunc) {
35 | var i, j;
36 | var node = new Array(seq.length);
37 | for (i = 0; i < seq.length; i++)
38 | node[i] = {"parent" : -1,
39 | "element" : seq[i],
40 | "rank" : 0};
41 | for (i = 0; i < seq.length; i++)
42 | {
43 | if (!node[i].element)
44 | continue;
45 | var root = i;
46 | while (node[root].parent != -1)
47 | root = node[root].parent;
48 | for (j = 0; j < seq.length; j++)
49 | {
50 | if( i != j && node[j].element && gfunc(node[i].element, node[j].element))
51 | {
52 | var root2 = j;
53 |
54 | while (node[root2].parent != -1)
55 | root2 = node[root2].parent;
56 |
57 | if(root2 != root)
58 | {
59 | if(node[root].rank > node[root2].rank)
60 | node[root2].parent = root;
61 | else
62 | {
63 | node[root].parent = root2;
64 | if (node[root].rank == node[root2].rank)
65 | node[root2].rank++;
66 | root = root2;
67 | }
68 |
69 | /* compress path from node2 to the root: */
70 | var temp, node2 = j;
71 | while (node[node2].parent != -1)
72 | {
73 | temp = node2;
74 | node2 = node[node2].parent;
75 | node[temp].parent = root;
76 | }
77 |
78 | /* compress path from node to the root: */
79 | node2 = i;
80 | while (node[node2].parent != -1)
81 | {
82 | temp = node2;
83 | node2 = node[node2].parent;
84 | node[temp].parent = root;
85 | }
86 | }
87 | }
88 | }
89 | }
90 | var idx = new Array(seq.length);
91 | var class_idx = 0;
92 | for(i = 0; i < seq.length; i++)
93 | {
94 | j = -1;
95 | var node1 = i;
96 | if(node[node1].element)
97 | {
98 | while (node[node1].parent != -1)
99 | node1 = node[node1].parent;
100 | if(node[node1].rank >= 0)
101 | node[node1].rank = ~class_idx++;
102 | j = ~node[node1].rank;
103 | }
104 | idx[i] = j;
105 | }
106 | return {"index" : idx, "cat" : class_idx};
107 | };
108 |
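// usage sketch (mirroring the call in facetrackr.js), where canvas holds the image to scan:
//   var faces = headtrackr.ccv.detect_objects(headtrackr.ccv.grayscale(canvas), headtrackr.cascade, 5, 1);
//   // faces is an array of rectangles: {x, y, width, height, neighbors, confidence}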
109 | headtrackr.ccv.detect_objects = function (canvas, cascade, interval, min_neighbors) {
110 | var scale = Math.pow(2, 1 / (interval + 1));
111 | var next = interval + 1;
112 | var scale_upto = Math.floor(Math.log(Math.min(cascade.width, cascade.height)) / Math.log(scale));
113 | var pyr = new Array((scale_upto + next * 2) * 4);
114 | pyr[0] = canvas;
115 | pyr[0].data = pyr[0].getContext("2d").getImageData(0, 0, pyr[0].width, pyr[0].height).data;
116 | var i, j, k, x, y, q;
117 | for (i = 1; i <= interval; i++) {
118 | pyr[i * 4] = document.createElement("canvas");
119 | pyr[i * 4].width = Math.floor(pyr[0].width / Math.pow(scale, i));
120 | pyr[i * 4].height = Math.floor(pyr[0].height / Math.pow(scale, i));
121 | pyr[i * 4].getContext("2d").drawImage(pyr[0], 0, 0, pyr[0].width, pyr[0].height, 0, 0, pyr[i * 4].width, pyr[i * 4].height);
122 | pyr[i * 4].data = pyr[i * 4].getContext("2d").getImageData(0, 0, pyr[i * 4].width, pyr[i * 4].height).data;
123 | }
124 | for (i = next; i < scale_upto + next * 2; i++) {
125 | pyr[i * 4] = document.createElement("canvas");
126 | pyr[i * 4].width = Math.floor(pyr[i * 4 - next * 4].width / 2);
127 | pyr[i * 4].height = Math.floor(pyr[i * 4 - next * 4].height / 2);
128 | pyr[i * 4].getContext("2d").drawImage(pyr[i * 4 - next * 4], 0, 0, pyr[i * 4 - next * 4].width, pyr[i * 4 - next * 4].height, 0, 0, pyr[i * 4].width, pyr[i * 4].height);
129 | pyr[i * 4].data = pyr[i * 4].getContext("2d").getImageData(0, 0, pyr[i * 4].width, pyr[i * 4].height).data;
130 | }
131 | for (i = next * 2; i < scale_upto + next * 2; i++) {
132 | pyr[i * 4 + 1] = document.createElement("canvas");
133 | pyr[i * 4 + 1].width = Math.floor(pyr[i * 4 - next * 4].width / 2);
134 | pyr[i * 4 + 1].height = Math.floor(pyr[i * 4 - next * 4].height / 2);
135 | pyr[i * 4 + 1].getContext("2d").drawImage(pyr[i * 4 - next * 4], 1, 0, pyr[i * 4 - next * 4].width - 1, pyr[i * 4 - next * 4].height, 0, 0, pyr[i * 4 + 1].width - 2, pyr[i * 4 + 1].height);
136 | pyr[i * 4 + 1].data = pyr[i * 4 + 1].getContext("2d").getImageData(0, 0, pyr[i * 4 + 1].width, pyr[i * 4 + 1].height).data;
137 | pyr[i * 4 + 2] = document.createElement("canvas");
138 | pyr[i * 4 + 2].width = Math.floor(pyr[i * 4 - next * 4].width / 2);
139 | pyr[i * 4 + 2].height = Math.floor(pyr[i * 4 - next * 4].height / 2);
140 | pyr[i * 4 + 2].getContext("2d").drawImage(pyr[i * 4 - next * 4], 0, 1, pyr[i * 4 - next * 4].width, pyr[i * 4 - next * 4].height - 1, 0, 0, pyr[i * 4 + 2].width, pyr[i * 4 + 2].height - 2);
141 | pyr[i * 4 + 2].data = pyr[i * 4 + 2].getContext("2d").getImageData(0, 0, pyr[i * 4 + 2].width, pyr[i * 4 + 2].height).data;
142 | pyr[i * 4 + 3] = document.createElement("canvas");
143 | pyr[i * 4 + 3].width = Math.floor(pyr[i * 4 - next * 4].width / 2);
144 | pyr[i * 4 + 3].height = Math.floor(pyr[i * 4 - next * 4].height / 2);
145 | pyr[i * 4 + 3].getContext("2d").drawImage(pyr[i * 4 - next * 4], 1, 1, pyr[i * 4 - next * 4].width - 1, pyr[i * 4 - next * 4].height - 1, 0, 0, pyr[i * 4 + 3].width - 2, pyr[i * 4 + 3].height - 2);
146 | pyr[i * 4 + 3].data = pyr[i * 4 + 3].getContext("2d").getImageData(0, 0, pyr[i * 4 + 3].width, pyr[i * 4 + 3].height).data;
147 | }
148 | for (j = 0; j < cascade.stage_classifier.length; j++)
149 | cascade.stage_classifier[j].orig_feature = cascade.stage_classifier[j].feature;
150 | var scale_x = 1, scale_y = 1;
151 | var dx = [0, 1, 0, 1];
152 | var dy = [0, 0, 1, 1];
153 | var seq = [];
154 | for (i = 0; i < scale_upto; i++) {
155 | var qw = pyr[i * 4 + next * 8].width - Math.floor(cascade.width / 4);
156 | var qh = pyr[i * 4 + next * 8].height - Math.floor(cascade.height / 4);
157 | var step = [pyr[i * 4].width * 4, pyr[i * 4 + next * 4].width * 4, pyr[i * 4 + next * 8].width * 4];
158 | var paddings = [pyr[i * 4].width * 16 - qw * 16,
159 | pyr[i * 4 + next * 4].width * 8 - qw * 8,
160 | pyr[i * 4 + next * 8].width * 4 - qw * 4];
161 | for (j = 0; j < cascade.stage_classifier.length; j++) {
162 | var orig_feature = cascade.stage_classifier[j].orig_feature;
163 | var feature = cascade.stage_classifier[j].feature = new Array(cascade.stage_classifier[j].count);
164 | for (k = 0; k < cascade.stage_classifier[j].count; k++) {
165 | feature[k] = {"size" : orig_feature[k].size,
166 | "px" : new Array(orig_feature[k].size),
167 | "pz" : new Array(orig_feature[k].size),
168 | "nx" : new Array(orig_feature[k].size),
169 | "nz" : new Array(orig_feature[k].size)};
170 | for (q = 0; q < orig_feature[k].size; q++) {
171 | feature[k].px[q] = orig_feature[k].px[q] * 4 + orig_feature[k].py[q] * step[orig_feature[k].pz[q]];
172 | feature[k].pz[q] = orig_feature[k].pz[q];
173 | feature[k].nx[q] = orig_feature[k].nx[q] * 4 + orig_feature[k].ny[q] * step[orig_feature[k].nz[q]];
174 | feature[k].nz[q] = orig_feature[k].nz[q];
175 | }
176 | }
177 | }
178 | for (q = 0; q < 4; q++) {
179 | var u8 = [pyr[i * 4].data, pyr[i * 4 + next * 4].data, pyr[i * 4 + next * 8 + q].data];
180 | var u8o = [dx[q] * 8 + dy[q] * pyr[i * 4].width * 8, dx[q] * 4 + dy[q] * pyr[i * 4 + next * 4].width * 4, 0];
181 | for (y = 0; y < qh; y++) {
182 | for (x = 0; x < qw; x++) {
183 | var sum = 0;
184 | var flag = true;
185 | for (j = 0; j < cascade.stage_classifier.length; j++) {
186 | sum = 0;
187 | var alpha = cascade.stage_classifier[j].alpha;
188 | var feature = cascade.stage_classifier[j].feature;
189 | for (k = 0; k < cascade.stage_classifier[j].count; k++) {
190 | var feature_k = feature[k];
191 | var p, pmin = u8[feature_k.pz[0]][u8o[feature_k.pz[0]] + feature_k.px[0]];
192 | var n, nmax = u8[feature_k.nz[0]][u8o[feature_k.nz[0]] + feature_k.nx[0]];
193 | if (pmin <= nmax) {
194 | sum += alpha[k * 2];
195 | } else {
196 | var f, shortcut = true;
197 | for (f = 0; f < feature_k.size; f++) {
198 | if (feature_k.pz[f] >= 0) {
199 | p = u8[feature_k.pz[f]][u8o[feature_k.pz[f]] + feature_k.px[f]];
200 | if (p < pmin) {
201 | if (p <= nmax) {
202 | shortcut = false;
203 | break;
204 | }
205 | pmin = p;
206 | }
207 | }
208 | if (feature_k.nz[f] >= 0) {
209 | n = u8[feature_k.nz[f]][u8o[feature_k.nz[f]] + feature_k.nx[f]];
210 | if (n > nmax) {
211 | if (pmin <= n) {
212 | shortcut = false;
213 | break;
214 | }
215 | nmax = n;
216 | }
217 | }
218 | }
219 | sum += (shortcut) ? alpha[k * 2 + 1] : alpha[k * 2];
220 | }
221 | }
222 | if (sum < cascade.stage_classifier[j].threshold) {
223 | flag = false;
224 | break;
225 | }
226 | }
227 | if (flag) {
228 | seq.push({"x" : (x * 4 + dx[q] * 2) * scale_x,
229 | "y" : (y * 4 + dy[q] * 2) * scale_y,
230 | "width" : cascade.width * scale_x,
231 | "height" : cascade.height * scale_y,
232 | "neighbor" : 1,
233 | "confidence" : sum});
234 | }
235 | u8o[0] += 16;
236 | u8o[1] += 8;
237 | u8o[2] += 4;
238 | }
239 | u8o[0] += paddings[0];
240 | u8o[1] += paddings[1];
241 | u8o[2] += paddings[2];
242 | }
243 | }
244 | scale_x *= scale;
245 | scale_y *= scale;
246 | }
247 | for (j = 0; j < cascade.stage_classifier.length; j++)
248 | cascade.stage_classifier[j].feature = cascade.stage_classifier[j].orig_feature;
249 | if (!(min_neighbors > 0))
250 | return seq;
251 | else {
252 | var result = headtrackr.ccv.array_group(seq, function (r1, r2) {
253 | var distance = Math.floor(r1.width * 0.25 + 0.5);
254 |
255 | return r2.x <= r1.x + distance &&
256 | r2.x >= r1.x - distance &&
257 | r2.y <= r1.y + distance &&
258 | r2.y >= r1.y - distance &&
259 | r2.width <= Math.floor(r1.width * 1.5 + 0.5) &&
260 | Math.floor(r2.width * 1.5 + 0.5) >= r1.width;
261 | });
262 | var ncomp = result.cat;
263 | var idx_seq = result.index;
264 | var comps = new Array(ncomp + 1);
265 | for (i = 0; i < comps.length; i++)
266 | comps[i] = {"neighbors" : 0,
267 | "x" : 0,
268 | "y" : 0,
269 | "width" : 0,
270 | "height" : 0,
271 | "confidence" : 0};
272 |
273 | // count number of neighbors
274 | for(i = 0; i < seq.length; i++)
275 | {
276 | var r1 = seq[i];
277 | var idx = idx_seq[i];
278 |
279 | if (comps[idx].neighbors == 0)
280 | comps[idx].confidence = r1.confidence;
281 |
282 | ++comps[idx].neighbors;
283 |
284 | comps[idx].x += r1.x;
285 | comps[idx].y += r1.y;
286 | comps[idx].width += r1.width;
287 | comps[idx].height += r1.height;
288 | comps[idx].confidence = Math.max(comps[idx].confidence, r1.confidence);
289 | }
290 |
291 | var seq2 = [];
292 | // calculate average bounding box
293 | for(i = 0; i < ncomp; i++)
294 | {
295 | var n = comps[i].neighbors;
296 | if (n >= min_neighbors)
297 | seq2.push({"x" : (comps[i].x * 2 + n) / (2 * n),
298 | "y" : (comps[i].y * 2 + n) / (2 * n),
299 | "width" : (comps[i].width * 2 + n) / (2 * n),
300 | "height" : (comps[i].height * 2 + n) / (2 * n),
301 | "neighbors" : comps[i].neighbors,
302 | "confidence" : comps[i].confidence});
303 | }
304 |
305 | var result_seq = [];
306 | // filter out small face rectangles inside large face rectangles
307 | for(i = 0; i < seq2.length; i++)
308 | {
309 | var r1 = seq2[i];
310 | var flag = true;
311 | for(j = 0; j < seq2.length; j++)
312 | {
313 | var r2 = seq2[j];
314 | var distance = Math.floor(r2.width * 0.25 + 0.5);
315 |
316 | if(i != j &&
317 | r1.x >= r2.x - distance &&
318 | r1.y >= r2.y - distance &&
319 | r1.x + r1.width <= r2.x + r2.width + distance &&
320 | r1.y + r1.height <= r2.y + r2.height + distance &&
321 | (r2.neighbors > Math.max(3, r1.neighbors) || r1.neighbors < 3))
322 | {
323 | flag = false;
324 | break;
325 | }
326 | }
327 |
328 | if(flag)
329 | result_seq.push(r1);
330 | }
331 | return result_seq;
332 | }
333 | };
334 |
--------------------------------------------------------------------------------
/src/controllers.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Optional controllers for handling headtrackr events
3 | *
4 | * @author auduno / github.com/auduno
5 | */
6 |
7 | headtrackr.controllers = {};
8 |
9 | // NB! made for three.js revision 48. May not work with other revisions.
10 |
11 | headtrackr.controllers.three = {};
12 |
13 | /**
14 | * Controls a THREE.js camera to create pseudo-3D effect
15 | *
16 | * Needs the position of "screen" in 3d-model to be given up front, and to be static (i.e. absolute) during headtracking
17 | *
18 | * @param {THREE.PerspectiveCamera} camera
19 | * @param {number} scaling The scaling of the "screen" in the 3d model.
20 | * This is the vertical size of the screen in the 3d-model relative to the vertical size of the computer screen in real life
21 | * @param {array} fixedPosition array [x,y,z] giving the position of the "screen" in the 3d-model
22 | * @param {THREE.Vector3} lookAt the object/position the camera should be pointed towards
23 | * @param {object} params optional object with optional parameters
24 | *
25 | * Optional parameters:
26 | * screenHeight : vertical size of computer screen (default is 20 cm, i.e. typical laptop size)
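* damping : damping of the headtracking response (default is 1, i.e. no damping)
*
* usage (a sketch - the scaling, position and damping values here are arbitrary assumptions):
*   headtrackr.controllers.three.realisticAbsoluteCameraControl(camera, 27, [0,0,50], new THREE.Vector3(0,0,0), {damping : 0.5});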
27 | */
28 | headtrackr.controllers.three.realisticAbsoluteCameraControl = function(camera, scaling, fixedPosition, lookAt, params) {
29 |
30 | if (params === undefined) params = {};
31 | if (params.screenHeight === undefined) {
32 | var screenHeight_cms = 20;
33 | } else {
34 | var screenHeight_cms = params.screenHeight;
35 | }
36 | if (params.damping === undefined) {
37 | params.damping = 1;
38 | }
39 |
40 | camera.position.x = fixedPosition[0];
41 | camera.position.y = fixedPosition[1];
42 | camera.position.z = fixedPosition[2];
43 | camera.lookAt(lookAt);
44 |
45 | var wh = screenHeight_cms * scaling;
46 | var ww = wh * camera.aspect;
47 |
48 | document.addEventListener('headtrackingEvent', function(event) {
49 |
50 | // update camera
51 | var xOffset = event.x > 0 ? 0 : -event.x * 2 * params.damping * scaling;
52 | var yOffset = event.y < 0 ? 0 : event.y * 2 * params.damping * scaling;
53 | camera.setViewOffset(ww + Math.abs(event.x * 2 * params.damping * scaling), wh + Math.abs(event.y * params.damping * 2 * scaling), xOffset, yOffset, ww, wh);
54 |
55 | camera.position.x = fixedPosition[0] + (event.x * scaling * params.damping );
56 | camera.position.y = fixedPosition[1] + (event.y * scaling * params.damping );
57 | camera.position.z = fixedPosition[2] + (event.z * scaling);
58 |
59 | // update lookAt?
60 |
61 | // when changing height of window, we need to change field of view
62 | camera.fov = Math.atan((wh/2 + Math.abs(event.y * scaling * params.damping ))/(Math.abs(event.z*scaling)))*360/Math.PI;
63 | //debugger;
64 |
65 | camera.updateProjectionMatrix();
66 |
67 | }, false);
68 | };
69 |
70 | /**
71 | * Controls a THREE.js camera to create pseudo-3D effect
72 | *
73 | * Places the "screen" in the 3d-model in relation to the original camera position at any given time
74 | * Currently not sure if this works properly, or at all
75 | *
76 | * @param {THREE.PerspectiveCamera} camera
77 | * @param {number} scaling The scaling of the "screen" in the 3d model.
78 | * This is the vertical size of the screen in the 3d-model relative to the vertical size of the computer screen in real life
79 | * @param {number} relativeFixedDistance how far in front of (or behind) the original camera position the fixed frame will be
80 | * @param {object} params optional object with optional parameters
81 | *
82 | * Optional parameters:
83 | * screenHeight : vertical size of computer screen (default is 20 cm, i.e. typical laptop size)
84 | */
85 | headtrackr.controllers.three.realisticRelativeCameraControl = function(camera, scaling, relativeFixedDistance, params) {
86 |
87 | // we assume that the parent of camera is the scene
88 |
89 | if (params === undefined) params = {};
90 | if (params.screenHeight === undefined) {
91 | var screenHeight_cms = 20;
92 | } else {
93 | var screenHeight_cms = params.screenHeight;
94 | }
95 |
96 | var scene = camera.parent;
97 |
98 | var init = true;
99 |
100 | // create an object to offset camera without affecting existing camera interaction
101 | var offset = new THREE.Object3D();
102 | offset.position.set(0,0,0);
103 | offset.add(camera);
104 | scene.add(offset);
105 |
106 | // TODO : we maybe need to offset functions like lookAt as well
107 | // use prototype function replacement for this?
108 |
109 | var wh = screenHeight_cms * scaling;
110 | var ww = wh * camera.aspect;
111 |
112 | // set fov
113 | document.addEventListener('headtrackingEvent', function(event) {
114 |
115 | // update camera
116 | var xOffset = event.x > 0 ? 0 : -event.x * 2 * scaling;
117 | var yOffset = event.y > 0 ? 0 : -event.y * 2 * scaling;
118 | camera.setViewOffset(ww + Math.abs(event.x * 2 * scaling), wh + Math.abs(event.y * 2 * scaling), xOffset, yOffset, ww, wh);
119 |
120 | offset.rotation = camera.rotation;
121 | offset.position.x = 0;
122 | offset.position.y = 0;
123 | offset.position.z = 0;
124 | offset.translateX(event.x * scaling);
125 | offset.translateY(event.y * scaling);
126 | offset.translateZ((event.z * scaling)+relativeFixedDistance);
127 |
128 | //offset.position.x = (event.x * scaling);
129 | //offset.position.y = (event.y * scaling);
130 | //offset.position.z = (event.z * scaling)+relativeFixedDistance;
131 |
132 | // when changing height of window, we need to change field of view
133 | camera.fov = Math.atan((wh/2 + Math.abs(event.y * scaling))/(Math.abs(event.z*scaling)))*360/Math.PI;
134 |
135 | camera.updateProjectionMatrix();
136 |
137 | }, false);
138 | };
139 |
--------------------------------------------------------------------------------
/src/facetrackr.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Library for detecting and tracking the position of a face in a canvas object
3 | *
4 | * usage:
5 | * // create a new tracker
6 | * var ft = new headtrackr.facetrackr.Tracker();
7 | * // initialize it with a canvas
8 | * ft.init(some_canvas);
9 | * // track in canvas
10 | * ft.track();
11 | * // get position of found object
12 | * var currentPos = ft.getTrackObj();
13 | * currentPos.x // x-coordinate of center of object on canvas
14 | * currentPos.y // y-coordinate of center of object on canvas
15 | * currentPos.width // width of object
16 | * currentPos.height // height of object
17 | * currentPos.angle // angle of object in radians
18 | * currentPos.confidence // returns confidence (doesn't work for CS yet)
19 | * currentPos.detection // current detectionmethod (VJ or CS)
20 | * currentPos.time // time spent
21 | *
22 | * @author auduno / github.com/auduno
23 | */
24 |
25 | headtrackr.facetrackr = {};
26 |
27 | /**
28 | * optional parameters to params:
29 | * smoothing : whether to use smoothing on output (default is true)
30 | * smoothingInterval : should be the same as detectionInterval plus time of tracking (default is 35 ms)
31 | * sendEvents : whether to send events (default is true)
32 | * whitebalancing : whether to wait for camera whitebalancing before starting detection (default is true)
33 | * calcAngles : whether to calculate orientation of tracked object (default for facetrackr is false)
34 | *
35 | * @constructor
36 | */
37 | headtrackr.facetrackr.Tracker = function(params) {
38 |
39 | if (!params) params = {};
40 |
41 | if (params.sendEvents === undefined) params.sendEvents = true;
42 | if (params.whitebalancing === undefined) params.whitebalancing = true;
43 | if (params.debug === undefined) {
44 | params.debug = false;
45 | } else {
46 | if (params.debug.tagName != 'CANVAS') params.debug = false;
47 | }
48 | if (params.whitebalancing) {
49 | var _currentDetection = "WB";
50 | } else {
51 | var _currentDetection = "VJ";
52 | }
53 | if (params.calcAngles === undefined) params.calcAngles = false;
54 |
55 | var _inputcanvas, _curtracked, _cstracker;
56 |
57 | var _confidenceThreshold = -10; // needed confidence before switching to Camshift
58 | var previousWhitebalances = []; // array of previous whitebalance values
59 | var pwbLength = 15; // number of whitebalance values to keep
60 |
61 | this.init = function(inputcanvas) {
62 | _inputcanvas = inputcanvas;
63 | // initialize cs tracker
64 | _cstracker = new headtrackr.camshift.Tracker({calcAngles : params.calcAngles});
65 | }
66 |
67 | this.track = function() {
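// detection runs as a simple state machine:
// "WB" (wait for camera whitebalance to stabilize) -> "VJ" (Viola-Jones face detection)
// -> "CS" (camshift tracking of the detected face)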
68 | var result;
69 | // do detection
70 | if (_currentDetection == "WB") {
71 | result = checkWhitebalance();
72 | } else if (_currentDetection == "VJ") {
73 | result = doVJDetection();
74 | } else if (_currentDetection == "CS") {
75 | result = doCSDetection();
76 | }
77 |
78 | // check whether whitebalance is stable before starting detection
79 | if (result.detection == "WB") {
80 | if (previousWhitebalances.length >= pwbLength) previousWhitebalances.pop();
81 | previousWhitebalances.unshift(result.wb);
82 | if (previousWhitebalances.length == pwbLength) {
83 | //get max
84 | var max = Math.max.apply(null, previousWhitebalances);
85 | //get min
86 | var min = Math.min.apply(null, previousWhitebalances);
87 |
88 | // if the difference between the last whitebalance values is less than 2,
89 | // we assume whitebalance is stable
90 | if ((max-min) < 2) {
91 | // switch to facedetection
92 | _currentDetection = "VJ";
93 | }
94 | }
95 | }
96 | // check if Viola-Jones has found a viable face
97 | if (result.detection == "VJ" && result.confidence > _confidenceThreshold) {
98 | // switch to Camshift
99 | _currentDetection = "CS";
100 | // when switching, we initialize camshift with the currently found face
101 | var cRectangle = new headtrackr.camshift.Rectangle(
102 | Math.floor(result.x),
103 | Math.floor(result.y),
104 | Math.floor(result.width),
105 | Math.floor(result.height)
106 | );
107 | _cstracker.initTracker(_inputcanvas, cRectangle);
108 | }
109 |
110 | _curtracked = result;
111 |
112 | if (result.detection == "CS" && params.sendEvents) {
113 | // send events
114 | var evt = document.createEvent("Event");
115 | evt.initEvent("facetrackingEvent", true, true);
116 | evt.height = result.height;
117 | evt.width = result.width;
118 | evt.angle = result.angle;
119 | evt.x = result.x;
120 | evt.y = result.y;
121 | evt.confidence = result.confidence;
122 | evt.detection = result.detection;
123 | evt.time = result.time;
124 | document.dispatchEvent(evt);
125 | }
126 | }
127 |
128 | this.getTrackingObject = function() {
129 | return _curtracked.clone();
130 | }
131 |
132 | // Viola-Jones detection
133 | function doVJDetection() {
134 | // start timing
135 | var start = (new Date).getTime();
136 |
137 | // we seem to have to copy canvas to avoid interference with camshift
138 | // not entirely sure why
139 | // TODO: ways to avoid having to copy canvas every time
140 | var ccvCanvas = document.createElement('canvas');
141 | ccvCanvas.width = _inputcanvas.width;
142 | ccvCanvas.height = _inputcanvas.height;
143 | ccvCanvas.getContext("2d").drawImage(
144 | _inputcanvas, 0, 0, ccvCanvas.width, ccvCanvas.height
145 | );
146 |
147 | var comp = headtrackr.ccv.detect_objects(
148 | headtrackr.ccv.grayscale(ccvCanvas), headtrackr.cascade, 5, 1
149 | );
150 |
151 | // end timing
152 | var diff = (new Date).getTime() - start;
153 |
154 | // loop through found faces and pick the most likely one
155 | // TODO: check amount of neighbors and size as well?
156 | // TODO: choose the face that is most in the center of canvas?
157 | var candidate;
158 | if (comp.length > 0) {
159 | candidate = comp[0];
160 | }
161 | for (var i = 1; i < comp.length; i++) {
162 | if (comp[i].confidence > candidate.confidence) {
163 | candidate = comp[i];
164 | }
165 | }
166 |
167 | // copy information from ccv object to a new trackObj
168 | var result = new headtrackr.facetrackr.TrackObj();
169 | if (!(candidate === undefined)) {
170 | result.width = candidate.width;
171 | result.height = candidate.height;
172 | result.x = candidate.x;
173 | result.y = candidate.y;
174 | result.confidence = candidate.confidence;
175 | }
176 |
177 | // copy timing to object
178 | result.time = diff;
179 | result.detection = "VJ";
180 |
181 | return result;
182 | }
183 |
184 | // Camshift detection
185 | function doCSDetection() {
186 |
187 | // start timing
188 | var start = (new Date).getTime();
189 | // detect
190 | _cstracker.track(_inputcanvas);
191 | var csresult = _cstracker.getTrackObj();
192 |
193 | // if debugging, draw backprojection image on debuggingcanvas
194 | if (params.debug) {
195 | params.debug.getContext('2d').putImageData(_cstracker.getBackProjectionImg(),0,0);
196 | }
197 |
198 | // end timing
199 | var diff = (new Date).getTime() - start;
200 |
201 | // copy information from CS object to a new trackObj
202 | var result = new headtrackr.facetrackr.TrackObj();
203 | result.width = csresult.width;
204 | result.height = csresult.height;
205 | result.x = csresult.x;
206 | result.y = csresult.y;
207 | // TODO: should we adjust this angle to be "clockwise"?
208 | result.angle = csresult.angle;
209 | // TODO: camshift should pass along some sort of confidence?
210 | result.confidence = 1;
211 |
212 | // copy timing to object
213 | result.time = diff;
214 | result.detection = "CS";
215 |
216 | return result;
217 | }
218 |
219 | // Whitebalancing
220 | function checkWhitebalance() {
221 | var result = new headtrackr.facetrackr.TrackObj();
222 | // get whitebalance value
223 | result.wb = headtrackr.getWhitebalance(_inputcanvas);
224 | result.detection = "WB";
225 |
226 | return result;
227 | }
228 | };
229 |
230 | /**
231 | * @constructor
232 | */
233 | headtrackr.facetrackr.TrackObj = function() {
234 | this.height = 0;
235 | this.width = 0;
236 | this.angle = 0;
237 | this.x = 0;
238 | this.y = 0;
239 | this.confidence = -10000;
240 | this.detection = '';
241 | this.time = 0;
242 |
243 | this.clone = function() {
244 | var c = new headtrackr.facetrackr.TrackObj();
245 | c.height = this.height;
246 | c.width = this.width;
247 | c.angle = this.angle;
248 | c.x = this.x;
249 | c.y = this.y;
250 | c.confidence = this.confidence;
251 | c.detection = this.detection;
252 | c.time = this.time;
253 | return c;
254 | }
255 | };
256 |
--------------------------------------------------------------------------------
/src/footer.js.txt:
--------------------------------------------------------------------------------
1 |
2 |
3 | return headtrackr;
4 | }));
--------------------------------------------------------------------------------
/src/header.js.txt:
--------------------------------------------------------------------------------
1 | (function (root, factory) {
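// UMD-style module wrapper: exports headtrackr via CommonJS or AMD when available,
// otherwise attaches it as the global variable headtrackr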
2 | if (typeof exports === 'object') {
3 | module.exports = factory();
4 | } else if (typeof define === 'function' && define.amd) {
5 | define([],factory);
6 | } else {
7 | root.headtrackr = factory();
8 | }
9 | }(this, function () {
--------------------------------------------------------------------------------
/src/headposition.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Calculates an estimate of the position of the head of the user in relation to screen or camera
3 | * based on input from facetrackrObject
4 | *
5 | * Usage:
6 | * var hp = new headtrackr.headposition.Tracker(facetrackrObject, 640, 480);
7 | * var currentPosition = hp.track(facetrackrObject);
8 | *
9 | * @author auduno / github.com/auduno
10 | */
11 |
12 | headtrackr.headposition = {};
13 |
14 | /**
15 | *
16 | * Parameters to Tracker() are:
17 | * facetrackrObject : a generic object with attributes x, y, width, height, angle
18 | * which describe the position of center of detected face
19 | * camwidth : width of canvas where the face was detected
20 | * camheight : height of canvas where the face was detected
21 | *
22 | * Optional parameters can be passed along like this:
23 | * headtrackr.headposition.Tracker(facetrackrObject, 640, 480, {fov : 60})
24 | *
25 | * Optional parameters:
26 | * fov {number} : horizontal field of view of camera (default is to detect via distance to screen, any fov overrides distance_to_screen)
27 | * distance_to_screen {number} : initial distance from face to camera, in cms (default is 60 cm)
28 | * edgecorrection {boolean} : whether to use heuristic for position of head when detection is on the edge of the screen (default is true)
29 | * distance_from_camera_to_screen : distance from camera to center of screen (default is 11.5 cm, typical for laptops)
30 | *
31 | * Returns a generic object with attributes x, y, z which is estimated headposition in cm in relation to center of screen
32 | *
33 | * @constructor
34 | */
35 | headtrackr.headposition.Tracker = function(facetrackrObj, camwidth, camheight, params) {
36 |
37 | // some assumptions that are used when calculating distances and estimating horizontal fov
38 | // head width = 16 cm
39 | // head height = 19 cm
40 | // when initialized, user is approximately 60 cm from camera
41 |
42 | if (!params) params = {};
43 |
44 | if (params.edgecorrection === undefined) {
45 | var edgecorrection = true;
46 | } else {
47 | var edgecorrection = params.edgecorrection;
48 | }
49 |
50 | this.camheight_cam = camheight;
51 | this.camwidth_cam = camwidth;
52 |
53 | var head_width_cm = 16;
54 | var head_height_cm = 19;
55 |
56 | // angle between side of face and diagonal across
57 | var head_small_angle = Math.atan(head_width_cm/head_height_cm);
58 |
59 | var head_diag_cm = Math.sqrt((head_width_cm*head_width_cm)+(head_height_cm*head_height_cm)); // diagonal of face in real space
60 |
61 | var sin_hsa = Math.sin(head_small_angle); //precalculated sine
62 | var cos_hsa = Math.cos(head_small_angle); //precalculated cosine
63 | var tan_hsa = Math.tan(head_small_angle); //precalculated tan
64 |
65 | // estimate horizontal field of view of camera
66 | var init_width_cam = facetrackrObj.width;
67 | var init_height_cam = facetrackrObj.height;
68 | var head_diag_cam = Math.sqrt((init_width_cam*init_width_cam)+(init_height_cam*init_height_cam));
69 | if (params.fov === undefined) {
70 | // we use the diagonal of the faceobject to estimate field of view of the camera
71 | // we use the diagonal since this is less sensitive to errors in width or height
72 | var head_width_cam = sin_hsa * head_diag_cam;
73 | var camwidth_at_default_face_cm = (this.camwidth_cam/head_width_cam) * head_width_cm;
74 | // we assume user is sitting around 60 cm from camera (normal distance on a laptop)
75 | if (params.distance_to_screen === undefined) {
76 | var distance_to_screen = 60;
77 | } else {
78 | var distance_to_screen = params.distance_to_screen;
79 | }
80 | // calculate estimate of field of view
81 | var fov_width = Math.atan((camwidth_at_default_face_cm/2)/distance_to_screen) * 2;
82 | } else {
83 | var fov_width = params.fov * Math.PI/180;
84 | }
85 |
86 | // precalculate ratio between camwidth and distance
87 | var tan_fov_width = 2 * Math.tan(fov_width/2);
88 |
89 | var x, y, z; // holds current position of head (in cms from center of screen)
90 |
91 | this.track = function(facetrackrObj) {
92 |
93 | var w = facetrackrObj.width;
94 | var h = facetrackrObj.height;
95 | var fx = facetrackrObj.x;
96 | var fy = facetrackrObj.y;
97 |
98 | if (edgecorrection) {
99 | // recalculate head_diag_cam, fx, fy
100 |
101 | var margin = 11;
102 |
103 | var leftDistance = fx-(w/2);
104 | var rightDistance = this.camwidth_cam-(fx+(w/2));
105 | var topDistance = fy-(h/2);
106 | var bottomDistance = this.camheight_cam-(fy+(h/2));
107 |
108 | var onVerticalEdge = (leftDistance < margin || rightDistance < margin);
109 | var onHorizontalEdge = (topDistance < margin || bottomDistance < margin);
110 |
111 | if (onHorizontalEdge) {
112 | if (onVerticalEdge) {
113 | // we are in a corner, use previous diagonal as estimate, i.e. don't change head_diag_cam
114 | var onLeftEdge = (leftDistance < margin);
115 | var onTopEdge = (topDistance < margin);
116 |
117 | if (onLeftEdge) {
118 | fx = w-(head_diag_cam * sin_hsa/2);
119 | } else {
120 | fx = fx-(w/2)+(head_diag_cam * sin_hsa/2);
121 | }
122 |
123 | if (onTopEdge) {
124 | fy = h-(head_diag_cam * cos_hsa/2);
125 | } else {
126 | fy = fy-(h/2)+(head_diag_cam*cos_hsa/2);
127 | }
128 |
129 | } else {
130 | // we are on top or bottom edge of camera, use width instead of diagonal and correct y-position
131 | // fix fy
132 | if (topDistance < margin) {
133 | var originalWeight = topDistance/margin;
134 | var estimateWeight = (margin-topDistance)/margin;
135 | fy = h-(originalWeight*(h/2) + estimateWeight*((w/tan_hsa)/2));
136 | head_diag_cam = estimateWeight*(w/sin_hsa) + originalWeight*(Math.sqrt((w*w)+(h*h)));
137 | } else {
138 | var originalWeight = bottomDistance/margin;
139 | var estimateWeight = (margin-bottomDistance)/margin;
140 | fy = fy-(h/2)+(originalWeight*(h/2) + estimateWeight*((w/tan_hsa)/2));
141 | head_diag_cam = estimateWeight*(w/sin_hsa) + originalWeight*(Math.sqrt((w*w)+(h*h)));
142 | }
143 | }
144 | } else if (onVerticalEdge) {
145 | // we are on side edges of camera, use height and correct x-position
146 | if (leftDistance < margin) {
147 | var originalWeight = leftDistance/margin;
148 | var estimateWeight = (margin-leftDistance)/margin;
149 | head_diag_cam = estimateWeight*(h/cos_hsa) + originalWeight*(Math.sqrt((w*w)+(h*h)));
150 | fx = w-(originalWeight*(w/2)+(estimateWeight)*(h*tan_hsa/2));
151 | } else {
152 | var originalWeight = rightDistance/margin;
153 | var estimateWeight = (margin-rightDistance)/margin;
154 | head_diag_cam = estimateWeight*(h/cos_hsa) + originalWeight*(Math.sqrt((w*w)+(h*h)));
155 | fx = fx-(w/2)+(originalWeight*(w/2) + estimateWeight*(h*tan_hsa/2));
156 | }
157 | } else {
158 | head_diag_cam = Math.sqrt((w*w)+(h*h));
159 | }
160 | } else {
161 | head_diag_cam = Math.sqrt((w*w)+(h*h));
162 | }
163 |
164 | // calculate cm-distance from screen
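// by similar triangles: head_diag_cam / camwidth_cam = head_diag_cm / (z * tan_fov_width),
// since z * tan_fov_width is the real-world width visible to the camera at distance z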
165 | z = (head_diag_cm*this.camwidth_cam)/(tan_fov_width*head_diag_cam);
166 | // to transform to z_3ds : z_3ds = (head_diag_3ds/head_diag_cm)*z
167 | // i.e. just use ratio
168 |
169 | // calculate cm-position relative to center of screen
170 | x = -((fx/this.camwidth_cam) - 0.5) * z * tan_fov_width;
171 | y = -((fy/this.camheight_cam) - 0.5) * z * tan_fov_width * (this.camheight_cam/this.camwidth_cam);
172 |
173 |
174 | // Transformation from position relative to camera, to position relative to center of screen
175 | if (params.distance_from_camera_to_screen === undefined) {
176 | // default is 11.5 cm approximately
177 | y = y + 11.5;
178 | } else {
179 | y = y + params.distance_from_camera_to_screen;
180 | }
181 |
182 | // send off event
183 | var evt = document.createEvent("Event");
184 | evt.initEvent("headtrackingEvent", true, true);
185 | evt.x = x;
186 | evt.y = y;
187 | evt.z = z;
188 | document.dispatchEvent(evt);
189 |
190 | return new headtrackr.headposition.TrackObj(x,y,z);
191 | }
192 |
193 |
194 | this.getTrackerObj = function() {
195 | return new headtrackr.headposition.TrackObj(x,y,z);
196 | }
197 |
198 | this.getFOV = function() {
199 | return fov_width * 180/Math.PI;
200 | }
201 | };
202 |
203 | /**
204 | * @constructor
205 | */
206 | headtrackr.headposition.TrackObj = function(x,y,z) {
207 | this.x = x;
208 | this.y = y;
209 | this.z = z;
210 |
211 | this.clone = function() {
212 | var c = new headtrackr.headposition.TrackObj();
213 | c.x = this.x;
214 | c.y = this.y;
215 | c.z = this.z;
216 | return c;
217 | }
218 | };
--------------------------------------------------------------------------------
/src/license.js:
--------------------------------------------------------------------------------
1 | /**
2 | * headtrackr library (https://www.github.com/auduno/headtrackr/)
3 | *
4 | * Copyright (c) 2012, Audun Mathias Øygard
5 | *
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
7 | *
8 | * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
9 | *
10 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
11 | *
12 | * This library includes code from Liu Liu's ccv library (https://github.com/liuliu/ccv)
13 | * and ported code from Benjamin Jung's FaceIt actionscript library (http://www.libspark.org/browser/as3/FaceIt/trunk/src/org/libspark/faceit/camshift/Tracker.as)
14 | *
15 | * ccv library license:
16 | *
17 | * Copyright (c) 2010, Liu Liu
18 | * All rights reserved.
19 | *
20 | * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
21 | *
22 | * * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
23 | * * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
24 | * * Neither the name of the authors nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
25 | *
26 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | *
28 | * FaceIt library license:
29 | *
30 | * Copyright (C)2009 Benjamin Jung
31 | *
32 | * Licensed under the MIT License
33 | *
34 | * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
35 | *
36 | * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
37 | *
38 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 | *
40 | */
41 |
--------------------------------------------------------------------------------
/src/main.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Wrapper for headtrackr library
3 | *
4 | * Usage:
5 | * var htracker = new headtrackr.Tracker();
6 | * htracker.init(videoInput, canvasInput);
7 | * htracker.start();
8 | *
9 | * Optional parameters can be passed to Tracker like this:
10 | * new headtrackr.Tracker({ ui : false, altVideo : "somevideo.ogv" });
11 | *
12 | * Optional parameters:
13 |  *	ui {boolean} : whether to create a message overlay with messages like "found face" (default is true)
14 |  *	altVideo {object} : URLs to alternative videos, used if the camera is not found or not supported,
15 |  *	  in the format : {'ogv' : 'somevideo.ogv', 'mp4' : 'somevideo.mp4', 'webm' : 'somevideo.webm'}
16 |  *	smoothing {boolean} : whether to use smoothing (default is true)
17 |  *	debug {canvas} : a canvas to paint the output of the face detection onto, for debugging
18 |  *	detectionInterval {number} : time to wait between face detections, in milliseconds (default is 20)
19 |  *	retryDetection {boolean} : whether to restart face detection if we lose track of the face (default is true)
20 |  *	fov {number} : horizontal field of view of the camera used, in degrees (default is to estimate this)
21 |  *	fadeVideo {boolean} : whether to fade out the video when the face is detected (default is false)
22 |  *	cameraOffset {number} : distance from the camera to the center of the screen, in cm, used to offset the head position (default is 11.5)
23 |  *	calcAngles {boolean} : whether to calculate angles when doing face tracking (default is false)
24 |  *	headPosition {boolean} : whether to calculate head position (default is true)
25 | *
26 | * @author auduno / github.com/auduno
27 | */
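// Example (editor's addition; the element ids are hypothetical): consuming
// the events the tracker emits. 'headtrackingEvent' and its x/y/z attributes
// are dispatched by headtrackr.headposition.Tracker, included above.
//
//   var htracker = new headtrackr.Tracker({ ui : false });
//   htracker.init(document.getElementById('inputVideo'),
//                 document.getElementById('inputCanvas'));
//   htracker.start();
//   document.addEventListener('headtrackingEvent', function(event) {
//     console.log(event.x, event.y, event.z); // cm relative to screen center
//   }, false);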
28 |
29 | var headtrackr = {};
30 | headtrackr.rev = 2;
31 |
32 | /**
33 | * @constructor
34 | */
35 | headtrackr.Tracker = function(params) {
36 |
37 | if (!params) params = {};
38 |
39 | if (params.smoothing === undefined) params.smoothing = true;
40 | if (params.retryDetection === undefined) params.retryDetection = true;
41 | if (params.ui === undefined) params.ui = true;
42 | if (params.debug === undefined) {
43 | params.debug = false;
44 | } else {
45 | if (params.debug.tagName != 'CANVAS') {
46 | params.debug = false;
47 | } else {
48 | var debugContext = params.debug.getContext('2d');
49 | }
50 | }
51 | if (params.detectionInterval === undefined) params.detectionInterval = 20;
52 | if (params.fadeVideo === undefined) params.fadeVideo = false;
53 | if (params.cameraOffset === undefined) params.cameraOffset = 11.5;
54 | if (params.calcAngles === undefined) params.calcAngles = false;
55 | if (params.headPosition === undefined) params.headPosition = true;
56 |
57 | 	var ui, smoother, facetracker, headposition, canvasContext, canvasElement, videoElement, detector; // declare canvasElement here so init() doesn't leak a global
58 | var detectionTimer;
59 | var fov = 0;
60 | var initialized = true;
61 | var run = false;
62 | var faceFound = false;
63 | var firstRun = true;
64 | var videoFaded = false;
65 | var headDiagonal = [];
66 |
67 | this.status = "";
68 | this.stream = undefined;
69 |
70 | var statusEvent = document.createEvent("Event");
71 | statusEvent.initEvent("headtrackrStatus", true, true);
72 |
73 | var headtrackerStatus = function(message) {
74 | statusEvent.status = message;
75 | document.dispatchEvent(statusEvent);
76 | this.status = message;
77 | }.bind(this);
78 |
79 | var insertAltVideo = function(video) {
80 | if (params.altVideo !== undefined) {
81 | if (supports_video()) {
82 | if (params.altVideo.ogv && supports_ogg_theora_video()) {
83 | video.src = params.altVideo.ogv;
84 | } else if (params.altVideo.mp4 && supports_h264_baseline_video()) {
85 | video.src = params.altVideo.mp4;
86 | } else if (params.altVideo.webm && supports_webm_video()) {
87 | video.src = params.altVideo.webm;
88 | } else {
89 | return false;
90 | }
91 | video.play();
92 | return true;
93 | }
94 | } else {
95 | return false;
96 | }
97 | }
98 |
99 | this.init = function(video, canvas, setupVideo) {
100 | if (setupVideo === undefined || setupVideo == true) {
101 | navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
102 | window.URL = window.URL || window.webkitURL || window.msURL || window.mozURL;
103 | 			// check for camera support
104 | if (navigator.getUserMedia) {
105 | headtrackerStatus("getUserMedia");
106 |
107 | // chrome 19 shim
108 | var videoSelector = {video : true};
109 | if (window.navigator.appVersion.match(/Chrome\/(.*?) /)) {
110 | var chromeVersion = parseInt(window.navigator.appVersion.match(/Chrome\/(\d+)\./)[1], 10);
111 | if (chromeVersion < 20) {
112 | videoSelector = "video";
113 | }
114 | 				}
115 |
116 | // opera shim
117 | if (window.opera) {
118 | window.URL = window.URL || {};
119 | if (!window.URL.createObjectURL) window.URL.createObjectURL = function(obj) {return obj;};
120 | }
121 |
122 | // set up stream
123 | navigator.getUserMedia(videoSelector, (function( stream ) {
124 | headtrackerStatus("camera found");
125 | this.stream = stream;
126 | if (video.mozCaptureStream) {
127 | video.mozSrcObject = stream;
128 | } else {
129 | video.src = (window.URL && window.URL.createObjectURL(stream)) || stream;
130 | }
131 | video.play();
132 | }).bind(this), function() {
133 | headtrackerStatus("no camera");
134 | insertAltVideo(video);
135 | });
136 | } else {
137 | headtrackerStatus("no getUserMedia");
138 | if (!insertAltVideo(video)) {
139 | return false;
140 | }
141 | }
142 |
143 | // resize video when it is playing
144 | video.addEventListener('playing', function() {
145 | if(video.width > video.height) {
146 | video.width = 320;
147 | } else {
148 | video.height = 240;
149 | }
150 | }, false);
151 | }
152 |
153 | videoElement = video;
154 | canvasElement = canvas;
155 | canvasContext = canvas.getContext("2d");
156 |
157 | // create ui if needed
158 | if (params.ui) {
159 | ui = new headtrackr.Ui();
160 | }
161 |
162 | // create smoother if enabled
163 | smoother = new headtrackr.Smoother(0.35, params.detectionInterval+15);
164 |
165 | this.initialized = true;
166 | }
167 |
168 | var track = function() {
169 | // Copy video to canvas
170 | canvasContext.drawImage(videoElement, 0, 0, canvasElement.width, canvasElement.height);
171 |
172 | // if facetracking hasn't started, initialize facetrackr
173 | if (facetracker === undefined) {
174 | facetracker = new headtrackr.facetrackr.Tracker({debug : params.debug, calcAngles : params.calcAngles});
175 | facetracker.init(canvasElement);
176 | }
177 |
178 | // track face
179 | 		facetracker.track();
180 | var faceObj = facetracker.getTrackingObject({debug : params.debug});
181 |
182 | if (faceObj.detection == "WB") headtrackerStatus("whitebalance");
183 | if (firstRun && faceObj.detection == "VJ") headtrackerStatus("detecting");
184 |
185 | // check if we have a detection first
186 | if (!(faceObj.confidence == 0)) {
187 | if (faceObj.detection == "VJ") {
188 | if (detectionTimer === undefined) {
189 | // start timing
190 | detectionTimer = (new Date).getTime();
191 | }
192 | if (((new Date).getTime() - detectionTimer) > 5000) {
193 | headtrackerStatus("hints");
194 | }
195 |
196 | var x = (faceObj.x + faceObj.width/2); //midpoint
197 | var y = (faceObj.y + faceObj.height/2); //midpoint
198 |
199 | if (params.debug) {
200 | // draw detected face on debuggercanvas
201 | debugContext.strokeStyle = "#0000CC";
202 | debugContext.strokeRect(faceObj.x, faceObj.y, faceObj.width, faceObj.height);
203 | }
204 | }
205 | if (faceObj.detection == "CS") {
206 | var x = faceObj.x; //midpoint
207 | var y = faceObj.y; //midpoint
208 |
209 | if (detectionTimer !== undefined) detectionTimer = undefined;
210 |
211 | if (params.debug) {
212 | // draw tracked face on debuggercanvas
213 | debugContext.translate(faceObj.x, faceObj.y)
214 | debugContext.rotate(faceObj.angle-(Math.PI/2));
215 | debugContext.strokeStyle = "#00CC00";
216 | debugContext.strokeRect((-(faceObj.width/2)) >> 0, (-(faceObj.height/2)) >> 0, faceObj.width, faceObj.height);
217 | debugContext.rotate((Math.PI/2)-faceObj.angle);
218 | debugContext.translate(-faceObj.x, -faceObj.y);
219 | }
220 |
221 | // fade out video if it's showing
222 | if (!videoFaded && params.fadeVideo) {
223 | fadeVideo();
224 | videoFaded = true;
225 | }
226 |
227 | this.status = 'tracking';
228 |
229 | //check if we've lost tracking of face
230 | if (faceObj.width == 0 || faceObj.height == 0) {
231 | if (params.retryDetection) {
232 | // retry facedetection
233 | headtrackerStatus("redetecting");
234 |
235 | facetracker = new headtrackr.facetrackr.Tracker({whitebalancing : false, debug: params.debug, calcAngles : params.calcAngles});
236 | facetracker.init(canvasElement);
237 | faceFound = false;
238 | headposition = undefined;
239 |
240 | // show video again if it's not already showing
241 | if (videoFaded) {
242 | videoElement.style.opacity = 1;
243 | videoFaded = false;
244 | }
245 | } else {
246 | headtrackerStatus("lost");
247 | this.stop();
248 | }
249 | } else {
250 | if (!faceFound) {
251 | headtrackerStatus("found");
252 | faceFound = true;
253 | }
254 |
255 | if (params.smoothing) {
256 | // smooth values
257 | if (!smoother.initialized) {
258 | smoother.init(faceObj);
259 | }
260 | faceObj = smoother.smooth(faceObj);
261 | }
262 |
263 | // get headposition
264 | if (headposition === undefined && params.headPosition) {
265 | // wait until headdiagonal is stable before initializing headposition
266 | var stable = false;
267 |
268 | // calculate headdiagonal
269 | var headdiag = Math.sqrt(faceObj.width*faceObj.width + faceObj.height*faceObj.height);
270 |
271 | if (headDiagonal.length < 6) {
272 | headDiagonal.push(headdiag);
273 | } else {
274 | headDiagonal.splice(0,1);
275 | headDiagonal.push(headdiag);
276 | if ((Math.max.apply(null, headDiagonal) - Math.min.apply(null, headDiagonal)) < 5) {
277 | stable = true;
278 | }
279 | }
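// (the six-sample sliding window above accepts the detection as stable
// once its largest and smallest diagonals differ by less than 5 px)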
280 |
281 | if (stable) {
282 | if (firstRun) {
283 | if (params.fov === undefined) {
284 | headposition = new headtrackr.headposition.Tracker(faceObj, canvasElement.width, canvasElement.height, {distance_from_camera_to_screen : params.cameraOffset});
285 | } else {
286 | headposition = new headtrackr.headposition.Tracker(faceObj, canvasElement.width, canvasElement.height, {fov : params.fov, distance_from_camera_to_screen : params.cameraOffset});
287 | }
288 | fov = headposition.getFOV();
289 | firstRun = false;
290 | } else {
291 | headposition = new headtrackr.headposition.Tracker(faceObj, canvasElement.width, canvasElement.height, {fov : fov, distance_from_camera_to_screen : params.cameraOffset});
292 | }
293 | headposition.track(faceObj);
294 | }
295 | } else if (params.headPosition) {
296 | headposition.track(faceObj);
297 | }
298 | }
299 | }
300 | }
301 |
302 | if (run) {
303 | detector = window.setTimeout(track, params.detectionInterval);
304 | }
305 | }.bind(this);
306 |
307 | var starter = function() {
308 | // does some safety checks before starting
309 |
310 | // sometimes canvasContext is not available yet, so try and catch if it's not there...
311 | try {
312 | canvasContext.drawImage(videoElement, 0, 0, canvasElement.width, canvasElement.height);
313 |
314 | // in some cases, the video sends events before starting to draw
315 | // so check that we have something on video before starting to track
316 | var canvasContent = headtrackr.getWhitebalance(canvasElement);
317 | if (canvasContent > 0) {
318 | run = true;
319 | track();
320 | } else {
321 | window.setTimeout(starter, 100);
322 | }
323 | } catch (err) {
324 | window.setTimeout(starter, 100);
325 | }
326 | }
327 |
328 | this.start = function() {
329 | // check if initialized
330 | if (!this.initialized) return false;
331 |
332 | 		// if the video is not yet playing, wait for the 'playing' event before starting
333 | if (!(videoElement.currentTime > 0 && !videoElement.paused && !videoElement.ended)) {
334 |
335 | run = true;
336 | 			// start once the video begins playing
337 | videoElement.addEventListener('playing', starter, false);
338 |
339 | return true;
340 | } else {
341 | starter();
342 | }
343 |
344 | return true;
345 | }
346 |
347 | this.stop = function() {
348 | window.clearTimeout(detector);
349 | run = false;
350 | headtrackerStatus("stopped");
351 | facetracker = undefined;
352 | faceFound = false;
353 |
354 | return true;
355 | }
356 |
357 | this.stopStream = function() {
358 | if (this.stream !== undefined) {
359 | this.stream.stop();
360 | }
361 | }
362 |
363 | this.getFOV = function() {
364 | return fov;
365 | }
366 |
367 | // fade out videoElement
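// (steps opacity from 0.98 down to 0.30 in 0.02 decrements every 50 ms,
// i.e. roughly 34 steps or about 1.7 seconds for a full fade)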
368 | var fadeVideo = function() {
369 | if (videoElement.style.opacity == "") {
370 | videoElement.style.opacity = 0.98;
371 | window.setTimeout(fadeVideo, 50);
372 | } else if (videoElement.style.opacity > 0.30) {
373 | videoElement.style.opacity -= 0.02;
374 | window.setTimeout(fadeVideo, 50);
375 | } else {
376 | videoElement.style.opacity = 0.3;
377 | }
378 | }
379 | };
380 |
381 | // bind shim
382 | // from https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Function/bind
383 |
384 | if (!Function.prototype.bind) {
385 | Function.prototype.bind = function (oThis) {
386 | if (typeof this !== "function") {
387 | // closest thing possible to the ECMAScript 5 internal IsCallable function
388 | throw new TypeError("Function.prototype.bind - what is trying to be bound is not callable");
389 | }
390 |
391 | var aArgs = Array.prototype.slice.call(arguments, 1),
392 | fToBind = this,
393 | fNOP = function () {},
394 | fBound = function () {
395 | return fToBind.apply(this instanceof fNOP
396 | ? this
397 | : oThis || window,
398 | aArgs.concat(Array.prototype.slice.call(arguments)));
399 | };
400 |
401 | fNOP.prototype = this.prototype;
402 | fBound.prototype = new fNOP();
403 |
404 | return fBound;
405 | };
406 | }
407 |
408 | // video support utility functions
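// (note: canPlayType returns "", "maybe" or "probably" rather than a
// boolean; the empty string is falsy, so insertAltVideo above can use
// these helpers directly in boolean tests)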
409 |
410 | function supports_video() {
411 | return !!document.createElement('video').canPlayType;
412 | }
413 |
414 | function supports_h264_baseline_video() {
415 | if (!supports_video()) { return false; }
416 | var v = document.createElement("video");
417 | return v.canPlayType('video/mp4; codecs="avc1.42E01E, mp4a.40.2"');
418 | }
419 |
420 | function supports_ogg_theora_video() {
421 | if (!supports_video()) { return false; }
422 | var v = document.createElement("video");
423 | return v.canPlayType('video/ogg; codecs="theora, vorbis"');
424 | }
425 |
426 | function supports_webm_video() {
427 | if (!supports_video()) { return false; }
428 | var v = document.createElement("video");
429 | return v.canPlayType('video/webm; codecs="vp8, vorbis"');
430 | }
--------------------------------------------------------------------------------
/src/smoother.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Smoother for smoothing tracked positions of face
3 | *
4 | * Double Exponential Smoothing-based Prediction
5 | * see: http://www.cs.brown.edu/people/jjl/pubs/kfvsexp_final_laviola.pdf
6 | * "Double Exponential Smoothing: An alternative to Kalman Filter-based Predictive Tracking"
7 | *
8 | * @author auduno / github.com/auduno
9 |  * @param {number} alpha Smoothing parameter, between 0 and 1. 0 is max smoothing, 1 no smoothing.
10 | * @param {number} interval The ms interval between tracking events
11 | * @constructor
12 | */
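// Editor's summary of the recurrences implemented below, for smoothing
// factor alpha and each tracked component x (position and size):
//   sp  = alpha*x  + (1-alpha)*sp      (first exponential smoother)
//   sp2 = alpha*sp + (1-alpha)*sp2     (smoothing of the smoothed values)
// predict(t) then extrapolates k = floor(t/interval) steps ahead:
//   x_hat = (2 + alpha*k/(1-alpha))*sp - (1 + alpha*k/(1-alpha))*sp2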
13 | headtrackr.Smoother = function(alpha, interval) {
14 |
15 | // alpha = 0.35 smoothes ok while not introducing too much lag
16 |
17 | var sp, sp2, sl, newPositions, positions;
18 | var updateTime = new Date();
19 |
20 | this.initialized = false;
21 |
22 | // whether to use linear interpolation for times in intervals
23 | this.interpolate = false;
24 |
25 | this.init = function(initPos) {
26 | this.initialized = true;
27 | sp = [initPos.x, initPos.y, initPos.z, initPos.width, initPos.height];
28 | 		sp2 = sp.slice(0); // copy rather than alias -- aliasing would make the second smoothing below a no-op
29 | sl = sp.length;
30 | }
31 |
32 | this.smooth = function(pos) {
33 |
34 | positions = [pos.x, pos.y, pos.z, pos.width, pos.height];
35 |
36 | if (this.initialized) {
37 | // update
38 | for (var i = 0;i < sl;i++) {
39 | sp[i] = alpha*positions[i]+(1-alpha)*sp[i];
40 | sp2[i] = alpha*sp[i]+(1-alpha)*sp2[i];
41 | }
42 |
43 | 			// note the time of this update; msDiff below is effectively zero,
44 | 			// so predict() returns the current (non-extrapolated) estimate
45 | 			updateTime = new Date();
46 | 			var msDiff = (new Date())-updateTime;
47 | 			var newPositions = predict.call(this, msDiff); // call with the instance so this.interpolate resolves
48 |
49 | pos.x = newPositions[0];
50 | pos.y = newPositions[1];
51 | pos.z = newPositions[2];
52 | pos.width = newPositions[3];
53 | pos.height = newPositions[4];
54 |
55 | return pos;
56 | } else {
57 | return false;
58 | }
59 | }
60 |
61 | function predict(time) {
62 |
63 | var retPos = [];
64 |
65 | if (this.interpolate) {
66 | var step = time/interval;
67 | var stepLo = step >> 0;
68 | var ratio = alpha/(1-alpha);
69 |
70 | var a = (step-stepLo)*ratio;
71 | var b = (2 + stepLo*ratio);
72 | var c = (1 + stepLo*ratio);
73 |
74 | for (var i = 0;i < sl;i++) {
75 | retPos[i] = a*(sp[i]-sp2[i]) + b*sp[i] - c*sp2[i];
76 | }
77 | } else {
78 | var step = time/interval >> 0;
79 | var ratio = (alpha*step)/(1-alpha);
80 | var a = 2 + ratio;
81 | var b = 1 + ratio;
82 | for (var i = 0;i < sl;i++) {
83 | retPos[i] = a*sp[i] - b*sp2[i];
84 | }
85 | }
86 |
87 | return retPos;
88 | }
89 | }
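// Typical wiring (editor's sketch, matching how main.js sets this up
// earlier in this document, where interval = detectionInterval + 15):
//   var smoother = new headtrackr.Smoother(0.35, 35);
//   smoother.init(faceObj);             // seed with the first detection
//   faceObj = smoother.smooth(faceObj); // smooth each subsequent detection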
--------------------------------------------------------------------------------
/src/ui.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @author auduno / github.com/auduno
3 | * @constructor
4 | */
5 |
6 | headtrackr.Ui = function() {
7 |
8 | var timeout;
9 |
10 | // create element and attach to body
11 | var d = document.createElement('div'),
12 | d2 = document.createElement('div'),
13 | p = document.createElement('p');
14 | d.setAttribute('id', 'headtrackerMessageDiv');
15 |
16 | d.style.left = "20%";
17 | d.style.right = "20%";
18 | d.style.top = "30%";
19 | d.style.fontSize = "90px";
20 | d.style.color = "#777";
21 | d.style.position = "absolute";
22 | d.style.fontFamily = "Helvetica, Arial, sans-serif";
23 | d.style.zIndex = '100002';
24 |
25 | d2.style.marginLeft = "auto";
26 | d2.style.marginRight = "auto";
27 | d2.style.width = "100%";
28 | d2.style.textAlign = "center";
29 | d2.style.color = "#fff";
30 | d2.style.backgroundColor = "#444";
31 | d2.style.opacity = "0.5";
32 |
33 | p.setAttribute('id', 'headtrackerMessage');
34 | d2.appendChild(p);
35 | d.appendChild(d2);
36 | document.body.appendChild(d);
37 |
38 | var supportMessages = {
39 | "no getUserMedia" : "getUserMedia is not supported in your browser :(",
40 | "no camera" : "no camera found :("
41 | };
42 |
43 | var statusMessages = {
44 | "whitebalance" : "Waiting for camera whitebalancing",
45 | 		"detecting" : "Please wait while the camera is detecting your face...",
46 | 		"hints" : "We seem to have some problems detecting your face. Please make sure that your face is well and evenly lit, and that your camera is working.",
47 | 		"redetecting" : "Lost track of face, trying to detect again...",
48 | "lost" : "Lost track of face :(",
49 | "found" : "Face found! Move your head!"
50 | };
51 |
52 | var override = false;
53 |
54 | // function to call messages (and to fade them out after a time)
55 | document.addEventListener("headtrackrStatus", function(event) {
56 | if (event.status in statusMessages) {
57 | window.clearTimeout(timeout);
58 | if (!override) {
59 | var messagep = document.getElementById('headtrackerMessage');
60 | messagep.innerHTML = statusMessages[event.status];
61 | timeout = window.setTimeout(function() {messagep.innerHTML = ''; }, 3000);
62 | }
63 | } else if (event.status in supportMessages) {
64 | override = true;
65 | window.clearTimeout(timeout);
66 | var messagep = document.getElementById('headtrackerMessage');
67 | messagep.innerHTML = supportMessages[event.status];
68 | window.setTimeout(function() {messagep.innerHTML = 'added fallback video for demo'; }, 2000);
69 | window.setTimeout(function() {messagep.innerHTML = '';override = false;}, 4000);
70 | }
71 | }, true);
72 |
73 | }
--------------------------------------------------------------------------------
/src/whitebalance.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @author auduno / github.com/auduno
3 | */
4 |
5 | headtrackr.getWhitebalance = function(canvas) {
6 |
7 | // returns average gray value in canvas
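// e.g. a frame averaging rgb(210, 120, 60) yields (210 + 120 + 60) / 3 = 130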
8 |
9 | var avggray,avgr,avgb,avgg;
10 |
11 | var canvasContext = canvas.getContext('2d');
12 | var image = canvasContext.getImageData(0, 0, canvas.width, canvas.height);
13 | var id = image.data;
14 | var imagesize = image.width * image.height;
15 | 	var r = 0, g = 0, b = 0; // separate initializers, so g and b are not leaked as implicit globals
16 |
17 | for (var i = 0;i < imagesize;i++) {
18 | r += id[4*i];
19 | g += id[(4*i)+1];
20 | b += id[(4*i)+2];
21 | }
22 |
23 | avgr = r/imagesize;
24 | avgg = g/imagesize;
25 | avgb = b/imagesize;
26 | avggray = (avgr+avgg+avgb)/3;
27 |
28 | return avggray;
29 |
30 | }
--------------------------------------------------------------------------------