├── hint.mp3
├── README.md
├── js
│   ├── mouse.js
│   ├── globals.js
│   ├── heat.js
│   ├── main.js
│   ├── ui.js
│   ├── facetracker.js
│   ├── training.js
│   └── dataset.js
├── LICENSE
├── .gitignore
├── blogcode
│   ├── index.html
│   └── main.js
├── style.css
├── index.html
└── normalize.css
/hint.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cpury/lookie-lookie/HEAD/hint.mp3
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # lookie-lookie
2 |
3 | This is a demo project to try out TensorFlow.js. It's a website that learns to
4 | track your eye movement inside the browser. No backend necessary.
5 |
6 | [Demo](https://cpury.github.io/lookie-lookie/) \| [Blog post](https://cpury.github.io/learning-where-you-are-looking-at/)
7 |
8 | ## How to use
9 |
10 | Open index.html in a modern browser like Chrome or Firefox. A tutorial will
11 | guide you through it.
12 |
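13 | If your browser blocks webcam access for pages opened straight from disk,
14 | serving the directory over HTTP (e.g. `python3 -m http.server 8000`, then
15 | opening `http://localhost:8000/`) should help.
16 |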
--------------------------------------------------------------------------------
/js/mouse.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function() {
2 | window.mouse = {
3 | mousePosX: 0.5,
4 | mousePosY: 0.5,
5 |
6 | handleMouseMove: function(event) {
7 | mouse.mousePosX = event.clientX / $('body').width();
8 | mouse.mousePosY = event.clientY / $('body').height();
9 | },
10 |
11 | getMousePos: function() {
12 | return [mouse.mousePosX, mouse.mousePosY];
13 | },
14 | };
15 |
16 | document.onmousemove = mouse.handleMouseMove;
17 | });
18 |
--------------------------------------------------------------------------------
/js/globals.js:
--------------------------------------------------------------------------------
1 | // video support utility functions
2 | window.supports_video = function() {
3 | return !!document.createElement('video').canPlayType;
4 | };
5 |
6 | window.supports_h264_baseline_video = function() {
7 | if (!supports_video()) {
8 | return false;
9 | }
10 | const v = document.createElement('video');
11 | return v.canPlayType('video/mp4; codecs="avc1.42E01E, mp4a.40.2"');
12 | };
13 |
14 | window.supports_webm_video = function() {
15 | if (!supports_video()) {
16 | return false;
17 | }
18 | const v = document.createElement('video');
19 | return v.canPlayType('video/webm; codecs="vp8"');
20 | };
21 |
22 | navigator.getUserMedia =
23 | navigator.getUserMedia ||
24 | navigator.webkitGetUserMedia ||
25 | navigator.mozGetUserMedia ||
26 | navigator.msGetUserMedia;
27 | window.URL = window.URL || window.webkitURL || window.msURL || window.mozURL;
28 |
--------------------------------------------------------------------------------
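
The shims above cover the legacy, callback-style webcam APIs. For reference, a minimal sketch of the modern promise-based path that js/facetracker.js already prefers when navigator.mediaDevices is available:

    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
      navigator.mediaDevices
        .getUserMedia({ video: true })
        .then(stream => console.log('got webcam stream:', stream.id))
        .catch(err => console.error('webcam unavailable:', err.name));
    }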
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Max
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Created by https://www.gitignore.io/api/osx,linux,windows
3 |
4 | ### Linux ###
5 | *~
6 |
7 | # temporary files which can be created if a process still has a handle open of a deleted file
8 | .fuse_hidden*
9 |
10 | # KDE directory preferences
11 | .directory
12 |
13 | # Linux trash folder which might appear on any partition or disk
14 | .Trash-*
15 |
16 | # .nfs files are created when an open file is removed but is still being accessed
17 | .nfs*
18 |
19 | ### OSX ###
20 | *.DS_Store
21 | .AppleDouble
22 | .LSOverride
23 |
24 | # Icon must end with two \r
25 | Icon
26 |
27 | # Thumbnails
28 | ._*
29 |
30 | # Files that might appear in the root of a volume
31 | .DocumentRevisions-V100
32 | .fseventsd
33 | .Spotlight-V100
34 | .TemporaryItems
35 | .Trashes
36 | .VolumeIcon.icns
37 | .com.apple.timemachine.donotpresent
38 |
39 | # Directories potentially created on remote AFP share
40 | .AppleDB
41 | .AppleDesktop
42 | Network Trash Folder
43 | Temporary Items
44 | .apdisk
45 |
46 | ### Windows ###
47 | # Windows thumbnail cache files
48 | Thumbs.db
49 | ehthumbs.db
50 | ehthumbs_vista.db
51 |
52 | # Folder config file
53 | Desktop.ini
54 |
55 | # Recycle Bin used on file shares
56 | $RECYCLE.BIN/
57 |
58 | # Windows Installer files
59 | *.cab
60 | *.msi
61 | *.msm
62 | *.msp
63 |
64 | # Windows shortcuts
65 | *.lnk
66 |
67 |
68 | # End of https://www.gitignore.io/api/osx,linux,windows
69 | node_modules
70 |
--------------------------------------------------------------------------------
/blogcode/index.html:
--------------------------------------------------------------------------------
[Markup lost in extraction; only the text label "Train!" survives. Judging
from blogcode/main.js, the page defines a #webcam video element, #overlay and
#eyes canvases, a #train button, and a #target element, and loads jQuery,
clmtrackr, TensorFlow.js, and main.js.]
--------------------------------------------------------------------------------
/js/heat.js:
--------------------------------------------------------------------------------
1 | window.heatmap = {
2 | getHeatColor: function(value, alpha) {
3 | // Adapted from https://stackoverflow.com/a/17268489/1257278
4 | if (typeof alpha == 'undefined') {
5 | alpha = 1.0;
6 | }
7 | const hue = ((1 - value) * 120).toString(10);
8 | return 'hsla(' + hue + ',100%,50%,' + alpha + ')';
9 | },
10 |
11 | fillHeatmap: function(data, model, ctx, width, height, radius) {
12 | // Go through a dataset and fill the context with the corresponding circles.
13 | const predictions = model.predict(data.x).arraySync();
14 |
15 |     let trueX, trueY, predX, predY, errorX, errorY, error, pointX, pointY;
16 |     // Read the labels into a JS array once, not once per iteration:
17 |     const dataY = data.y.arraySync();
18 |
19 |     for (let i = 0; i < data.n; i++) {
20 | trueX = dataY[i][0];
21 | trueY = dataY[i][1];
22 | predX = predictions[i][0];
23 | predY = predictions[i][1];
24 | errorX = Math.pow(predX - trueX, 2);
25 | errorY = Math.pow(predY - trueY, 2);
26 | error = Math.min(Math.sqrt(Math.sqrt(errorX + errorY)), 1);
27 |
28 | pointX = Math.floor((trueX + 0.5) * width);
29 | pointY = Math.floor((trueY + 0.5) * height);
30 |
31 | ctx.beginPath();
32 | ctx.fillStyle = this.getHeatColor(error, 0.5);
33 | ctx.arc(pointX, pointY, radius, 0, 2 * Math.PI);
34 | ctx.fill();
35 | }
36 | },
37 |
38 | drawHeatmap: function(dataset, model) {
39 | $('#draw-heatmap').prop('disabled', true);
40 | $('#draw-heatmap').html('In Progress...');
41 |
42 | const heatmap = $('#heatMap')[0];
43 | const ctx = heatmap.getContext('2d');
44 |
45 | const width = $('body').width();
46 | const height = $('body').height();
47 |
48 | heatmap.width = width;
49 | heatmap.height = height;
50 | ctx.clearRect(0, 0, width, height);
51 |
52 | this.fillHeatmap(dataset.val, model, ctx, width, height, 30);
53 | this.fillHeatmap(dataset.train, model, ctx, width, height, 15);
54 |
55 | $('#clear-heatmap').prop('disabled', false);
56 | $('#draw-heatmap').prop('disabled', false);
57 | $('#draw-heatmap').html('Draw Heatmap');
58 | },
59 |
60 | clearHeatmap: function() {
61 | $('#clear-heatmap').prop('disabled', true);
62 |
63 | const heatmap = $('#heatMap')[0];
64 | const ctx = heatmap.getContext('2d');
65 |
66 | ctx.clearRect(0, 0, heatmap.width, heatmap.height);
67 | $('#clear-heatmap').prop('disabled', false);
68 | },
69 | };
70 |
--------------------------------------------------------------------------------
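
For intuition, getHeatColor maps an error of 0 to green (hue 120) and an
error of 1 to red (hue 0); fillHeatmap feeds it the square root of the
prediction's Euclidean distance, capped at 1. A quick sketch, assuming only
the module above is loaded:

    console.log(heatmap.getHeatColor(0));      // "hsla(120,100%,50%,1)"
    console.log(heatmap.getHeatColor(1, 0.5)); // "hsla(0,100%,50%,0.5)"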
/js/main.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function() {
2 | const $target = $('#target');
3 | const targetSize = $target.outerWidth();
4 |
5 | function moveTarget() {
6 |     // Move the target to where we predict the user is looking.
7 | if (training.currentModel == null || training.inTraining) {
8 | return;
9 | }
10 |
11 | training.getPrediction().then(prediction => {
12 | const left = prediction[0] * ($('body').width() - targetSize);
13 | const top = prediction[1] * ($('body').height() - targetSize);
14 |
15 | $target.css('left', left + 'px');
16 | $target.css('top', top + 'px');
17 | });
18 | }
19 |
20 | setInterval(moveTarget, 100);
21 |
22 | function download(content, fileName, contentType) {
23 | const a = document.createElement('a');
24 | const file = new Blob([content], {
25 | type: contentType,
26 | });
27 | a.href = URL.createObjectURL(file);
28 | a.download = fileName;
29 | a.click();
30 | }
31 |
32 | // Map functions to keys and buttons:
33 |
34 | $('body').keyup(function(e) {
35 | // On space key:
36 | if (e.keyCode === 32 && ui.readyToCollect) {
37 | dataset.captureExample();
38 |
39 | e.preventDefault();
40 | return false;
41 | }
42 | });
43 |
44 | $('#start-training').click(function(e) {
45 | training.fitModel();
46 | });
47 |
48 | $('#reset-model').click(function(e) {
49 | training.resetModel();
50 | });
51 |
52 | $('#draw-heatmap').click(function(e) {
53 | heatmap.drawHeatmap(dataset, training.currentModel);
54 | });
55 |
56 | $('#clear-heatmap').click(function(e) {
57 | heatmap.clearHeatmap();
58 | });
59 |
60 | $('#store-data').click(function(e) {
61 | const data = dataset.toJSON();
62 | const json = JSON.stringify(data);
63 | download(json, 'dataset.json', 'text/plain');
64 | });
65 |
66 | $('#load-data').click(function(e) {
67 | $('#data-uploader').trigger('click');
68 | });
69 |
70 | $('#data-uploader').change(function(e) {
71 | const file = e.target.files[0];
72 | const reader = new FileReader();
73 |
74 | reader.onload = function() {
75 | const data = reader.result;
76 | const json = JSON.parse(data);
77 | dataset.fromJSON(json);
78 | };
79 |
80 |     reader.readAsText(file);
81 | });
82 |
83 | $('#store-model').click(async function(e) {
84 | await training.currentModel.save('downloads://model');
85 | });
86 |
87 | $('#load-model').click(function(e) {
88 | $('#model-uploader').trigger('click');
89 | });
90 |
91 | $('#model-uploader').change(async function(e) {
92 | const files = e.target.files;
93 | training.currentModel = await tf.loadLayersModel(
94 | tf.io.browserFiles([files[0], files[1]]),
95 | );
96 | ui.onFinishTraining();
97 | });
98 | });
99 |
--------------------------------------------------------------------------------
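
One caveat with the download() helper above: it never releases the object URL
it creates. A hypothetical variant (an assumption, not part of the repo) that
frees the URL once the download has been triggered:

    function downloadAndRevoke(content, fileName, contentType) {
      const a = document.createElement('a');
      const url = URL.createObjectURL(new Blob([content], { type: contentType }));
      a.href = url;
      a.download = fileName;
      a.click();
      // Revoke on the next tick, after the download has started:
      setTimeout(() => URL.revokeObjectURL(url), 0);
    }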
/style.css:
--------------------------------------------------------------------------------
1 | body, html {
2 | width: 100%;
3 | height: 100%;
4 | overflow: hidden;
5 | font-family: 'Roboto', sans-serif;
6 | color: rgba(0, 0, 0, 0.8);
7 | }
8 |
9 | #webcam, #overlay {
10 | position: absolute;
11 | top: 0;
12 | left: 0;
13 | }
14 |
15 | #eyes {
16 | position: absolute;
17 | top: 0;
18 | left: 400px;
19 | }
20 |
21 | #target {
22 | background-color: lightgreen;
23 | position: absolute;
24 | border-radius: 50%;
25 | height: 40px;
26 | width: 40px;
27 | transition: all 0.1s ease;
28 | box-shadow: 0 0 20px 10px white;
29 | border: 4px solid rgba(0,0,0,0.5);
30 | opacity: 0;
31 | }
32 |
33 | footer {
34 | position: absolute;
35 | bottom: 0;
36 | width: 100%;
37 | text-align: center;
38 | background-color: #05668d;
39 | color: #ebf2fa;
40 | font-size: 16pt;
41 | padding: 0.5em;
42 | }
43 |
44 | a {
45 | color: inherit;
46 | }
47 |
48 | @-webkit-keyframes flash {
49 | from,
50 | 50%,
51 | to {
52 | opacity: 1;
53 | }
54 |
55 | 25%,
56 | 75% {
57 | opacity: 0;
58 | }
59 | }
60 |
61 | @keyframes flash {
62 | from,
63 | 50%,
64 | to {
65 | opacity: 1;
66 | }
67 |
68 | 25%,
69 | 75% {
70 | opacity: 0;
71 | }
72 | }
73 |
74 | #info {
75 | position: absolute;
76 | top: 50%;
77 | left: 50%;
78 | transform: translate(-50%, -50%);
79 | font-size: 24pt;
80 | background-color: lightgreen;
81 | padding: 1em;
82 | border-radius: 10px;
83 | max-width: 500px;
84 | transition: all 1s;
85 | -webkit-animation-duration: 1s;
86 | animation-duration: 1s;
87 | -webkit-animation-fill-mode: both;
88 | animation-fill-mode: both;
89 | }
90 |
91 | #info h3 {
92 | margin: 0.2em 0;
93 | }
94 |
95 | #info.flash {
96 | -webkit-animation-name: flash;
97 | animation-name: flash;
98 | }
99 |
100 | #training {
101 | position: absolute;
102 | top: 0;
103 | right: 0;
104 | width: 400px;
105 | background-color: lightblue;
106 | padding: 0.5em;
107 | }
108 |
109 | #training table {
110 | width: 100%;
111 | font-size: 16pt;
112 | font-family: 'Source Code Pro', monospace;
113 | }
114 |
115 | #training table td:nth-child(2) {
116 | text-align: right;
117 | }
118 |
119 | .buttonwrap {
120 | text-align: center;
121 | }
122 |
123 | button {
124 | position: relative;
125 | z-index: 11;
126 | cursor: pointer;
127 | font-size: 10pt;
128 | padding: 0.125em 0em;
129 | margin: 0.1em;
130 | width: 140px;
131 | font-family: 'Source Code Pro', monospace;
132 | }
133 |
134 | #heatMap {
135 | position: absolute;
136 | top: 0;
137 | left: 0;
138 | height: 100%;
139 | width: 100%;
140 | z-index: 10;
141 | pointer-events: none;
142 | }
143 |
144 | #data-uploader, #model-uploader {
145 | display: none;
146 | }
147 |
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
[Markup lost in extraction; only text content survives. Recoverable: the page
title "Lookie Lookie!"; an #info box reading "Hi there! 😃 This is Lookie
Lookie. Please enable your webcam."; a #training stats table (Training
examples: 0, Validation examples: 0, Epochs trained: 0, Training loss: ?,
Validation loss: ?); and a footer, "Created with TensorFlow.js by Max
Schumacher. Source". Judging from js/*.js and style.css, the body also
defines the #webcam video, the #overlay, #eyes, and #heatMap canvases, the
#target dot, the training buttons (#start-training, #reset-model,
#draw-heatmap, #clear-heatmap, #store-data, #load-data, #store-model,
#load-model), and hidden #data-uploader and #model-uploader file inputs.]
--------------------------------------------------------------------------------
/js/ui.js:
--------------------------------------------------------------------------------
1 | window.ui = {
2 | state: 'loading',
3 | readyToCollect: false,
4 | nExamples: 0,
5 | nTrainings: 0,
6 |
7 | setContent: function(key, value) {
8 | // Set an element's content based on the data-content key.
9 | $('[data-content="' + key + '"]').html(value);
10 | },
11 |
12 | showInfo: function(text, dontFlash) {
13 | // Show info and beep / flash.
14 | this.setContent('info', text);
15 | if (!dontFlash) {
16 | $('#info').addClass('flash');
17 | new Audio('hint.mp3').play();
18 | setTimeout(function() {
19 | $('#info').removeClass('flash');
20 | }, 1000);
21 | }
22 | },
23 |
24 | onWebcamEnabled: function() {
25 | this.state = 'finding face';
26 | this.showInfo("Thanks! Now let's find your face! 🤨", true);
27 | },
28 |
29 | onFoundFace: function() {
30 | if (this.state == 'finding face') {
31 | this.state = 'collecting';
32 | this.readyToCollect = true;
33 | this.showInfo(
34 | "Let's start! 🙂 " +
35 | 'Collect data points by moving your mouse over the screen, following the cursor with your eyes and hitting the space key repeatedly 👀',
36 | true,
37 | );
38 | }
39 | },
40 |
41 | onAddExample: function(nTrain, nVal) {
42 | // Call this when an example is added.
43 | this.nExamples = nTrain + nVal;
44 | this.setContent('n-train', nTrain);
45 | this.setContent('n-val', nVal);
46 | if (nTrain >= 2) {
47 | $('#start-training').prop('disabled', false);
48 | }
49 | if (this.state == 'collecting' && this.nExamples == 5) {
50 | this.showInfo(
51 | 'Keep going! ' +
52 | 'You need to collect at least 20 data points to start seeing results.',
53 | );
54 | }
55 | if (this.state == 'collecting' && this.nExamples == 25) {
56 | this.showInfo(
57 | 'Great job! 👌 ' +
58 | "Now that you have a handful of examples, let's train the neural network! " +
59 | 'Hit the training button in the top right corner!',
60 | );
61 | }
62 | if (this.state == 'trained' && this.nExamples == 50) {
63 | this.showInfo(
64 | 'Fantastic 👏 ' +
65 | "You've collected lots of examples. Let's try training again!",
66 | );
67 | }
68 | if (nTrain > 0 && nVal > 0) {
69 | $('#store-data').prop('disabled', false);
70 | }
71 | },
72 |
73 | onFinishTraining: function() {
74 | // Call this when training is finished.
75 | this.nTrainings += 1;
76 | $('#target').css('opacity', '0.9');
77 | $('#draw-heatmap').prop('disabled', false);
78 | $('#reset-model').prop('disabled', false);
79 | $('#store-model').prop('disabled', false);
80 |
81 | if (this.nTrainings == 1) {
82 | this.state = 'trained';
83 | this.showInfo(
84 | 'Awesome! 😍 ' +
85 | 'The green target should start following your eyes around. ' +
86 | "I guess it's still very bad... 😅 " +
87 | "Let's collect more training data! Keep following the mouse cursor and hitting space.",
88 | );
89 | } else if (this.nTrainings == 2) {
90 | this.state = 'trained_twice';
91 | this.showInfo(
92 | 'Getting better! 🚀 ' +
93 | 'Keep collecting and retraining! ' +
94 | 'You can also draw a heatmap that shows you where your ' +
95 | 'model has its strong and weak points.',
96 | );
97 | } else if (this.nTrainings == 3) {
98 | this.state = 'trained_thrice';
99 | this.showInfo(
100 | 'If your model is overfitting, remember you can reset it anytime 👻',
101 | );
102 | } else if (this.nTrainings == 4) {
103 | this.state = 'trained_thrice';
104 | this.showInfo(
105 | 'Have fun! ' +
106 | 'Check out more of my stuff at cpury.github.io 😄',
107 | );
108 | }
109 | },
110 | };
111 |
--------------------------------------------------------------------------------
/js/facetracker.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function() {
2 | const video = document.getElementById('webcam');
3 | const overlay = document.getElementById('overlay');
4 |
5 | window.facetracker = {
6 | video: video,
7 | videoWidthExternal: video.width,
8 | videoHeightExternal: video.height,
9 | videoWidthInternal: video.videoWidth,
10 | videoHeightInternal: video.videoHeight,
11 | overlay: overlay,
12 | overlayCC: overlay.getContext('2d'),
13 |
14 | trackingStarted: false,
15 | currentPosition: null,
16 | currentEyeRect: null,
17 |
18 | adjustVideoProportions: function() {
19 | // resize overlay and video if proportions of video are not 4:3
20 | // keep same height, just change width
21 | facetracker.videoWidthInternal = video.videoWidth;
22 | facetracker.videoHeightInternal = video.videoHeight;
23 | const proportion =
24 | facetracker.videoWidthInternal / facetracker.videoHeightInternal;
25 | facetracker.videoWidthExternal = Math.round(
26 | facetracker.videoHeightExternal * proportion,
27 | );
28 | facetracker.video.width = facetracker.videoWidthExternal;
29 | facetracker.overlay.width = facetracker.videoWidthExternal;
30 | },
31 |
32 | gumSuccess: function(stream) {
33 | ui.onWebcamEnabled();
34 |
35 | // add camera stream if getUserMedia succeeded
36 | if ('srcObject' in facetracker.video) {
37 | facetracker.video.srcObject = stream;
38 | } else {
39 | facetracker.video.src =
40 | window.URL && window.URL.createObjectURL(stream);
41 | }
42 |
43 | facetracker.video.onloadedmetadata = function() {
44 | facetracker.adjustVideoProportions();
45 | facetracker.video.play();
46 | };
47 |
48 | facetracker.video.onresize = function() {
49 | facetracker.adjustVideoProportions();
50 | if (facetracker.trackingStarted) {
51 | facetracker.ctrack.stop();
52 | facetracker.ctrack.reset();
53 | facetracker.ctrack.start(facetracker.video);
54 | }
55 | };
56 | },
57 |
58 | gumFail: function() {
59 | ui.showInfo(
60 | 'There was some problem trying to fetch video from your webcam 😭',
61 | true,
62 | );
63 | },
64 |
65 | startVideo: function() {
66 | // start video
67 | facetracker.video.play();
68 | // start tracking
69 | facetracker.ctrack.start(facetracker.video);
70 | facetracker.trackingStarted = true;
71 | // start loop to draw face
72 | facetracker.positionLoop();
73 | },
74 |
75 | positionLoop: function() {
76 | // Check if a face is detected, and if so, track it.
77 | requestAnimationFrame(facetracker.positionLoop);
78 | facetracker.currentPosition = facetracker.ctrack.getCurrentPosition();
79 | facetracker.overlayCC.clearRect(
80 | 0,
81 | 0,
82 | facetracker.videoWidthExternal,
83 | facetracker.videoHeightExternal,
84 | );
85 | if (facetracker.currentPosition) {
86 | facetracker.trackFace(facetracker.currentPosition);
87 | facetracker.ctrack.draw(facetracker.overlay);
88 | ui.onFoundFace();
89 | }
90 | },
91 |
92 | getEyesRect: function(position) {
93 | // Given a tracked face, returns a rectangle surrounding the eyes.
94 | const minX = position[19][0] + 3;
95 | const maxX = position[15][0] - 3;
96 | const minY =
97 | Math.min(
98 | position[20][1],
99 | position[21][1],
100 | position[17][1],
101 | position[16][1],
102 | ) + 6;
103 | const maxY =
104 | Math.max(
105 | position[23][1],
106 | position[26][1],
107 | position[31][1],
108 | position[28][1],
109 | ) + 3;
110 |
111 | const width = maxX - minX;
112 | const height = maxY - minY - 5;
113 |
114 | return [minX, minY, width, height * 1.25];
115 | },
116 |
117 | trackFace: function(position) {
118 | // Given a tracked face, crops out the eyes and draws them in the eyes canvas.
119 | const rect = facetracker.getEyesRect(position);
120 | facetracker.currentEyeRect = rect;
121 |
122 | const eyesCanvas = document.getElementById('eyes');
123 | const eyesCtx = eyesCanvas.getContext('2d');
124 |
125 | // Resize because the underlying video might be a different resolution:
126 | const resizeFactorX =
127 | facetracker.videoWidthInternal / facetracker.videoWidthExternal;
128 | const resizeFactorY =
129 | facetracker.videoHeightInternal / facetracker.videoHeightExternal;
130 |
131 | facetracker.overlayCC.strokeStyle = 'red';
132 | facetracker.overlayCC.strokeRect(rect[0], rect[1], rect[2], rect[3]);
133 | eyesCtx.drawImage(
134 | facetracker.video,
135 | rect[0] * resizeFactorX,
136 | rect[1] * resizeFactorY,
137 | rect[2] * resizeFactorX,
138 | rect[3] * resizeFactorY,
139 | 0,
140 | 0,
141 | eyesCanvas.width,
142 | eyesCanvas.height,
143 | );
144 | },
145 | };
146 |
147 | video.addEventListener('canplay', facetracker.startVideo, false);
148 |
149 | // set up video
150 | if (navigator.mediaDevices) {
151 | navigator.mediaDevices
152 | .getUserMedia({
153 | video: true,
154 | })
155 | .then(facetracker.gumSuccess)
156 | .catch(facetracker.gumFail);
157 | } else if (navigator.getUserMedia) {
158 | navigator.getUserMedia(
159 | {
160 | video: true,
161 | },
162 | facetracker.gumSuccess,
163 | facetracker.gumFail,
164 | );
165 | } else {
166 | ui.showInfo(
167 | 'Your browser does not seem to support getUserMedia. 😭 This will probably only work in Chrome or Firefox.',
168 | true,
169 | );
170 | }
171 |
172 | facetracker.ctrack = new clm.tracker();
173 | facetracker.ctrack.init();
174 | });
175 |
--------------------------------------------------------------------------------
/js/training.js:
--------------------------------------------------------------------------------
1 | window.training = {
2 | currentModel: null,
3 | inTraining: false,
4 | epochsTrained: 0,
5 |
6 | createModel: function() {
7 | const inputImage = tf.input({
8 | name: 'image',
9 | shape: [dataset.inputHeight, dataset.inputWidth, 3],
10 | });
11 | const inputMeta = tf.input({
12 | name: 'metaInfos',
13 | shape: [4],
14 | });
15 |
16 | const conv = tf.layers
17 | .conv2d({
18 | kernelSize: 5,
19 | filters: 20,
20 | strides: 1,
21 | activation: 'relu',
22 | kernelInitializer: 'varianceScaling',
23 | })
24 | .apply(inputImage);
25 |
26 | const maxpool = tf.layers
27 | .maxPooling2d({
28 | poolSize: [2, 2],
29 | strides: [2, 2],
30 | })
31 | .apply(conv);
32 |
33 | const flat = tf.layers.flatten().apply(maxpool);
34 |
35 |     const dropout = tf.layers.dropout({ rate: 0.2 }).apply(flat);
36 |
37 | const concat = tf.layers.concatenate().apply([dropout, inputMeta]);
38 |
39 | const output = tf.layers
40 | .dense({
41 | units: 2,
42 | activation: 'tanh',
43 | kernelInitializer: 'varianceScaling',
44 | })
45 | .apply(concat);
46 |
47 | const model = tf.model({
48 | inputs: [inputImage, inputMeta],
49 | outputs: output,
50 | });
51 |
52 | return model;
53 | },
54 |
55 | fitModel: function() {
56 | // TODO Set params in UI?
57 | this.inTraining = true;
58 | const epochs = 10;
59 |
60 | let batchSize = Math.floor(dataset.train.n * 0.1);
61 | batchSize = Math.max(2, Math.min(batchSize, 64));
62 |
63 | $('#start-training').prop('disabled', true);
64 | $('#start-training').html('In Progress...');
65 |
66 | if (training.currentModel == null) {
67 | training.currentModel = training.createModel();
68 | }
69 |
70 | console.info('Training on', dataset.train.n, 'samples');
71 |
72 | ui.state = 'training';
73 |
74 | let bestEpoch = -1;
75 | let bestTrainLoss = Number.MAX_SAFE_INTEGER;
76 | let bestValLoss = Number.MAX_SAFE_INTEGER;
77 | const bestModelPath = 'localstorage://best-model';
78 |
79 | training.currentModel.compile({
80 | optimizer: tf.train.adam(0.0005),
81 | loss: 'meanSquaredError',
82 | });
83 |
84 | training.currentModel.fit(dataset.train.x, dataset.train.y, {
85 | batchSize: batchSize,
86 | epochs: epochs,
87 | shuffle: true,
88 | validationData: [dataset.val.x, dataset.val.y],
89 | callbacks: {
90 | onEpochEnd: async function(epoch, logs) {
91 | console.info('Epoch', epoch, 'losses:', logs);
92 | training.epochsTrained += 1;
93 | ui.setContent('n-epochs', training.epochsTrained);
94 | ui.setContent('train-loss', logs.loss.toFixed(5));
95 | ui.setContent('val-loss', logs.val_loss.toFixed(5));
96 |
97 | if (logs.val_loss < bestValLoss) {
98 | // Save model
99 | bestEpoch = epoch;
100 | bestTrainLoss = logs.loss;
101 | bestValLoss = logs.val_loss;
102 |
103 | // Store best model:
104 | await training.currentModel.save(bestModelPath);
105 | }
106 |
107 | return await tf.nextFrame();
108 | },
109 | onTrainEnd: async function() {
110 | console.info('Finished training');
111 |
112 | // Load best model:
113 | training.epochsTrained -= epochs - bestEpoch;
114 | console.info('Loading best epoch:', training.epochsTrained);
115 | ui.setContent('n-epochs', training.epochsTrained);
116 | ui.setContent('train-loss', bestTrainLoss.toFixed(5));
117 | ui.setContent('val-loss', bestValLoss.toFixed(5));
118 |
119 | training.currentModel = await tf.loadLayersModel(bestModelPath);
120 |
121 | $('#start-training').prop('disabled', false);
122 | $('#start-training').html('Start Training');
123 | training.inTraining = false;
124 | ui.onFinishTraining();
125 | },
126 | },
127 | });
128 | },
129 |
130 | resetModel: function() {
131 | $('#reset-model').prop('disabled', true);
132 | training.currentModel = null;
133 | training.epochsTrained = 0;
134 | ui.setContent('n-epochs', training.epochsTrained);
135 | ui.setContent('train-loss', '?');
136 | ui.setContent('val-loss', '?');
137 | $('#reset-model').prop('disabled', false);
138 | },
139 |
140 | getPrediction: async function() {
141 | // Return relative x, y where we expect the user to look right now.
142 | const rawImg = dataset.getImage();
143 | const img = await dataset.convertImage(rawImg);
144 | const metaInfos = dataset.getMetaInfos();
145 | const prediction = training.currentModel.predict([img, metaInfos]);
146 | const predictionData = await prediction.data();
147 |
148 | tf.dispose([img, metaInfos, prediction]);
149 |
150 | return [predictionData[0] + 0.5, predictionData[1] + 0.5];
151 | },
152 |
153 | drawSingleFilter: function(weights, filterId, canvas) {
154 | const canvasCtx = canvas.getContext('2d');
155 | const kernelSize = weights.shape[0];
156 | const pixelSize = canvas.width / kernelSize;
157 |
158 | let x, y;
159 | let min = 10000;
160 | let max = -10000;
161 | let value;
162 |
163 |     // Read the kernel into a JS array once. First, find min and max:
164 |     const kernel = weights.arraySync();
165 |     for (x = 0; x < kernelSize; x++) {
166 |       for (y = 0; y < kernelSize; y++) {
167 |         value = kernel[x][y][0][filterId];
168 |         if (value < min) min = value;
169 |         if (value > max) max = value;
170 |       }
171 |     }
172 |     for (x = 0; x < kernelSize; x++) {
173 |       for (y = 0; y < kernelSize; y++) {
174 |         value = kernel[x][y][0][filterId];
175 |         value = Math.round(((value - min) / (max - min)) * 255);
176 |
177 |         canvasCtx.fillStyle = 'rgb(' + value + ',' + value + ',' + value + ')';
178 |         canvasCtx.fillRect(x * pixelSize, y * pixelSize, pixelSize, pixelSize);
179 |       }
180 |     }
181 | },
182 |
183 | visualizePixels: function(canvas) {
184 | const model = training.currentModel;
185 | const convLayer = model.layers[1];
186 | const weights = convLayer.weights[0].read();
187 | const bias = convLayer.weights[1].read();
188 | const filterId = 1;
189 |
190 | training.drawSingleFilter(weights, filterId, canvas);
191 | },
192 | };
193 |
--------------------------------------------------------------------------------
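
A minimal smoke-test sketch for the two-input model above (assumed usage, not
part of the repo; it relies on tf plus the dataset and training modules being
loaded, and the random tensors stand in for a real eye crop and its meta
infos):

    const model = training.createModel();
    const fakeImage = tf.randomNormal([1, dataset.inputHeight, dataset.inputWidth, 3]);
    const fakeMeta = tf.randomNormal([1, 4]);
    const out = model.predict([fakeImage, fakeMeta]);
    out.print(); // shape [1, 2]; tanh output, trained against targets in [-0.5, 0.5]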
/js/dataset.js:
--------------------------------------------------------------------------------
1 | window.dataset = {
2 | inputWidth: $('#eyes').width(),
3 | inputHeight: $('#eyes').height(),
4 | train: {
5 | n: 0,
6 | x: null,
7 | y: null,
8 | },
9 | val: {
10 | n: 0,
11 | x: null,
12 | y: null,
13 | },
14 |
15 | getImage: function() {
16 | // Capture the current image in the eyes canvas as a tensor.
17 | return tf.tidy(function() {
18 | const image = tf.browser.fromPixels(document.getElementById('eyes'));
19 | const batchedImage = image.expandDims(0);
20 | return batchedImage
21 | .toFloat()
22 | .div(tf.scalar(127))
23 | .sub(tf.scalar(1));
24 | });
25 | },
26 |
27 | getMetaInfos: function(mirror) {
28 | // Get some meta info about the rectangle as a tensor:
29 | // - middle x, y of the eye rectangle, relative to video size
30 | // - size of eye rectangle, relative to video size
31 | // - angle of rectangle (TODO)
32 | let x = facetracker.currentEyeRect[0] + facetracker.currentEyeRect[2] / 2;
33 | let y = facetracker.currentEyeRect[1] + facetracker.currentEyeRect[3] / 2;
34 |
35 | x = (x / facetracker.videoWidthExternal) * 2 - 1;
36 | y = (y / facetracker.videoHeightExternal) * 2 - 1;
37 |
38 | const rectWidth =
39 | facetracker.currentEyeRect[2] / facetracker.videoWidthExternal;
40 | const rectHeight =
41 | facetracker.currentEyeRect[3] / facetracker.videoHeightExternal;
42 |
43 | if (mirror) {
44 | x = 1 - x;
45 | y = 1 - y;
46 | }
47 | return tf.tidy(function() {
48 | return tf.tensor1d([x, y, rectWidth, rectHeight]).expandDims(0);
49 | });
50 | },
51 |
52 | whichDataset: function() {
53 | // Returns 'train' or 'val' depending on what makes sense / is random.
54 | if (dataset.train.n == 0) {
55 | return 'train';
56 | }
57 | if (dataset.val.n == 0) {
58 | return 'val';
59 | }
60 | return Math.random() < 0.2 ? 'val' : 'train';
61 | },
62 |
63 | rgbToGrayscale(imageArray, n, x, y) {
64 | // Given an rgb array and positions, returns a grayscale value.
65 | // Inspired by http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0029740
66 | let r = (imageArray[n][x][y][0] + 1) / 2;
67 | let g = (imageArray[n][x][y][1] + 1) / 2;
68 | let b = (imageArray[n][x][y][2] + 1) / 2;
69 |
70 | // Gamma correction:
71 | const exponent = 1 / 2.2;
72 | r = Math.pow(r, exponent);
73 | g = Math.pow(g, exponent);
74 | b = Math.pow(b, exponent);
75 |
76 | // Gleam:
77 | const gleam = (r + g + b) / 3;
78 | return gleam * 2 - 1;
79 | },
80 |
81 |   convertImage: async function(image) {
82 |     // Convert the RGB image to grayscale and append two spatial info
83 |     // channels (the normalized x and y position of each pixel).
84 |     // The image tensor has shape [batch, rows, cols, channels].
85 |     const imageShape = image.shape;
86 |     const imageArray = await image.array();
87 |     const rows = imageShape[1];
88 |     const cols = imageShape[2];
89 |
90 |     const data = [new Array(rows)];
91 |
92 |     for (let x = 0; x < rows; x++) {
93 |       data[0][x] = new Array(cols);
94 |
95 |       for (let y = 0; y < cols; y++) {
96 |         const grayValue = dataset.rgbToGrayscale(imageArray, 0, x, y);
97 |         data[0][x][y] = [grayValue, (x / rows) * 2 - 1, (y / cols) * 2 - 1];
98 |       }
99 |     }
100 |
101 |     return tf.tensor(data);
102 |   },
103 |
104 | addToDataset: function(image, metaInfos, target, key) {
105 | // Add the given x, y to either 'train' or 'val'.
106 | const set = dataset[key];
107 |
108 | if (set.x == null) {
109 | set.x = [tf.keep(image), tf.keep(metaInfos)];
110 | set.y = tf.keep(target);
111 | } else {
112 | const oldImage = set.x[0];
113 | set.x[0] = tf.keep(oldImage.concat(image, 0));
114 |
115 | const oldEyePos = set.x[1];
116 | set.x[1] = tf.keep(oldEyePos.concat(metaInfos, 0));
117 |
118 | const oldY = set.y;
119 | set.y = tf.keep(oldY.concat(target, 0));
120 |
121 | tf.dispose([oldImage, oldEyePos, oldY, target]);
122 | }
123 |
124 | set.n += 1;
125 | },
126 |
127 | addExample: async function(image, metaInfos, target, dontDispose) {
128 | // Given an image, eye pos and target coordinates, adds them to our dataset.
129 | target[0] = target[0] - 0.5;
130 | target[1] = target[1] - 0.5;
131 | target = tf.keep(
132 | tf.tidy(function() {
133 | return tf.tensor1d(target).expandDims(0);
134 | }),
135 | );
136 | const key = dataset.whichDataset();
137 |
138 | const convertedImage = await dataset.convertImage(image);
139 |
140 | dataset.addToDataset(convertedImage, metaInfos, target, key);
141 |
142 | ui.onAddExample(dataset.train.n, dataset.val.n);
143 |
144 | if (!dontDispose) {
145 | tf.dispose(image, metaInfos);
146 | }
147 | },
148 |
149 | captureExample: function() {
150 | // Take the latest image from the eyes canvas and add it to our dataset.
151 | // Takes the coordinates of the mouse.
152 | tf.tidy(function() {
153 | const img = dataset.getImage();
154 | const mousePos = mouse.getMousePos();
155 | const metaInfos = tf.keep(dataset.getMetaInfos());
156 | dataset.addExample(img, metaInfos, mousePos);
157 | });
158 | },
159 |
160 | toJSON: function() {
161 | const tensorToArray = function(t) {
162 | const typedArray = t.dataSync();
163 | return Array.prototype.slice.call(typedArray);
164 | };
165 |
166 | return {
167 | inputWidth: dataset.inputWidth,
168 | inputHeight: dataset.inputHeight,
169 | train: {
170 | shapes: {
171 | x0: dataset.train.x[0].shape,
172 | x1: dataset.train.x[1].shape,
173 | y: dataset.train.y.shape,
174 | },
175 | n: dataset.train.n,
176 | x: dataset.train.x && [
177 | tensorToArray(dataset.train.x[0]),
178 | tensorToArray(dataset.train.x[1]),
179 | ],
180 | y: tensorToArray(dataset.train.y),
181 | },
182 | val: {
183 | shapes: {
184 | x0: dataset.val.x[0].shape,
185 | x1: dataset.val.x[1].shape,
186 | y: dataset.val.y.shape,
187 | },
188 | n: dataset.val.n,
189 | x: dataset.val.x && [
190 | tensorToArray(dataset.val.x[0]),
191 | tensorToArray(dataset.val.x[1]),
192 | ],
193 | y: tensorToArray(dataset.val.y),
194 | },
195 | };
196 | },
197 |
198 | fromJSON: function(data) {
199 | dataset.inputWidth = data.inputWidth;
200 | dataset.inputHeight = data.inputHeight;
201 | dataset.train.n = data.train.n;
202 | dataset.train.x = data.train.x && [
203 | tf.tensor(data.train.x[0], data.train.shapes.x0),
204 | tf.tensor(data.train.x[1], data.train.shapes.x1),
205 | ];
206 | dataset.train.y = tf.tensor(data.train.y, data.train.shapes.y);
207 | dataset.val.n = data.val.n;
208 | dataset.val.x = data.val.x && [
209 | tf.tensor(data.val.x[0], data.val.shapes.x0),
210 | tf.tensor(data.val.x[1], data.val.shapes.x1),
211 | ];
212 | dataset.val.y = tf.tensor(data.val.y, data.val.shapes.y);
213 |
214 | ui.onAddExample(dataset.train.n, dataset.val.n);
215 | },
216 | };
217 |
--------------------------------------------------------------------------------
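
A worked example of the gleam conversion in rgbToGrayscale: a mid-gray pixel
(0 in the [-1, 1] input space, i.e. 0.5 in [0, 1]) gamma-corrects to
0.5^(1/2.2) ≈ 0.730 per channel, averages to the same value, and maps back to
2 * 0.730 - 1 ≈ 0.459:

    const gray = dataset.rgbToGrayscale([[[[0, 0, 0]]]], 0, 0, 0);
    console.log(gray.toFixed(3)); // "0.459"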
/blogcode/main.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function() {
2 | const video = $('#webcam')[0];
3 | const overlay = $('#overlay')[0];
4 | const overlayCC = overlay.getContext('2d');
5 | const ctrack = new clm.tracker();
6 | ctrack.init();
7 |
8 | function getEyesRectangle(positions) {
9 | const minX = positions[23][0] - 5;
10 | const maxX = positions[28][0] + 5;
11 | const minY = positions[24][1] - 5;
12 | const maxY = positions[26][1] + 5;
13 |
14 | const width = maxX - minX;
15 | const height = maxY - minY;
16 |
17 | return [minX, minY, width, height];
18 | }
19 |
20 | function trackingLoop() {
21 | // Check if a face is detected, and if so, track it.
22 | requestAnimationFrame(trackingLoop);
23 | let currentPosition = ctrack.getCurrentPosition();
24 |
25 | overlayCC.clearRect(0, 0, 400, 300);
26 | if (currentPosition) {
27 | // Draw facial mask on overlay canvas:
28 | ctrack.draw(overlay);
29 |
30 | // Get the eyes rectangle and draw it in red:
31 | const eyesRect = getEyesRectangle(currentPosition);
32 | overlayCC.strokeStyle = 'red';
33 | overlayCC.strokeRect(eyesRect[0], eyesRect[1], eyesRect[2], eyesRect[3]);
34 |
35 | // The video might internally have a different size, so we need these
36 | // factors to rescale the eyes rectangle before cropping:
37 | const resizeFactorX = video.videoWidth / video.width;
38 | const resizeFactorY = video.videoHeight / video.height;
39 |
40 | // Crop the eyes from the video and paste them in the eyes canvas:
41 | const eyesCanvas = $('#eyes')[0];
42 | const eyesCC = eyesCanvas.getContext('2d');
43 |
44 | eyesCC.drawImage(
45 | video,
46 | eyesRect[0] * resizeFactorX,
47 | eyesRect[1] * resizeFactorY,
48 | eyesRect[2] * resizeFactorX,
49 | eyesRect[3] * resizeFactorY,
50 | 0,
51 | 0,
52 | eyesCanvas.width,
53 | eyesCanvas.height,
54 | );
55 | }
56 | }
57 |
58 | function onStreaming(stream) {
59 | video.srcObject = stream;
60 | ctrack.start(video);
61 | trackingLoop();
62 | }
63 |
64 | navigator.mediaDevices
65 | .getUserMedia({
66 | video: true,
67 | })
68 | .then(onStreaming);
69 |
70 | // Track mouse movement:
71 | const mouse = {
72 | x: 0,
73 | y: 0,
74 |
75 | handleMouseMove: function(event) {
76 | // Get the mouse position and normalize it to [-1, 1]
77 | mouse.x = (event.clientX / $(window).width()) * 2 - 1;
78 | mouse.y = (event.clientY / $(window).height()) * 2 - 1;
79 | },
80 | };
81 |
82 | document.onmousemove = mouse.handleMouseMove;
83 |
84 | function getImage() {
85 | // Capture the current image in the eyes canvas as a tensor.
86 | return tf.tidy(function() {
87 | const image = tf.browser.fromPixels($('#eyes')[0]);
88 | // Add a batch dimension:
89 | const batchedImage = image.expandDims(0);
90 | // Normalize and return it:
91 | return batchedImage
92 | .toFloat()
93 | .div(tf.scalar(127))
94 | .sub(tf.scalar(1));
95 | });
96 | }
97 |
98 | const dataset = {
99 | train: {
100 | n: 0,
101 | x: null,
102 | y: null,
103 | },
104 | val: {
105 | n: 0,
106 | x: null,
107 | y: null,
108 | },
109 | };
110 |
111 | function captureExample() {
112 | // Take the latest image from the eyes canvas and add it to our dataset.
113 | tf.tidy(function() {
114 | const image = getImage();
115 | const mousePos = tf.tensor1d([mouse.x, mouse.y]).expandDims(0);
116 |
117 | // Choose whether to add it to training (80%) or validation (20%) set:
118 | const subset = dataset[Math.random() > 0.2 ? 'train' : 'val'];
119 |
120 | if (subset.x == null) {
121 | // Create new tensors
122 | subset.x = tf.keep(image);
123 | subset.y = tf.keep(mousePos);
124 | } else {
125 | // Concatenate it to existing tensor
126 | const oldX = subset.x;
127 | const oldY = subset.y;
128 |
129 | subset.x = tf.keep(oldX.concat(image, 0));
130 | subset.y = tf.keep(oldY.concat(mousePos, 0));
131 | }
132 |
133 | // Increase counter
134 | subset.n += 1;
135 | });
136 | }
137 |
138 | $('body').keyup(function(event) {
139 | // On space key:
140 | if (event.keyCode == 32) {
141 | captureExample();
142 |
143 | event.preventDefault();
144 | return false;
145 | }
146 | });
147 |
148 | let currentModel;
149 |
150 | function createModel() {
151 | const model = tf.sequential();
152 |
153 | model.add(
154 | tf.layers.conv2d({
155 | kernelSize: 5,
156 | filters: 20,
157 | strides: 1,
158 | activation: 'relu',
159 | inputShape: [$('#eyes').height(), $('#eyes').width(), 3],
160 | }),
161 | );
162 |
163 | model.add(
164 | tf.layers.maxPooling2d({
165 | poolSize: [2, 2],
166 | strides: [2, 2],
167 | }),
168 | );
169 |
170 | model.add(tf.layers.flatten());
171 |
172 |     model.add(tf.layers.dropout({ rate: 0.2 }));
173 |
174 | // Two output values x and y
175 | model.add(
176 | tf.layers.dense({
177 | units: 2,
178 | activation: 'tanh',
179 | }),
180 | );
181 |
182 | // Use ADAM optimizer with learning rate of 0.0005 and MSE loss
183 | model.compile({
184 | optimizer: tf.train.adam(0.0005),
185 | loss: 'meanSquaredError',
186 | });
187 |
188 | return model;
189 | }
190 |
191 | function fitModel() {
192 | let batchSize = Math.floor(dataset.train.n * 0.1);
193 | if (batchSize < 4) {
194 | batchSize = 4;
195 | } else if (batchSize > 64) {
196 | batchSize = 64;
197 | }
198 |
199 | if (currentModel == null) {
200 | currentModel = createModel();
201 | }
202 |
203 | currentModel.fit(dataset.train.x, dataset.train.y, {
204 | batchSize: batchSize,
205 | epochs: 20,
206 | shuffle: true,
207 | validationData: [dataset.val.x, dataset.val.y],
208 | });
209 | }
210 |
211 | $('#train').click(function() {
212 | fitModel();
213 | });
214 |
215 | function moveTarget() {
216 | if (currentModel == null) {
217 | return;
218 | }
219 | tf.tidy(function() {
220 | const image = getImage();
221 | const prediction = currentModel.predict(image);
222 |
223 | // Convert normalized position back to screen position:
224 | const targetWidth = $('#target').outerWidth();
225 | const targetHeight = $('#target').outerHeight();
226 |
227 | // It's okay to run this async, since we don't have to wait for it.
228 | prediction.data().then(prediction => {
229 | const x = ((prediction[0] + 1) / 2) * ($(window).width() - targetWidth);
230 | const y =
231 | ((prediction[1] + 1) / 2) * ($(window).height() - targetHeight);
232 |
233 | // Move target there:
234 | const $target = $('#target');
235 | $target.css('left', x + 'px');
236 | $target.css('top', y + 'px');
237 | });
238 | });
239 | }
240 |
241 | setInterval(moveTarget, 100);
242 | });
243 |
--------------------------------------------------------------------------------
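
The blog version's fitModel gives no feedback while training. A sketch of
adding per-epoch logging via model.fit callbacks (an assumption mirroring
what the full app does in js/training.js; currentModel and dataset are the
variables defined above):

    currentModel.fit(dataset.train.x, dataset.train.y, {
      batchSize: 32,
      epochs: 20,
      shuffle: true,
      validationData: [dataset.val.x, dataset.val.y],
      callbacks: {
        onEpochEnd: async (epoch, logs) => {
          console.log('epoch', epoch, 'loss:', logs.loss, 'val_loss:', logs.val_loss);
          await tf.nextFrame(); // yield so the page stays responsive
        },
      },
    });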
/normalize.css:
--------------------------------------------------------------------------------
1 | /*! normalize.css v8.0.0 | MIT License | github.com/necolas/normalize.css */
2 |
3 | /* Document
4 | ========================================================================== */
5 |
6 | /**
7 | * 1. Correct the line height in all browsers.
8 | * 2. Prevent adjustments of font size after orientation changes in iOS.
9 | */
10 |
11 | html {
12 | line-height: 1.15; /* 1 */
13 | -webkit-text-size-adjust: 100%; /* 2 */
14 | }
15 |
16 | /* Sections
17 | ========================================================================== */
18 |
19 | /**
20 | * Remove the margin in all browsers.
21 | */
22 |
23 | body {
24 | margin: 0;
25 | }
26 |
27 | /**
28 | * Correct the font size and margin on `h1` elements within `section` and
29 | * `article` contexts in Chrome, Firefox, and Safari.
30 | */
31 |
32 | h1 {
33 | font-size: 2em;
34 | margin: 0.67em 0;
35 | }
36 |
37 | /* Grouping content
38 | ========================================================================== */
39 |
40 | /**
41 | * 1. Add the correct box sizing in Firefox.
42 | * 2. Show the overflow in Edge and IE.
43 | */
44 |
45 | hr {
46 | box-sizing: content-box; /* 1 */
47 | height: 0; /* 1 */
48 | overflow: visible; /* 2 */
49 | }
50 |
51 | /**
52 | * 1. Correct the inheritance and scaling of font size in all browsers.
53 | * 2. Correct the odd `em` font sizing in all browsers.
54 | */
55 |
56 | pre {
57 | font-family: monospace, monospace; /* 1 */
58 | font-size: 1em; /* 2 */
59 | }
60 |
61 | /* Text-level semantics
62 | ========================================================================== */
63 |
64 | /**
65 | * Remove the gray background on active links in IE 10.
66 | */
67 |
68 | a {
69 | background-color: transparent;
70 | }
71 |
72 | /**
73 | * 1. Remove the bottom border in Chrome 57-
74 | * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.
75 | */
76 |
77 | abbr[title] {
78 | border-bottom: none; /* 1 */
79 | text-decoration: underline; /* 2 */
80 | text-decoration: underline dotted; /* 2 */
81 | }
82 |
83 | /**
84 | * Add the correct font weight in Chrome, Edge, and Safari.
85 | */
86 |
87 | b,
88 | strong {
89 | font-weight: bolder;
90 | }
91 |
92 | /**
93 | * 1. Correct the inheritance and scaling of font size in all browsers.
94 | * 2. Correct the odd `em` font sizing in all browsers.
95 | */
96 |
97 | code,
98 | kbd,
99 | samp {
100 | font-family: monospace, monospace; /* 1 */
101 | font-size: 1em; /* 2 */
102 | }
103 |
104 | /**
105 | * Add the correct font size in all browsers.
106 | */
107 |
108 | small {
109 | font-size: 80%;
110 | }
111 |
112 | /**
113 | * Prevent `sub` and `sup` elements from affecting the line height in
114 | * all browsers.
115 | */
116 |
117 | sub,
118 | sup {
119 | font-size: 75%;
120 | line-height: 0;
121 | position: relative;
122 | vertical-align: baseline;
123 | }
124 |
125 | sub {
126 | bottom: -0.25em;
127 | }
128 |
129 | sup {
130 | top: -0.5em;
131 | }
132 |
133 | /* Embedded content
134 | ========================================================================== */
135 |
136 | /**
137 | * Remove the border on images inside links in IE 10.
138 | */
139 |
140 | img {
141 | border-style: none;
142 | }
143 |
144 | /* Forms
145 | ========================================================================== */
146 |
147 | /**
148 | * 1. Change the font styles in all browsers.
149 | * 2. Remove the margin in Firefox and Safari.
150 | */
151 |
152 | button,
153 | input,
154 | optgroup,
155 | select,
156 | textarea {
157 | font-family: inherit; /* 1 */
158 | font-size: 100%; /* 1 */
159 | line-height: 1.15; /* 1 */
160 | margin: 0; /* 2 */
161 | }
162 |
163 | /**
164 | * Show the overflow in IE.
165 | * 1. Show the overflow in Edge.
166 | */
167 |
168 | button,
169 | input { /* 1 */
170 | overflow: visible;
171 | }
172 |
173 | /**
174 | * Remove the inheritance of text transform in Edge, Firefox, and IE.
175 | * 1. Remove the inheritance of text transform in Firefox.
176 | */
177 |
178 | button,
179 | select { /* 1 */
180 | text-transform: none;
181 | }
182 |
183 | /**
184 | * Correct the inability to style clickable types in iOS and Safari.
185 | */
186 |
187 | button,
188 | [type="button"],
189 | [type="reset"],
190 | [type="submit"] {
191 | -webkit-appearance: button;
192 | }
193 |
194 | /**
195 | * Remove the inner border and padding in Firefox.
196 | */
197 |
198 | button::-moz-focus-inner,
199 | [type="button"]::-moz-focus-inner,
200 | [type="reset"]::-moz-focus-inner,
201 | [type="submit"]::-moz-focus-inner {
202 | border-style: none;
203 | padding: 0;
204 | }
205 |
206 | /**
207 | * Restore the focus styles unset by the previous rule.
208 | */
209 |
210 | button:-moz-focusring,
211 | [type="button"]:-moz-focusring,
212 | [type="reset"]:-moz-focusring,
213 | [type="submit"]:-moz-focusring {
214 | outline: 1px dotted ButtonText;
215 | }
216 |
217 | /**
218 | * Correct the padding in Firefox.
219 | */
220 |
221 | fieldset {
222 | padding: 0.35em 0.75em 0.625em;
223 | }
224 |
225 | /**
226 | * 1. Correct the text wrapping in Edge and IE.
227 | * 2. Correct the color inheritance from `fieldset` elements in IE.
228 | * 3. Remove the padding so developers are not caught out when they zero out
229 | * `fieldset` elements in all browsers.
230 | */
231 |
232 | legend {
233 | box-sizing: border-box; /* 1 */
234 | color: inherit; /* 2 */
235 | display: table; /* 1 */
236 | max-width: 100%; /* 1 */
237 | padding: 0; /* 3 */
238 | white-space: normal; /* 1 */
239 | }
240 |
241 | /**
242 | * Add the correct vertical alignment in Chrome, Firefox, and Opera.
243 | */
244 |
245 | progress {
246 | vertical-align: baseline;
247 | }
248 |
249 | /**
250 | * Remove the default vertical scrollbar in IE 10+.
251 | */
252 |
253 | textarea {
254 | overflow: auto;
255 | }
256 |
257 | /**
258 | * 1. Add the correct box sizing in IE 10.
259 | * 2. Remove the padding in IE 10.
260 | */
261 |
262 | [type="checkbox"],
263 | [type="radio"] {
264 | box-sizing: border-box; /* 1 */
265 | padding: 0; /* 2 */
266 | }
267 |
268 | /**
269 | * Correct the cursor style of increment and decrement buttons in Chrome.
270 | */
271 |
272 | [type="number"]::-webkit-inner-spin-button,
273 | [type="number"]::-webkit-outer-spin-button {
274 | height: auto;
275 | }
276 |
277 | /**
278 | * 1. Correct the odd appearance in Chrome and Safari.
279 | * 2. Correct the outline style in Safari.
280 | */
281 |
282 | [type="search"] {
283 | -webkit-appearance: textfield; /* 1 */
284 | outline-offset: -2px; /* 2 */
285 | }
286 |
287 | /**
288 | * Remove the inner padding in Chrome and Safari on macOS.
289 | */
290 |
291 | [type="search"]::-webkit-search-decoration {
292 | -webkit-appearance: none;
293 | }
294 |
295 | /**
296 | * 1. Correct the inability to style clickable types in iOS and Safari.
297 | * 2. Change font properties to `inherit` in Safari.
298 | */
299 |
300 | ::-webkit-file-upload-button {
301 | -webkit-appearance: button; /* 1 */
302 | font: inherit; /* 2 */
303 | }
304 |
305 | /* Interactive
306 | ========================================================================== */
307 |
308 | /*
309 | * Add the correct display in Edge, IE 10+, and Firefox.
310 | */
311 |
312 | details {
313 | display: block;
314 | }
315 |
316 | /*
317 | * Add the correct display in all browsers.
318 | */
319 |
320 | summary {
321 | display: list-item;
322 | }
323 |
324 | /* Misc
325 | ========================================================================== */
326 |
327 | /**
328 | * Add the correct display in IE 10+.
329 | */
330 |
331 | template {
332 | display: none;
333 | }
334 |
335 | /**
336 | * Add the correct display in IE 10.
337 | */
338 |
339 | [hidden] {
340 | display: none;
341 | }
342 |
--------------------------------------------------------------------------------