├── AverageBrightness
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── BackgroundSubtraction
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── BrightestPoint
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── ColorDetection
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── ContourDetection-opencvjs
│   ├── cv.data
│   ├── cv.js
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── ContourDetection-trackingjs
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── EdgeDetection
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── Empty
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── FaceDetection
│   ├── eniac.jpg
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── FaceTracking
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── FrameDifference
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── MarkerTracking
│   ├── index.html
│   ├── markers.png
│   ├── sketch.js
│   └── style.css
├── MotionHistory
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── ObjectRecognition
│   ├── clarifai.js
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── OpticalFlow
│   ├── Graph.js
│   ├── flow.js
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── PointTracking
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── Thresholding
│   ├── index.html
│   ├── sketch.js
│   └── style.css
├── index.html
├── license.md
└── readme.md
/AverageBrightness/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |
4 | 5 |Get the average brightness from the video input.
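The sketch below averages only the red channel as a fast stand-in for brightness. A minimal alternative (my addition, not part of the example) that weights the channels by perceptual importance, assuming a capture or p5.Image whose loadPixels() has already been called:

// Hypothetical helper (not in the example): average brightness using
// Rec. 601 luma weights instead of the red channel alone.
function averageLuma(img) {
    var pixels = img.pixels; // stored as [r, g, b, a, r, g, b, a, ...]
    var total = 0;
    for (var i = 0; i < pixels.length; i += 4) {
        total += 0.299 * pixels[i] + 0.587 * pixels[i + 1] + 0.114 * pixels[i + 2];
    }
    return total / (pixels.length / 4);
}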
14 |15 | 16 | 17 |
18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /AverageBrightness/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var w = 640; 5 | var h = 480; 6 | 7 | function setup() { 8 | capture = createCapture({ 9 | audio: false, 10 | video: { 11 | width: w, 12 | height: h 13 | } 14 | }, function() { 15 | console.log('capture ready.') 16 | }); 17 | capture.elt.setAttribute('playsinline', ''); 18 | capture.size(w, h); 19 | createCanvas(w, h); 20 | capture.hide(); 21 | } 22 | 23 | // [r g b a] r g b a r g b a ... 24 | function draw() { 25 | image(capture, 0, 0, w, h); 26 | capture.loadPixels(); 27 | if (capture.pixels.length > 0) { // don't forget this! 28 | var total = 0; 29 | var i = 0; 30 | for (var y = 0; y < h; y++) { 31 | for (var x = 0; x < w; x++) { 32 | var redValue = capture.pixels[i]; 33 | total += redValue; 34 | i += 4; 35 | } 36 | } 37 | var n = w * h; 38 | var avg = int(total / n); 39 | select('#average-value').elt.innerText = avg; 40 | select('#average-color').elt.style.backgroundColor = 'rgb(' + avg + ',' + avg + ',' + avg + ')'; 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /AverageBrightness/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } 4 | 5 | .icon { 6 | display: inline-block; 7 | width: 1em; 8 | height: 1em; 9 | } -------------------------------------------------------------------------------- /BackgroundSubtraction/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Detect presence in video. Step outside of the video, press resetBackground(), and step back in. Try different thresholding types and levels.
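The core of the sketch below is a per-channel comparison against a stored reference frame. A minimal sketch of that test, assuming pixels and backgroundPixels are parallel RGBA arrays and i points at a red channel:

// A pixel counts as "present" when any channel moved past the
// threshold relative to the saved background frame.
function isForeground(pixels, backgroundPixels, i, threshold) {
    return Math.abs(pixels[i]     - backgroundPixels[i])     > threshold ||
           Math.abs(pixels[i + 1] - backgroundPixels[i + 1]) > threshold ||
           Math.abs(pixels[i + 2] - backgroundPixels[i + 2]) > threshold;
}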
14 | 15 |16 | Mode: 17 | No Threshold 18 | Brightness (RGB) 19 | Brightness (B&W) 20 |
21 |Threshold:
22 |Presence: 0
23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /BackgroundSubtraction/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var w = 640; 5 | var h = 480; 6 | 7 | function setup() { 8 | capture = createCapture({ 9 | audio: false, 10 | video: { 11 | width: w, 12 | height: h 13 | } 14 | }, function() { 15 | console.log('capture ready.') 16 | }); 17 | capture.elt.setAttribute('playsinline', ''); 18 | capture.size(w, h); 19 | createCanvas(w, h); 20 | capture.hide(); 21 | } 22 | 23 | var backgroundPixels; 24 | 25 | function resetBackground() { 26 | backgroundPixels = undefined; 27 | } 28 | 29 | function getRadioValue(name) { 30 | var inputs = selectAll('input'); 31 | for (var i = 0; i < inputs.length; i++) { 32 | var x = inputs[i]; 33 | if (name == x.elt.name && x.elt.checked) { 34 | return x.elt.value; 35 | } 36 | } 37 | } 38 | 39 | function copyImage(src, dst) { 40 | var n = src.length; 41 | if (!dst || dst.length != n) { 42 | dst = new src.constructor(n); 43 | } 44 | while (n--) { 45 | dst[n] = src[n]; 46 | } 47 | return dst; 48 | } 49 | 50 | function draw() { 51 | capture.loadPixels(); 52 | if (capture.pixels.length > 0) { // don't forget this! 53 | if (!backgroundPixels) { 54 | backgroundPixels = copyImage(capture.pixels, backgroundPixels); 55 | } 56 | var i = 0; 57 | var pixels = capture.pixels; 58 | var thresholdAmount = select('#thresholdAmount').value() * 255. / 100.; 59 | var thresholdType = getRadioValue('thresholdType'); 60 | if (thresholdType === 'rgb') { 61 | for (var y = 0; y < h; y++) { 62 | for (var x = 0; x < w; x++) { 63 | pixels[i] = pixels[i] - backgroundPixels[i] > thresholdAmount ? 255 : 0; 64 | i++; 65 | pixels[i] = pixels[i] - backgroundPixels[i] > thresholdAmount ? 255 : 0; 66 | i++; 67 | pixels[i] = pixels[i] - backgroundPixels[i] > thresholdAmount ? 255 : 0; 68 | i++; 69 | i++; // skip alpha 70 | } 71 | } 72 | select('#presence').elt.innerText = 'Not applicable'; 73 | } else if (thresholdType === 'bw') { 74 | var total = 0; 75 | for (var y = 0; y < h; y++) { 76 | for (var x = 0; x < w; x++) { 77 | // another common type of background thresholding uses absolute difference, like this: 78 | // var total = Math.abs(pixels[i+0] - backgroundPixels[i+0] > thresholdAmount) || ... 
79 | var rdiff = Math.abs(pixels[i + 0] - backgroundPixels[i + 0]) > thresholdAmount; 80 | var gdiff = Math.abs(pixels[i + 1] - backgroundPixels[i + 1]) > thresholdAmount; 81 | var bdiff = Math.abs(pixels[i + 2] - backgroundPixels[i + 2]) > thresholdAmount; 82 | var anydiff = rdiff || gdiff || bdiff; 83 | var output = 0; 84 | if (anydiff) { 85 | output = 255; 86 | total++; 87 | } 88 | pixels[i++] = output; 89 | pixels[i++] = output; 90 | pixels[i++] = output; 91 | i++; // skip alpha 92 | } 93 | } 94 | var n = w * h; 95 | var ratio = total / n; 96 | select('#presence').elt.innerText = int(100 * ratio) + '%'; 97 | } else { 98 | for (var y = 0; y < h; y++) { 99 | for (var x = 0; x < w; x++) { 100 | pixels[i] = pixels[i] - backgroundPixels[i]; 101 | i++; 102 | pixels[i] = pixels[i] - backgroundPixels[i]; 103 | i++; 104 | pixels[i] = pixels[i] - backgroundPixels[i]; 105 | i++; 106 | i++; // skip alpha 107 | } 108 | } 109 | select('#presence').elt.innerText = 'Not applicable'; 110 | } 111 | } 112 | capture.updatePixels(); 113 | 114 | image(capture, 0, 0, 640, 480); 115 | } -------------------------------------------------------------------------------- /BackgroundSubtraction/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /BrightestPoint/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Track the brightest point in video and draw trails. Try changing the smoothing amount.
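The smoothPoint() function in the sketch below is an exponential moving average: each frame the stored point slides a fraction (1 - amt) toward the new measurement, so amt near 1 means heavy smoothing and more lag. The same idea on a single scalar, as a minimal sketch:

// Exponential smoothing: the output chases the input without ever
// quite catching up, which filters out jitter.
var smoothed;
function smoothValue(value, amt) { // amt in [0, 1], larger = smoother
    if (smoothed === undefined) {
        smoothed = value;
    } else {
        smoothed = amt * smoothed + (1 - amt) * value;
    }
    return smoothed;
}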
14 | 15 |Smoothing Amount:
16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /BrightestPoint/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var w = 640; 5 | var h = 480; 6 | 7 | function setup() { 8 | capture = createCapture({ 9 | audio: false, 10 | video: { 11 | width: w, 12 | height: h 13 | } 14 | }, function() { 15 | console.log('capture ready.') 16 | }); 17 | capture.elt.setAttribute('playsinline', ''); 18 | capture.size(w, h); 19 | createCanvas(w, h); 20 | capture.hide(); 21 | } 22 | 23 | function findBrightest(video) { 24 | var brightestValue = 0; 25 | var brightestPosition = createVector(0, 0); 26 | var pixels = video.pixels; 27 | var i = 0; 28 | for (var y = 0; y < h; y++) { 29 | for (var x = 0; x < w; x++) { 30 | var r = pixels[i++]; 31 | var g = pixels[i++]; 32 | var b = pixels[i++]; 33 | i++; // ignore a 34 | var brightness = r + g + b; 35 | if (brightness > brightestValue) { 36 | brightestValue = brightness; 37 | brightestPosition.set(x, y); 38 | } 39 | } 40 | } 41 | return brightestPosition; 42 | } 43 | 44 | var lastPoint; 45 | function smoothPoint(point, amt) { 46 | if (!lastPoint) { 47 | lastPoint = point; 48 | } else { 49 | lastPoint.lerp(point, 1 - amt); 50 | } 51 | return lastPoint.copy(); 52 | } 53 | 54 | var trailPointsLength = 100; 55 | var trailPoints = []; 56 | function drawTrail(nextPoint) { 57 | trailPoints.push(nextPoint); 58 | if (trailPoints.length > trailPointsLength) { 59 | trailPoints.shift(); 60 | } 61 | beginShape(); 62 | trailPoints.forEach(function (point) { 63 | vertex(point.x, point.y); 64 | }) 65 | endShape(); 66 | } 67 | 68 | function clearTrail() { 69 | trailPoints = []; 70 | } 71 | 72 | var anotherLastPoint; 73 | function draw() { 74 | // this acts as a background() or clear() 75 | image(capture, 0, 0, 640, 480); 76 | 77 | capture.loadPixels(); 78 | if (capture.pixels.length > 0) { // don't forget this! 79 | var brightest = findBrightest(capture); 80 | 81 | // first step to try: uncomment the line below to enable smoothing 82 | var smoothingAmount = select("#smoothingAmount").value() / 100.0; 83 | // brightest = smoothPoint(brightest, smoothingAmount); 84 | 85 | // next step to try: ignore points that are too far from current point 86 | if (anotherLastPoint) { 87 | var dist = anotherLastPoint.dist(brightest); 88 | if (dist > 30) { 89 | // brightest = anotherLastPoint; 90 | } 91 | } 92 | 93 | var radius = 8; 94 | noStroke(); 95 | fill(255, 0, 0); 96 | ellipse(brightest.x, brightest.y, radius, radius); 97 | 98 | noFill(); 99 | strokeWeight(4); 100 | stroke(255, 0, 0); 101 | drawTrail(brightest); 102 | 103 | anotherLastPoint = brightest.copy(); 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /BrightestPoint/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /ColorDetection/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Match selected colors in video. Click on the video to select a color.
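The matcher below uses a Manhattan (L1) distance in RGB space, which is why the sketch scales the slider by 255 * 3: three channels of at most 255 each put the distance in the range 0 to 765. As a standalone sketch, assuming both colors are [r, g, b] arrays:

// Summed per-channel difference; a pixel matches when this value
// falls under the threshold.
function colorDistance(a, b) {
    return Math.abs(a[0] - b[0]) +
           Math.abs(a[1] - b[1]) +
           Math.abs(a[2] - b[2]);
}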
14 |Threshold:
15 |Percentage of matching pixels: 0%
16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /ColorDetection/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var w = 640; 5 | var h = 480; 6 | 7 | function setup() { 8 | capture = createCapture({ 9 | audio: false, 10 | video: { 11 | width: w, 12 | height: h 13 | } 14 | }, function() { 15 | console.log('capture ready.') 16 | }); 17 | capture.elt.setAttribute('playsinline', ''); 18 | capture.size(w, h); 19 | createCanvas(w, h); 20 | capture.hide(); 21 | } 22 | 23 | var trailPointsLength = 100; 24 | var trailPoints = []; 25 | 26 | function drawTrail(nextPoint) { 27 | trailPoints.push(nextPoint); 28 | if (trailPoints.length > trailPointsLength) { 29 | trailPoints.shift(); 30 | } 31 | beginShape(); 32 | trailPoints.forEach(function (point) { 33 | vertex(point.x, point.y); 34 | }) 35 | endShape(); 36 | } 37 | 38 | var targetColor = [255, 255, 255]; 39 | function draw() { 40 | capture.loadPixels(); 41 | var sampling = false; 42 | var sumPosition = createVector(0, 0); 43 | if (capture.pixels.length > 0) { // don't forget this! 44 | 45 | if (mouseIsPressed && 46 | mouseX > 0 && mouseX < width && 47 | mouseY > 0 && mouseY < height) { 48 | targetColor = capture.get(mouseX, mouseY); 49 | sampling = true; 50 | } 51 | 52 | var w = capture.width, 53 | h = capture.height; 54 | var i = 0; 55 | var pixels = capture.pixels; 56 | var thresholdAmount = select('#thresholdAmount').value(); 57 | thresholdAmount /= 100.; // this is the slider range 58 | thresholdAmount *= 255 * 3; // this is the maximum value 59 | var total = 0; 60 | for (var y = 0; y < h; y++) { 61 | for (var x = 0; x < w; x++) { 62 | var diff = 63 | Math.abs(pixels[i + 0] - targetColor[0]) + 64 | Math.abs(pixels[i + 1] - targetColor[1]) + 65 | Math.abs(pixels[i + 2] - targetColor[2]); 66 | var outputValue = 0; 67 | if (diff < thresholdAmount) { 68 | outputValue = 255; 69 | sumPosition.x += x; 70 | sumPosition.y += y; 71 | total++; 72 | } 73 | pixels[i++] = outputValue; // set red 74 | pixels[i++] = outputValue; // set green 75 | pixels[i++] = outputValue; // set blue 76 | i++; // skip alpha 77 | } 78 | } 79 | 80 | sumPosition.div(total); 81 | 82 | var n = w * h; 83 | var ratio = total / n; 84 | select('#percentWhite').elt.innerText = int(100 * ratio); 85 | } 86 | if (!sampling) { 87 | capture.updatePixels(); 88 | } 89 | 90 | image(capture, 0, 0, w, h); 91 | 92 | noStroke(); 93 | fill(targetColor); 94 | rect(20, 20, 40, 40); 95 | 96 | ellipse(sumPosition.x, sumPosition.y, 8, 8); 97 | noFill(); 98 | stroke(targetColor); 99 | strokeWeight(8); 100 | drawTrail(sumPosition); 101 | } 102 | -------------------------------------------------------------------------------- /ColorDetection/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } 4 | 5 | canvas { 6 | cursor: pointer; 7 | } -------------------------------------------------------------------------------- /ContourDetection-opencvjs/cv.data: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylemcdonald/cv-examples/823dd20701a57552420dbab8a72bba71561381e3/ContourDetection-opencvjs/cv.data -------------------------------------------------------------------------------- /ContourDetection-opencvjs/index.html: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Detect connected bright regions in video.
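The sketch below runs a classic contour pipeline. Its stages, summarized using the calls from this (old) opencv.js build; current releases expose different signatures:

// 1. cv.cvtColor(captureMat, gray, COLOR_RGBA2GRAY)   - drop color
// 2. cv.blur(gray, blurred, [radius, radius], ...)    - suppress noise
// 3. cv.threshold(blurred, thresholded, t, 255, ...)  - binarize
// 4. cv.findContours(thresholded, contours, ...)      - trace the blobs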
34 |Blur Radius:
35 |Threshold:
36 | 37 |OpenCV Status: Loading OpenCV...
38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /ContourDetection-opencvjs/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var w = 640; 5 | var h = 480; 6 | 7 | function setup() { 8 | capture = createCapture({ 9 | audio: false, 10 | video: { 11 | width: w, 12 | height: h 13 | } 14 | }, function() { 15 | console.log('capture ready.') 16 | }); 17 | capture.elt.setAttribute('playsinline', ''); 18 | capture.size(w, h); 19 | createCanvas(w, h); 20 | capture.hide(); 21 | } 22 | 23 | var captureMat, gray, blurred, thresholded; 24 | var contours, hierarchy; 25 | function cvSetup() { 26 | captureMat = new cv.Mat([h, w], cv.CV_8UC4); 27 | gray = new cv.Mat([h, w], cv.CV_8UC1); 28 | blurred = new cv.Mat([h, w], cv.CV_8UC1); 29 | thresholded = new cv.Mat([h, w], cv.CV_8UC1); 30 | } 31 | 32 | var ready = false; 33 | function cvReady() { 34 | if(!cv || !cv.loaded) return false; 35 | if(ready) return true; 36 | cvSetup(); 37 | ready = true; 38 | return true; 39 | } 40 | 41 | function draw() { 42 | var showThresholded = select('#showThresholded').checked(); 43 | 44 | if (cvReady()) { 45 | capture.loadPixels(); 46 | if (capture.pixels.length > 0) { 47 | captureMat.data().set(capture.pixels); 48 | 49 | var blurRadius = select('#blurRadius').value(); 50 | blurRadius = map(blurRadius, 0, 100, 1, 10); 51 | 52 | var threshold = select('#threshold').value(); 53 | threshold = map(threshold, 0, 100, 0, 255); 54 | 55 | cv.cvtColor(captureMat, gray, cv.ColorConversionCodes.COLOR_RGBA2GRAY.value, 0); 56 | cv.blur(gray, blurred, [blurRadius, blurRadius], [-1, -1], cv.BORDER_DEFAULT); 57 | cv.threshold(blurred, thresholded, threshold, 255, cv.ThresholdTypes.THRESH_BINARY.value); 58 | 59 | if (showThresholded) { 60 | var src = thresholded.data(); 61 | var dst = capture.pixels; 62 | var n = src.length; 63 | var j = 0; 64 | for (var i = 0; i < n; i++) { 65 | dst[j++] = src[i]; 66 | dst[j++] = src[i]; 67 | dst[j++] = src[i]; 68 | dst[j++] = 255; 69 | } 70 | capture.updatePixels(); 71 | } 72 | 73 | contours = new cv.MatVector(); 74 | hierarchy = new cv.Mat(); 75 | cv.findContours(thresholded, contours, hierarchy, 3, 2, [0, 0]); 76 | } 77 | } 78 | 79 | image(capture, 0, 0, w, h); 80 | 81 | if (contours && !showThresholded) { 82 | noStroke(); 83 | for (var i = 0; i < contours.size(); i++) { 84 | fill(0, 0, 255, 128); 85 | var contour = contours.get(i); 86 | beginShape(); 87 | var k = 0; 88 | for (var j = 0; j < contour.total(); j++) { 89 | var x = contour.get_int_at(k++); 90 | var y = contour.get_int_at(k++); 91 | vertex(x, y); 92 | } 93 | endShape(CLOSE); 94 | 95 | noFill(); 96 | stroke(255, 255, 255) 97 | var box = cv.boundingRect(contour); 98 | rect(box.x, box.y, box.width, box.height); 99 | 100 | // these aren't working right now: 101 | // https://github.com/ucisysarch/opencvjs/issues/30 102 | // var minAreaRect = cv.minAreaRect(contour); 103 | // var minAreaEllipse = cv.ellipse1(contour); 104 | // var fitEllipse = cv.fitEllipse(contour); 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /ContourDetection-opencvjs/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /ContourDetection-trackingjs/index.html: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Detect connected blobs of color in video. Click a color in the video to track it with bounding boxes.
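tracking.js matches colors through registered predicates: the callback receives each pixel's (r, g, b) and returns true on a match, as the sketch below does for a clicked color. A sketch of registering a custom matcher (the name 'reddish' and the thresholds are just examples):

// Register a predicate, then track regions of pixels that satisfy it.
tracking.ColorTracker.registerColor('reddish', function (r, g, b) {
    return r > 160 && g < 100 && b < 100;
});
var redTracker = new tracking.ColorTracker(['reddish']);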
15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /ContourDetection-trackingjs/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var tracker; 5 | 6 | var rhi, ghi, bhi; 7 | var rlo, glo, blo; 8 | 9 | function setTarget(r, g, b, range) { 10 | range = range || 32; 11 | rhi = r + range, rlo = r - range; 12 | ghi = g + range, glo = g - range; 13 | bhi = b + range, blo = b - range; 14 | } 15 | 16 | function setup() { 17 | var w = 640, 18 | h = 480; 19 | capture = createCapture({ 20 | audio: false, 21 | video: { 22 | width: w, 23 | height: h 24 | } 25 | }, function() { 26 | console.log('capture ready.') 27 | }); 28 | capture.elt.setAttribute('playsinline', ''); 29 | capture.size(w, h); 30 | capture.parent('container'); 31 | cnv = createCanvas(w, h); 32 | cnv.parent('container'); 33 | // capture.hide(); // tracking.js can't track the video when it's hidden 34 | 35 | setTarget(255, 255, 255); // by default track white 36 | tracking.ColorTracker.registerColor('match', function (r, g, b) { 37 | if (r <= rhi && r >= rlo && 38 | g <= ghi && g >= glo && 39 | b <= bhi && b >= blo) { 40 | return true; 41 | } 42 | return false; 43 | }); 44 | tracker = new tracking.ColorTracker(['match']); 45 | tracker.minDimension = 20; // make this smaller to track smaller objects 46 | capture.elt.id = 'p5video'; 47 | tracking.track('#p5video', tracker, { 48 | camera: true 49 | }); 50 | tracker.on('track', function (event) { 51 | cnv.clear(); 52 | strokeWeight(4); 53 | stroke(255, 0, 0); 54 | noFill(); 55 | event.data.forEach(function (r) { 56 | rect(r.x, r.y, r.width, r.height); 57 | }) 58 | }); 59 | } 60 | 61 | function draw() { 62 | if (mouseIsPressed && 63 | mouseX > 0 && mouseX < width && 64 | mouseY > 0 && mouseY < height) { 65 | capture.loadPixels(); 66 | target = capture.get(mouseX, mouseY); 67 | setTarget(target[0], target[1], target[2]); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /ContourDetection-trackingjs/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } 4 | 5 | #container { 6 | position: relative; 7 | } 8 | 9 | canvas, 10 | video { 11 | width: 100%; 12 | height: 100%; 13 | position: absolute; 14 | top: 0; 15 | left: 0; 16 | } -------------------------------------------------------------------------------- /EdgeDetection/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Find edges using a Canny edge detector.
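Canny's two thresholds implement hysteresis: pixels above the high threshold seed edges, and pixels above the low threshold extend them only while connected to a strong edge. A minimal pass mirroring the jsfeat calls used in the sketch below:

// buffer is a jsfeat.matrix_t (U8C1); the three steps reuse it in place.
function cannyPass(srcPixels, w, h, buffer, blurSize, low, high) {
    jsfeat.imgproc.grayscale(srcPixels, w, h, buffer);
    jsfeat.imgproc.gaussian_blur(buffer, buffer, blurSize, 0);
    jsfeat.imgproc.canny(buffer, buffer, low, high);
    return buffer;
}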
16 |Blur Size:
17 |Low Threshold:
18 |High Threshold:
19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /EdgeDetection/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | // https://inspirit.github.io/jsfeat/sample_canny_edge.html 3 | 4 | var capture; 5 | var buffer; 6 | var result; 7 | var w = 640, 8 | h = 480; 9 | 10 | function setup() { 11 | capture = createCapture({ 12 | audio: false, 13 | video: { 14 | width: w, 15 | height: h 16 | } 17 | }, function() { 18 | console.log('capture ready.') 19 | }); 20 | capture.elt.setAttribute('playsinline', ''); 21 | createCanvas(w, h); 22 | capture.size(w, h); 23 | capture.hide(); 24 | buffer = new jsfeat.matrix_t(w, h, jsfeat.U8C1_t); 25 | } 26 | 27 | function jsfeatToP5(src, dst) { 28 | if (!dst || dst.width != src.cols || dst.height != src.rows) { 29 | dst = createImage(src.cols, src.rows); 30 | } 31 | var n = src.data.length; 32 | dst.loadPixels(); 33 | var srcData = src.data; 34 | var dstData = dst.pixels; 35 | for (var i = 0, j = 0; i < n; i++) { 36 | var cur = srcData[i]; 37 | dstData[j++] = cur; 38 | dstData[j++] = cur; 39 | dstData[j++] = cur; 40 | dstData[j++] = 255; 41 | } 42 | dst.updatePixels(); 43 | return dst; 44 | } 45 | 46 | function draw() { 47 | image(capture, 0, 0, 640, 480); 48 | capture.loadPixels(); 49 | if (capture.pixels.length > 0) { // don't forget this! 50 | var blurSize = select('#blurSize').elt.value; 51 | var lowThreshold = select('#lowThreshold').elt.value; 52 | var highThreshold = select('#highThreshold').elt.value; 53 | 54 | blurSize = map(blurSize, 0, 100, 1, 12); 55 | lowThreshold = map(lowThreshold, 0, 100, 0, 255); 56 | highThreshold = map(highThreshold, 0, 100, 0, 255); 57 | 58 | jsfeat.imgproc.grayscale(capture.pixels, w, h, buffer); 59 | jsfeat.imgproc.gaussian_blur(buffer, buffer, blurSize, 0); 60 | jsfeat.imgproc.canny(buffer, buffer, lowThreshold, highThreshold); 61 | var n = buffer.rows * buffer.cols; 62 | // uncomment the following lines to invert the image 63 | // for (var i = 0; i < n; i++) { 64 | // buffer.data[i] = 255 - buffer.data[i]; 65 | // } 66 | result = jsfeatToP5(buffer, result); 67 | image(result, 0, 0, 640, 480); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /EdgeDetection/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /Empty/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Empty starting sketch for cv-examples.
14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /Empty/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var w = 640; 5 | var h = 480; 6 | 7 | function setup() { 8 | capture = createCapture({ 9 | audio: false, 10 | video: { 11 | width: w, 12 | height: h 13 | } 14 | }, function() { 15 | console.log('capture ready.') 16 | }); 17 | capture.elt.setAttribute('playsinline', ''); 18 | capture.size(w, h); 19 | createCanvas(w, h); 20 | capture.hide(); 21 | } 22 | 23 | function draw() { 24 | image(capture, 0, 0, w, h); 25 | capture.loadPixels(); 26 | if (capture.pixels.length > 0) { // don't forget this! 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /Empty/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /FaceDetection/eniac.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylemcdonald/cv-examples/823dd20701a57552420dbab8a72bba71561381e3/FaceDetection/eniac.jpg -------------------------------------------------------------------------------- /FaceDetection/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Detect faces in a still image.
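js-objectdetect reports each detection as a five-element array [x, y, width, height, count], where count is the number of overlapping candidate windows that agreed, roughly a confidence score; the sketch below keeps faces with count above 4. The same filter as a sketch:

// Keep only detections confirmed by several overlapping windows.
function confidentFaces(faces, minCount) {
    return faces.filter(function (face) {
        return face[4] > minCount; // try different thresholds
    });
}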
17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /FaceDetection/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | // based on https://github.com/mtschirs/js-objectdetect 4 | // the jsfeat detector is much faster but only does one object at a time: 5 | // https://inspirit.github.io/jsfeat/sample_haar_face.html 6 | // also see: 7 | // https://github.com/mtschirs/js-objectdetect/blob/master/js/objectdetect.frontalcatface.js 8 | // https://ahprojects.com/projects/cv-dazzle/ 9 | 10 | var w = 640, 11 | h = 480; 12 | var detector; 13 | var classifier = objectdetect.frontalface; 14 | var img; 15 | var faces; 16 | 17 | function setup() { 18 | createCanvas(w, h); 19 | var scaleFactor = 1.2; 20 | detector = new objectdetect.detector(w, h, scaleFactor, classifier); 21 | img = loadImage('eniac.jpg', function (img) { 22 | faces = detector.detect(img.canvas); 23 | }) 24 | } 25 | 26 | function draw() { 27 | image(img, 0, 0, w, h); 28 | 29 | stroke(255); 30 | noFill(); 31 | if (faces) { 32 | faces.forEach(function (face) { 33 | var count = face[4]; 34 | if (count > 4) { // try different thresholds 35 | rect(face[0], face[1], face[2], face[3]); 36 | } 37 | }) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /FaceDetection/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /FaceTracking/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Track a face and its expression in video. For more see itp-convo-comp.
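clmtrackr returns the face as an array of [x, y] points with fixed indices in its face model; the sketch below uses points 44 and 50 (the mouth corners) to estimate smiling. The same measurement isolated as a helper:

// A wider mouth relative to its rest width suggests a smile.
function mouthWidth(positions) {
    var left = positions[44];
    var right = positions[50];
    return dist(left[0], left[1], right[0], right[1]); // p5's dist()
}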
15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /FaceTracking/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | // https://github.com/kylemcdonald/AppropriatingNewTechnologies/wiki/Week-2 3 | 4 | var capture; 5 | var tracker 6 | var w = 640, 7 | h = 480; 8 | 9 | function setup() { 10 | capture = createCapture({ 11 | audio: false, 12 | video: { 13 | width: w, 14 | height: h 15 | } 16 | }, function() { 17 | console.log('capture ready.') 18 | }); 19 | capture.elt.setAttribute('playsinline', ''); 20 | createCanvas(w, h); 21 | capture.size(w, h); 22 | capture.hide(); 23 | 24 | colorMode(HSB); 25 | 26 | tracker = new clm.tracker(); 27 | tracker.init(); 28 | tracker.start(capture.elt); 29 | } 30 | 31 | function draw() { 32 | image(capture, 0, 0, w, h); 33 | var positions = tracker.getCurrentPosition(); 34 | 35 | noFill(); 36 | stroke(255); 37 | beginShape(); 38 | for (var i = 0; i < positions.length; i++) { 39 | vertex(positions[i][0], positions[i][1]); 40 | } 41 | endShape(); 42 | 43 | noStroke(); 44 | for (var i = 0; i < positions.length; i++) { 45 | fill(map(i, 0, positions.length, 0, 360), 50, 100); 46 | ellipse(positions[i][0], positions[i][1], 4, 4); 47 | text(i, positions[i][0], positions[i][1]); 48 | } 49 | 50 | if (positions.length > 0) { 51 | var mouthLeft = createVector(positions[44][0], positions[44][1]); 52 | var mouthRight = createVector(positions[50][0], positions[50][1]); 53 | var smile = mouthLeft.dist(mouthRight); 54 | // uncomment the line below to show an estimate of amount "smiling" 55 | // rect(20, 20, smile * 3, 20); 56 | 57 | // uncomment for a surprise 58 | // noStroke(); 59 | // fill(0, 255, 255); 60 | // ellipse(positions[62][0], positions[62][1], 50, 50); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /FaceTracking/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /FrameDifference/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Detect motion in video. Try changing the threshold.
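Unlike background subtraction, which compares against a fixed reference, frame differencing compares each frame to the previous one, so only changing pixels light up. The test used below as a minimal sketch over parallel RGBA arrays (the sketch multiplies the threshold by 3 because three channel differences are summed):

// Motion at pixel i when the summed channel change beats the threshold.
function movedSince(pixels, previousPixels, i, threshold) {
    return Math.abs(pixels[i]     - previousPixels[i]) +
           Math.abs(pixels[i + 1] - previousPixels[i + 1]) +
           Math.abs(pixels[i + 2] - previousPixels[i + 2]) > threshold;
}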
14 |15 | Threshold: 16 |
17 |18 | Motion: 0 19 |
20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /FrameDifference/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var previousPixels; 5 | var w = 640; 6 | var h = 480; 7 | 8 | function setup() { 9 | capture = createCapture({ 10 | audio: false, 11 | video: { 12 | width: w, 13 | height: h 14 | } 15 | }, function() { 16 | console.log('capture ready.') 17 | }); 18 | capture.elt.setAttribute('playsinline', ''); 19 | capture.size(w, h); 20 | createCanvas(w, h); 21 | capture.hide(); 22 | } 23 | 24 | function copyImage(src, dst) { 25 | var n = src.length; 26 | if (!dst || dst.length != n) dst = new src.constructor(n); 27 | while (n--) dst[n] = src[n]; 28 | return dst; 29 | } 30 | 31 | function draw() { 32 | capture.loadPixels(); 33 | var total = 0; 34 | if (capture.pixels.length > 0) { // don't forget this! 35 | if (!previousPixels) { 36 | previousPixels = copyImage(capture.pixels, previousPixels); 37 | } else { 38 | var w = capture.width, 39 | h = capture.height; 40 | var i = 0; 41 | var pixels = capture.pixels; 42 | var thresholdAmount = select('#thresholdAmount').value() * 255. / 100.; 43 | thresholdAmount *= 3; // 3 for r, g, b 44 | for (var y = 0; y < h; y++) { 45 | for (var x = 0; x < w; x++) { 46 | // calculate the differences 47 | var rdiff = Math.abs(pixels[i + 0] - previousPixels[i + 0]); 48 | var gdiff = Math.abs(pixels[i + 1] - previousPixels[i + 1]); 49 | var bdiff = Math.abs(pixels[i + 2] - previousPixels[i + 2]); 50 | // copy the current pixels to previousPixels 51 | previousPixels[i + 0] = pixels[i + 0]; 52 | previousPixels[i + 1] = pixels[i + 1]; 53 | previousPixels[i + 2] = pixels[i + 2]; 54 | var diffs = rdiff + gdiff + bdiff; 55 | var output = 0; 56 | if (diffs > thresholdAmount) { 57 | output = 255; 58 | total += diffs; 59 | } 60 | pixels[i++] = output; 61 | pixels[i++] = output; 62 | pixels[i++] = output; 63 | // also try this: 64 | // pixels[i++] = rdiff; 65 | // pixels[i++] = gdiff; 66 | // pixels[i++] = bdiff; 67 | i++; // skip alpha 68 | } 69 | } 70 | } 71 | } 72 | // need this because sometimes the frames are repeated 73 | if (total > 0) { 74 | select('#motion').elt.innerText = total; 75 | capture.updatePixels(); 76 | image(capture, 0, 0, 640, 480); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /FrameDifference/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /MarkerTracking/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Track an AR marker. Take a picture of one of these markers. Make sure it has a white border.
16 | 17 |Markers detected:
18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /MarkerTracking/markers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylemcdonald/cv-examples/823dd20701a57552420dbab8a72bba71561381e3/MarkerTracking/markers.png -------------------------------------------------------------------------------- /MarkerTracking/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | // more here: 3 | // http://fhtr.org/JSARToolKit/demos/tests/test2.html 4 | 5 | var capture; 6 | var w = 640, 7 | h = 480; 8 | 9 | var raster, param, pmat, resultMat, detector; 10 | 11 | function setup() { 12 | pixelDensity(1); // this makes the internal p5 canvas smaller 13 | capture = createCapture({ 14 | audio: false, 15 | video: { 16 | width: w, 17 | height: h 18 | } 19 | }, function() { 20 | console.log('capture ready.') 21 | }); 22 | capture.elt.setAttribute('playsinline', ''); 23 | createCanvas(w, h); 24 | capture.size(w, h); 25 | capture.hide(); 26 | 27 | raster = new NyARRgbRaster_Canvas2D(canvas); 28 | param = new FLARParam(canvas.width, canvas.height); 29 | pmat = mat4.identity(); 30 | param.copyCameraMatrix(pmat, 100, 10000); 31 | resultMat = new NyARTransMatResult(); 32 | detector = new FLARMultiIdMarkerDetector(param, 2); 33 | detector.setContinueMode(true); 34 | } 35 | 36 | function draw() { 37 | image(capture, 0, 0, w, h); 38 | canvas.changed = true; 39 | var thresholdAmount = 128; //select('#thresholdAmount').value() * 255 / 100; 40 | detected = detector.detectMarkerLite(raster, thresholdAmount); 41 | select('#markersDetected').elt.innerText = detected; 42 | for (var i = 0; i < detected; i++) { 43 | // read data from the marker 44 | // var id = detector.getIdMarkerData(i); 45 | 46 | // get the transformation for this marker 47 | detector.getTransformMatrix(i, resultMat); 48 | 49 | // convert the transformation to account for our camera 50 | var mat = resultMat; 51 | var cm = mat4.create(); 52 | cm[0] = mat.m00, cm[1] = -mat.m10, cm[2] = mat.m20, cm[3] = 0; 53 | cm[4] = mat.m01, cm[5] = -mat.m11, cm[6] = mat.m21, cm[7] = 0; 54 | cm[8] = -mat.m02, cm[9] = mat.m12, cm[10] = -mat.m22, cm[11] = 0; 55 | cm[12] = mat.m03, cm[13] = -mat.m13, cm[14] = mat.m23, cm[15] = 1; 56 | mat4.multiply(pmat, cm, cm); 57 | 58 | // define a set of 3d vertices 59 | var q = 1; 60 | var verts = [ 61 | vec4.create(-q, -q, 0, 1), 62 | vec4.create(q, -q, 0, 1), 63 | vec4.create(q, q, 0, 1), 64 | vec4.create(-q, q, 0, 1), 65 | // vec4.create(0, 0, -2*q, 1) // poke up 66 | ]; 67 | 68 | // convert that set of vertices from object space to screen space 69 | var w2 = width / 2, 70 | h2 = height / 2; 71 | verts.forEach(function (v) { 72 | mat4.multiplyVec4(cm, v); 73 | v[0] = v[0] * w2 / v[3] + w2; 74 | v[1] = -v[1] * h2 / v[3] + h2; 75 | }); 76 | 77 | noStroke(); 78 | fill(0, millis() % 255); 79 | beginShape(); 80 | verts.forEach(function (v) { 81 | vertex(v[0], v[1]); 82 | }); 83 | endShape(); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /MarkerTracking/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /MotionHistory/index.html: -------------------------------------------------------------------------------- 1 | 
2 | 3 | 4 | 5 |Detect direction of motion. Try stepping out of the video, clicking resetBackground(), and stepping back in. Try different threshold values.
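A motion history image stores per-pixel recency: foreground pixels reset to 255 and everything else decays by one each frame, so brightness encodes how recently a pixel moved, and local gradients of that image point along the direction of motion. The update rule from the sketch below, isolated:

// mhi is a Uint8ClampedArray, so the decrement clamps at 0 by itself.
function updateMotionHistory(mhi, i, isForeground) {
    if (isForeground) {
        mhi[i] = 255;
    } else {
        mhi[i]--;
    }
}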
14 |Threshold:
15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /MotionHistory/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var motionHistoryImage; 5 | var w = 640, 6 | h = 480; 7 | 8 | function setup() { 9 | capture = createCapture({ 10 | audio: false, 11 | video: { 12 | width: w, 13 | height: h 14 | } 15 | }, function() { 16 | console.log('capture ready.') 17 | }); 18 | capture.elt.setAttribute('playsinline', ''); 19 | createCanvas(w, h); 20 | capture.size(w, h); 21 | capture.hide(); 22 | } 23 | 24 | var backgroundPixels; 25 | 26 | function resetBackground() { 27 | backgroundPixels = undefined; 28 | } 29 | 30 | function copyImage(src, dst) { 31 | var n = src.length; 32 | if (!dst || dst.length != n) dst = new src.constructor(n); 33 | while (n--) dst[n] = src[n]; 34 | return dst; 35 | } 36 | 37 | function draw() { 38 | image(capture, 0, 0); 39 | capture.loadPixels(); 40 | if (capture.pixels.length > 0) { // don't forget this! 41 | if (!backgroundPixels) { 42 | // copy the camera pixels for storing the background 43 | backgroundPixels = copyImage(capture.pixels, backgroundPixels); 44 | // make a grayscale image for storing the motion history 45 | motionHistoryImage = new Uint8ClampedArray(w * h); 46 | } 47 | var pixels = capture.pixels; 48 | var thresholdAmount = select('#thresholdAmount').value() / 100; 49 | var sumSquaredThreshold = thresholdAmount * (255 * 255) * 3; 50 | var iRgb = 0, 51 | iGray = 0; 52 | for (var y = 0; y < h; y++) { 53 | for (var x = 0; x < w; x++) { 54 | var rdiff = pixels[iRgb + 0] - backgroundPixels[iRgb + 0]; 55 | var gdiff = pixels[iRgb + 1] - backgroundPixels[iRgb + 1]; 56 | var bdiff = pixels[iRgb + 2] - backgroundPixels[iRgb + 2]; 57 | var sumSquaredDiff = rdiff * rdiff + gdiff * gdiff + bdiff * bdiff; 58 | // if this is a foreground pixel 59 | if (sumSquaredDiff > sumSquaredThreshold) { 60 | // set the motion history image to white 61 | motionHistoryImage[iGray] = 255; 62 | } else { 63 | // otherwise make it fade towards black 64 | motionHistoryImage[iGray]--; 65 | } 66 | var output = motionHistoryImage[iGray]; 67 | pixels[iRgb++] = output; 68 | pixels[iRgb++] = output; 69 | pixels[iRgb++] = output; 70 | iRgb++; // skip alpha in rgbindex 71 | iGray++; // next grayscale index 72 | } 73 | } 74 | 75 | // some parameters for calculating the motion vectors 76 | var stepSize = 16; 77 | var radius = 8; 78 | var maximumDiff = 8; // ignore big "motion edges" 79 | var minimumValue = 245; // ignore very old values 80 | var arrowWidth = .25; 81 | stroke(255); 82 | noFill(); 83 | 84 | // pre-calculate some values outside the loop 85 | var upOffset = -radius * w; 86 | var downOffset = +radius * w; 87 | var leftOffset = -radius; 88 | var rightOffset = +radius; 89 | var maximumLength = Math.sqrt(maximumDiff * maximumDiff * 2); 90 | for (var y = radius; y + radius < h; y += stepSize) { 91 | for (var x = radius; x + radius < w; x += stepSize) { 92 | var i = y * w + x; 93 | var center = motionHistoryImage[i]; 94 | var dx = 0, 95 | dy = 0; 96 | if (center > minimumValue) { 97 | var up = motionHistoryImage[i + upOffset]; 98 | var down = motionHistoryImage[i + downOffset]; 99 | var left = motionHistoryImage[i + leftOffset]; 100 | var right = motionHistoryImage[i + rightOffset]; 101 | dx = right - left; 102 | dy = down - up; 103 | // ignore big "motion edges" 104 | if (dx > maximumDiff || dy > maximumDiff || 105 | 
-dx > maximumDiff || -dy > maximumDiff) { 106 | dx = 0, dy = 0; 107 | } else { 108 | // big changes are slow motion, small changes are fast motion 109 | var length = Math.sqrt(dx * dx + dy * dy); 110 | var rescale = (maximumLength - length) / length; 111 | dx *= rescale; 112 | dy *= rescale; 113 | } 114 | } 115 | line(x + dx, y + dy, x - arrowWidth * dy, y + arrowWidth * dx); 116 | line(x + dx, y + dy, x + arrowWidth * dy, y - arrowWidth * dx); 117 | } 118 | } 119 | } 120 | 121 | if (select('#showRaw').checked()) { 122 | capture.updatePixels(); 123 | image(capture, 0, 0, 640, 480); 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /MotionHistory/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /ObjectRecognition/clarifai.js: -------------------------------------------------------------------------------- 1 | // sign up for an account here: 2 | // https://developer.clarifai.com/accounts/signup/ 3 | // confirm your account via email. 4 | // go this url and click "create a new application": 5 | // https://developer.clarifai.com/applications/ 6 | // if you don't set a language, your browser might set 7 | // "Accept-Language" headers on your behalf. 8 | 9 | var app; 10 | 11 | function setupClarifai(apiKey) { 12 | if (!apiKey) { 13 | console.warn('setupClarifai(apiKey): ' + 14 | 'Empty arguments. First create an account at https://developer.clarifai.com/accounts/signup/ and ' + 15 | 'click "Create API Key" at https://developer.clarifai.com/applications/ then ' + 16 | 'call setupClarifai() with the API Key before tagging images.'); 17 | return; 18 | } 19 | app = new Clarifai.App({ 20 | apiKey: apiKey 21 | }); 22 | } 23 | 24 | function tagUrl(url, cb, language) { 25 | if (language) { 26 | headers['Accept-Language'] = language; 27 | } 28 | app.models.predict(Clarifai.GENERAL_MODEL, url).then( 29 | function (response) { 30 | console.log(response); 31 | cb(response.outputs[0].data.concepts); 32 | }, 33 | function (err) { 34 | console.error(err); 35 | } 36 | ); 37 | } 38 | 39 | function canvasToBase64(cnv, imageType, cb) { 40 | cnv.toBlob(function (blob) { 41 | var reader = new window.FileReader(); 42 | reader.readAsDataURL(blob); 43 | reader.onloadend = function () { 44 | var base64data = reader.result.split(',')[1]; 45 | cb(base64data); 46 | } 47 | }, imageType); 48 | } 49 | 50 | function tagCanvas(cnv, cb, language) { 51 | canvasToBase64(cnv, 'image/jpeg', function (data) { 52 | app.models.predict(Clarifai.GENERAL_MODEL, { 53 | base64: data 54 | }).then( 55 | function (response) { 56 | console.log(response); 57 | cb(response.outputs[0].data.concepts); 58 | }, 59 | function (err) { 60 | console.error(err); 61 | } 62 | ); 63 | }); 64 | } 65 | 66 | function tagMedia(media, cb, language) { 67 | // on retina devices creates a double-sized buffer 68 | var buffer = createGraphics(media.width, media.height); 69 | buffer.image(media, 0, 0); 70 | tagCanvas(buffer.elt, cb, language); 71 | } -------------------------------------------------------------------------------- /ObjectRecognition/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Send a photo to Clarifai for tagging. For this example to work, you need to call setupClarifai()
with your API key.
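A hypothetical end-to-end use of the helpers defined in clarifai.js above ('YOUR_API_KEY' is a placeholder; each returned concept is a prediction carrying a name and a confidence value):

// Tag the current camera image and log the predicted concepts.
setupClarifai('YOUR_API_KEY');
tagMedia(capture, function (concepts) {
    concepts.forEach(function (concept) {
        console.log(concept.name + ': ' + concept.value);
    });
});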
Dense optical flow.
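"Dense" means motion is estimated everywhere on a regular grid rather than at hand-picked feature points: the oflow library used below returns a list of zones, each with a position (x, y) and a displacement (u, v). A sketch of collapsing that field into one overall motion vector (assumes a non-empty zones array as produced by oflow):

// Average the per-zone displacements to get global image motion.
function averageFlow(zones) {
    var u = 0, v = 0;
    zones.forEach(function (zone) {
        u += zone.u;
        v += zone.v;
    });
    return { u: u / zones.length, v: v / zones.length };
}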
15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /OpticalFlow/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var previousPixels; 5 | var flow; 6 | var w = 640, 7 | h = 480; 8 | var step = 8; 9 | 10 | var uMotionGraph, vMotionGraph; 11 | 12 | function setup() { 13 | createCanvas(w, h); 14 | capture = createCapture({ 15 | audio: false, 16 | video: { 17 | width: w, 18 | height: h 19 | } 20 | }, function() { 21 | console.log('capture ready.') 22 | }); 23 | capture.elt.setAttribute('playsinline', ''); 24 | capture.hide(); 25 | flow = new FlowCalculator(step); 26 | uMotionGraph = new Graph(100, -step / 2, +step / 2); 27 | vMotionGraph = new Graph(100, -step / 2, +step / 2); 28 | } 29 | 30 | function copyImage(src, dst) { 31 | var n = src.length; 32 | if (!dst || dst.length != n) dst = new src.constructor(n); 33 | while (n--) dst[n] = src[n]; 34 | return dst; 35 | } 36 | 37 | function same(a1, a2, stride, n) { 38 | for (var i = 0; i < n; i += stride) { 39 | if (a1[i] != a2[i]) { 40 | return false; 41 | } 42 | } 43 | return true; 44 | } 45 | 46 | function draw() { 47 | capture.loadPixels(); 48 | if (capture.pixels.length > 0) { 49 | if (previousPixels) { 50 | 51 | // cheap way to ignore duplicate frames 52 | if (same(previousPixels, capture.pixels, 4, width)) { 53 | return; 54 | } 55 | 56 | flow.calculate(previousPixels, capture.pixels, capture.width, capture.height); 57 | } 58 | previousPixels = copyImage(capture.pixels, previousPixels); 59 | image(capture, 0, 0, w, h); 60 | 61 | if (flow.flow && flow.flow.u != 0 && flow.flow.v != 0) { 62 | uMotionGraph.addSample(flow.flow.u); 63 | vMotionGraph.addSample(flow.flow.v); 64 | 65 | strokeWeight(2); 66 | flow.flow.zones.forEach(function (zone) { 67 | stroke(map(zone.u, -step, +step, 0, 255), 68 | map(zone.v, -step, +step, 0, 255), 128); 69 | line(zone.x, zone.y, zone.x + zone.u, zone.y + zone.v); 70 | }) 71 | } 72 | 73 | noFill(); 74 | stroke(255); 75 | 76 | // draw left-right motion 77 | uMotionGraph.draw(width, height / 2); 78 | line(0, height / 4, width, height / 4); 79 | 80 | // draw up-down motion 81 | translate(0, height / 2); 82 | vMotionGraph.draw(width, height / 2); 83 | line(0, height / 4, width, height / 4); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /OpticalFlow/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /PointTracking/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Track points in video. Try clicking details to track in the video. Press any key to track a bunch of random points. This is also called Lucas–Kanade optical flow.
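The jsfeat Lucas-Kanade options used in the sketch below, roughly (my glosses, not jsfeat documentation):

// winSize       - pixel window compared between frames; larger follows
//                 bigger motion but costs more and blurs fine detail
// maxIterations - cap on refinement steps per pyramid level
// epsilon       - stop early once the position update is this small
// minEigen      - drop points in textureless regions (a low eigenvalue
//                 means no corner-like structure to lock onto)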
16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /PointTracking/sketch.js: -------------------------------------------------------------------------------- 1 | // https://inspirit.github.io/jsfeat/sample_oflow_lk.html 2 | 3 | var cnv; 4 | var capture; 5 | var curpyr, prevpyr, pointCount, pointStatus, prevxy, curxy; 6 | var w = 640, 7 | h = 480; 8 | var maxPoints = 1000; 9 | 10 | function setup() { 11 | capture = createCapture({ 12 | audio: false, 13 | video: { 14 | width: w, 15 | height: h 16 | } 17 | }, function() { 18 | console.log('capture ready.') 19 | }); 20 | capture.elt.setAttribute('playsinline', ''); 21 | cnv = createCanvas(w, h); 22 | capture.size(w, h); 23 | capture.hide(); 24 | 25 | curpyr = new jsfeat.pyramid_t(3); 26 | prevpyr = new jsfeat.pyramid_t(3); 27 | curpyr.allocate(w, h, jsfeat.U8C1_t); 28 | prevpyr.allocate(w, h, jsfeat.U8C1_t); 29 | 30 | pointCount = 0; 31 | pointStatus = new Uint8Array(maxPoints); 32 | prevxy = new Float32Array(maxPoints * 2); 33 | curxy = new Float32Array(maxPoints * 2); 34 | } 35 | 36 | function keyPressed(key) { 37 | for (var i = 0; i < 100; i++) { 38 | addPoint(random(width), random(height)); 39 | } 40 | } 41 | 42 | function mousePressed() { 43 | addPoint(mouseX, mouseY); 44 | } 45 | 46 | function addPoint(x, y) { 47 | if (pointCount < maxPoints) { 48 | var pointIndex = pointCount * 2; 49 | curxy[pointIndex] = x; 50 | curxy[pointIndex + 1] = y; 51 | pointCount++; 52 | } 53 | } 54 | 55 | function prunePoints() { 56 | var outputPoint = 0; 57 | for (var inputPoint = 0; inputPoint < pointCount; inputPoint++) { 58 | if (pointStatus[inputPoint] == 1) { 59 | if (outputPoint < inputPoint) { 60 | var inputIndex = inputPoint * 2; 61 | var outputIndex = outputPoint * 2; 62 | curxy[outputIndex] = curxy[inputIndex]; 63 | curxy[outputIndex + 1] = curxy[inputIndex + 1]; 64 | } 65 | outputPoint++; 66 | } 67 | } 68 | pointCount = outputPoint; 69 | } 70 | 71 | function draw() { 72 | image(capture, 0, 0, w, h); 73 | capture.loadPixels(); 74 | if (capture.pixels.length > 0) { // don't forget this! 75 | var xyswap = prevxy; 76 | prevxy = curxy; 77 | curxy = xyswap; 78 | var pyrswap = prevpyr; 79 | prevpyr = curpyr; 80 | curpyr = pyrswap; 81 | 82 | // these are options worth breaking out and exploring 83 | var winSize = 20; 84 | var maxIterations = 30; 85 | var epsilon = 0.01; 86 | var minEigen = 0.001; 87 | 88 | jsfeat.imgproc.grayscale(capture.pixels, w, h, curpyr.data[0]); 89 | curpyr.build(curpyr.data[0], true); 90 | jsfeat.optical_flow_lk.track( 91 | prevpyr, curpyr, 92 | prevxy, curxy, 93 | pointCount, 94 | winSize, maxIterations, 95 | pointStatus, 96 | epsilon, minEigen); 97 | prunePoints(); 98 | 99 | for (var i = 0; i < pointCount; i++) { 100 | var pointOffset = i * 2; 101 | ellipse(curxy[pointOffset], curxy[pointOffset + 1], 8, 8); 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /PointTracking/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /Thresholding/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |Simplify an image. Thresholding is one of the most basic and essential computer vision algorithms.
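The whole technique reduces to one comparison per pixel; the sketch below applies it to the red channel as a quick stand-in for brightness. Isolated:

// Everything at or above the threshold becomes white, the rest black.
function thresholdPixel(value, threshold) {
    return value >= threshold ? 255 : 0;
}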
14 |Threshold:
15 |Percentage of white pixels: 0%
16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /Thresholding/sketch.js: -------------------------------------------------------------------------------- 1 | // https://kylemcdonald.github.io/cv-examples/ 2 | 3 | var capture; 4 | var w = 640, 5 | h = 480; 6 | 7 | function setup() { 8 | capture = createCapture(VIDEO); 9 | createCanvas(w, h); 10 | capture.size(w, h); 11 | capture.hide(); 12 | } 13 | 14 | function draw() { 15 | capture.loadPixels(); 16 | if (capture.pixels.length > 0) { // don't forget this! 17 | var pixels = capture.pixels; 18 | var thresholdAmount = select('#thresholdAmount').value(); 19 | thresholdAmount /= 100.0; // this is the slider range 20 | thresholdAmount *= 255; // this is the maximum value 21 | var total = 0; 22 | var i = 0; 23 | for (var y = 0; y < h; y++) { 24 | for (var x = 0; x < w; x++) { 25 | var redValue = pixels[i]; 26 | var outputValue = 0; 27 | if (redValue >= thresholdAmount) { 28 | outputValue = 255; 29 | total++; 30 | } 31 | pixels[i++] = outputValue; // set red 32 | pixels[i++] = outputValue; // set green 33 | pixels[i++] = outputValue; // set blue 34 | i++; // skip alpha 35 | } 36 | } 37 | 38 | var n = w * h; 39 | var ratio = total / n; 40 | select('#percentWhite').elt.innerText = int(100 * ratio); 41 | } 42 | capture.updatePixels(); 43 | 44 | image(capture, 0, 0, 640, 480); 45 | } -------------------------------------------------------------------------------- /Thresholding/style.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: sans-serif; 3 | } -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 |A collection of computer vision examples for p5.js by @kcimc.
24 |Including jsfeat, 25 | clmtrackr, 26 | js-objectdetect, 27 | JSARToolkit, 28 | oflow, and tracking.js.
29 |Download from GitHub.
30 | 31 | Low level (analyzes single pixels): 32 | 33 |