├── .editorconfig
├── .gitattributes
├── .github
│   └── workflows
│       └── node.js.yml
├── .gitignore
├── README.md
├── badges
│   ├── javascript.svg
│   └── react.svg
├── package-lock.json
├── package.json
├── public
│   ├── index.html
│   └── robots.txt
├── src
│   ├── Common
│   │   ├── canvas.js
│   │   ├── magnifyResults.js
│   │   ├── tensorflowImages.js
│   │   ├── tensorflowModel.js
│   │   └── tensorflowPredictions.js
│   ├── Constants
│   │   ├── canvas.constant.js
│   │   ├── emotionRecognizer.constant.js
│   │   ├── faceDetection.constant.js
│   │   └── url.constant.js
│   ├── components
│   │   ├── GithubLink.jsx
│   │   ├── Loading.jsx
│   │   ├── ManageVideoOnCanvas.jsx
│   │   ├── SwitchCamera.jsx
│   │   └── VideoOnCanvas.jsx
│   ├── index.jsx
│   ├── reportWebVitals.jsx
│   ├── setupTests.jsx
│   └── stylesheet
│       ├── App.css
│       ├── WebcamModified.css
│       ├── index.css
│       └── loading.css
└── yarn.lock
/.editorconfig:
--------------------------------------------------------------------------------
1 | # EditorConfig is awesome: https://EditorConfig.org
2 |
3 | # top-most EditorConfig file
4 | root = true
5 |
6 | # Unix-style newlines with a newline ending every file
7 | [*]
8 | end_of_line = lf
9 | insert_final_newline = true
10 | charset = utf-8
11 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Text files are normalized to LF in the repository and checked out with CRLF line endings.
2 |
3 | * text=auto eol=crlf
4 |
--------------------------------------------------------------------------------
/.github/workflows/node.js.yml:
--------------------------------------------------------------------------------
1 | # This workflow does a clean installation of node dependencies, builds the source code and deploys it to GitHub Pages
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-nodejs
3 |
4 | name: Node.js CI
5 |
6 | on:
7 | push:
8 | branches: [ "main" ]
9 | pull_request:
10 | branches: [ "main" ]
11 | permissions:
12 | contents: write
13 |
14 | jobs:
15 | build-and-deploy:
16 | concurrency: ci-${{ github.ref }}
17 | runs-on: ubuntu-latest
18 |
19 | strategy:
20 | matrix:
21 | node-version: [18.x]
22 | # See supported Node.js release schedule at https://nodejs.org/en/about/releases/
23 |
24 | steps:
25 | - name: Checkout 🔔
26 | uses: actions/checkout@v3
27 |
28 |       - name: Use Node.js ${{ matrix.node-version }} 🔧
29 |         uses: actions/setup-node@v3
30 |         with:
31 |           node-version: ${{ matrix.node-version }}
32 | 
33 |       - name: Install and Build ⚙️
34 |         run: |
35 |           yarn install
36 |           yarn build
37 | 
38 |       - name: Deploy 🚀
39 |         uses: JamesIves/github-pages-deploy-action@v4
40 |         with:
41 |           folder: build
42 | 
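43 | # Note: JamesIves/github-pages-deploy-action pushes the contents of `folder`
44 | # (the CRA build output) to the gh-pages branch, which GitHub Pages serves.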
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # production
12 | /build
13 |
14 | # misc
15 | .DS_Store
16 | .env.local
17 | .env.development.local
18 | .env.test.local
19 | .env.production.local
20 |
21 | npm-debug.log*
22 | yarn-debug.log*
23 | yarn-error.log*
24 |
25 | .idea
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # emotion-recognition-website-react
2 |
3 | [![react](badges/react.svg)](https://en.reactjs.org)
4 | [![javascript](badges/javascript.svg)](https://developer.mozilla.org/fr/docs/Web/JavaScript)
5 |
6 | A facial emotion recognition AI running in a website built with React.
7 |
8 | ✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨
9 |
10 | [➡️➡️➡️ Try the AI directly in your browser! ⬅️⬅️⬅️](https://clementreiffers.github.io/emotion-recognition-website-react/)
11 |
12 | ✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨
13 |
14 | ## Overview
15 | 1. [Inspiration](#inspiration)
16 | 2. [Launch the website](#launch-the-website)
17 | 1. [Installation](#yarn-install)
18 | 2. [Start the App](#yarn-run-start)
19 | 3. [Build the App](#yarn-run-build)
20 | 3. [Sources](#sources)
21 |
22 | ## Inspiration
23 |
24 |
25 | This project is inspired by [this project](https://github.com/clementreiffers/emotion-recognition-website).
26 | The goal is the same: build a complete website that runs an AI able to recognize emotions.
27 | 
28 | The AI was trained by me and my friends in [this project](https://github.com/clementreiffers/emotion_recognition_cnn/).
29 | N.B. There is already a website inside "emotion_recognition_cnn", but it is written in Python and runs only
30 | server-side (it uses the server's camera). The goal of that older project was not to build an interface, only to demo our AI.
31 | The real interface of our project is here.
32 |
33 | ## Launch the website
34 |
35 | You can go directly to this [link](https://clementreiffers.github.io/emotion-recognition-website-react/) and allow access
36 | to the camera.
37 | 
38 | If you want to run the website on your own computer, follow the steps below:
39 |
40 | ### `yarn install`
41 |
42 | First, install all the requirements:
43 | - react
44 | - react-webcam
45 | - tfjs (a JavaScript port of TensorFlow, used to run our AI)
46 |
47 | ### `yarn run start`
48 |
49 | Once all the requirements are installed, run this command in a terminal.
50 | It compiles the website and tells you when the app is ready.
51 | The app runs locally, so open `localhost:3000` in your browser.
52 |
53 | ### `yarn run build`
54 |
55 | If you want an optimized production version of the website, run this command; an example build is available on the
56 | gh-pages branch of this repository. This step is not necessary if you only run the app locally.
57 |
58 |
59 | ## Sources
60 |
61 | - [React](https://en.reactjs.org)
62 | - [Badges of the readme](https://github.com/aleen42/badges)
63 | - [tfjs](https://www.npmjs.com/package/@tensorflow/tfjs)
64 | - [the base I used to recognize faces on camera](https://github.com/lauirvin/react-use-face-detection)
65 | - [load a layer model into react](https://towardsdatascience.com/loading-models-into-tensorflow-js-using-react-js-f3e118ee4a59)
66 |
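67 | ## Using the model outside the website
68 | 
69 | The trained model is hosted publicly (see `src/Constants/url.constant.js`), so it can be loaded from any tfjs
70 | script. Below is a minimal illustrative sketch, not part of the app itself: the URL and the 80×80×3 input shape
71 | come from this repository's constants, everything else is assumed for demonstration.
72 | 
73 | ```js
74 | import * as tf from "@tensorflow/tfjs";
75 | 
76 | const MODEL_URL =
77 |   "https://raw.githubusercontent.com/clementreiffers/emotion-recognition-website/main/resnet50js_ferplus/model.json";
78 | 
79 | tf.loadLayersModel(MODEL_URL).then((model) => {
80 |   // the model expects a batch of 80x80 RGB images (see PRED_RESIZE_SHAPE)
81 |   const input = tf.zeros([1, 80, 80, 3]);
82 |   const scores = Array.from(model.predict(input).dataSync());
83 |   console.log(scores); // seven scores: angry, disgust, fear, happy, neutral, sad, surprise
84 | });
85 | ```
86 | 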
--------------------------------------------------------------------------------
/badges/javascript.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/badges/react.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "emotion-recognition-website-react",
3 | "version": "0.1.0",
4 | "private": true,
5 | "homepage": "https://clementreiffers.github.io/emotion-recognition-website-react",
6 | "dependencies": {
7 | "@emotion/react": "^11.10.4",
8 | "@emotion/styled": "^11.10.4",
9 | "@mediapipe/camera_utils": "^0.3.1640029074",
10 | "@mediapipe/face_detection": "^0.4.1646425229",
11 | "@mui/icons-material": "^5.10.3",
12 | "@mui/material": "^5.10.3",
13 | "@mui/styled-engine": "^5.10.3",
14 | "@tensorflow/tfjs": "^3.18.0",
15 | "@tensorflow/tfjs-converter": "1.7.4",
16 | "@tensorflow/tfjs-core": "1.7.4",
17 | "@testing-library/jest-dom": "^5.16.4",
18 | "@testing-library/react": "^13.3.0",
19 | "@testing-library/user-event": "^13.5.0",
20 | "ramda": "^0.28.0",
21 | "react": "^18.2.0",
22 | "react-dom": "^18.2.0",
23 | "react-scripts": "5.0.1",
24 | "react-use-face-detection": "^1.0.1",
25 | "react-webcam": "^7.0.1",
26 | "web-vitals": "^2.1.4"
27 | },
28 | "scripts": {
29 | "start": "react-scripts start",
30 | "build": "react-scripts build",
31 | "test": "react-scripts test",
32 | "eject": "react-scripts eject",
33 | "predeploy": "yarn run build",
34 | "deploy": "gh-pages -d build"
35 | },
36 | "eslintConfig": {
37 | "extends": [
38 | "react-app",
39 | "react-app/jest"
40 | ]
41 | },
42 | "browserslist": {
43 | "production": [
44 | ">0.2%",
45 | "not dead",
46 | "not op_mini all"
47 | ],
48 | "development": [
49 | "last 1 chrome version",
50 | "last 1 firefox version",
51 | "last 1 safari version"
52 | ]
53 | },
54 | "devDependencies": {
55 | "gh-pages": "^4.0.0",
56 | "prettier": "^2.7.1"
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/public/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html lang="en">
3 |   <head>
4 |     <meta charset="utf-8" />
5 |     <meta name="viewport" content="width=device-width, initial-scale=1" />
6 |     <title>React ManageVideoOnCanvas</title>
7 |   </head>
8 |   <body>
9 |     <noscript>You need to enable JavaScript to run this app.</noscript>
10 |     <div id="root"></div>
11 |   </body>
12 | </html>
13 | 
--------------------------------------------------------------------------------
/public/robots.txt:
--------------------------------------------------------------------------------
1 | # https://www.robotstxt.org/robotstxt.html
2 | User-agent: *
3 | Disallow:
4 |
--------------------------------------------------------------------------------
/src/Common/canvas.js:
--------------------------------------------------------------------------------
1 | import { predict } from "./tensorflowPredictions";
2 | import {
3 | EMOTION_PANEL_BG_COLOR,
4 | EMOTION_PANEL_COLOR,
5 | SIZE_EMOTION_PANEL,
6 | } from "../Constants/canvas.constant";
7 |
8 | const _setRectStyle = (context) => {
9 |   context.lineWidth = 0.8;
10 | context.strokeStyle = "red";
11 | };
12 |
13 | const _drawRect = (context, boundingBox) => {
14 |   // draw a rectangle around the detected face
15 | context.beginPath();
16 | _setRectStyle(context);
17 | const { x, y, width } = _getRectDim(boundingBox, context);
18 | const height = boundingBox.height * context.canvas.height;
19 | context.rect(x, y, width, height);
20 | context.stroke();
21 | };
22 |
23 | const _getFace = (context, boundingBox) => {
24 | const { x, y, width } = _getRectDim(boundingBox, context);
25 | const height = boundingBox.height * context.canvas.height;
26 | return context.getImageData(x, y, width, height);
27 | };
28 | const _setFillStyle = (context, color) => (context.fillStyle = color);
29 |
30 | const _getRectDim = (boundingBox, context) => {
31 |   const x = boundingBox.xCenter * context.canvas.width; // MediaPipe box coords are normalized to [0, 1]
32 | const y = boundingBox.yCenter * context.canvas.height - SIZE_EMOTION_PANEL;
33 | const width = boundingBox.width * context.canvas.width;
34 | return { x, y, width };
35 | };
36 |
37 | const _drawPanel = (context, boundingBox) => {
38 | const { x, y, width } = _getRectDim(boundingBox, context);
39 | context.fillRect(x, y, width, SIZE_EMOTION_PANEL);
40 | };
41 |
42 | const _setFont = (context) => (context.font = SIZE_EMOTION_PANEL + "px serif");
43 |
44 | const _drawText = (context, text, boundingBox) => {
45 | const { x, y, width } = _getRectDim(boundingBox, context);
46 | context.stroke();
47 | context.fillText(text, x, y + SIZE_EMOTION_PANEL, width);
48 | };
49 |
50 | const _drawEmotionPanel = (context, boundingBox, prediction) => {
51 | _setFillStyle(context, EMOTION_PANEL_BG_COLOR);
52 | _drawPanel(context, boundingBox);
53 | _setFont(context);
54 | _setFillStyle(context, EMOTION_PANEL_COLOR);
55 | _drawText(context, prediction, boundingBox);
56 | };
57 |
58 | const _isBoundingBoxPositive = (boundingBox) =>
59 | boundingBox.xCenter > 0 &&
60 | boundingBox.yCenter > 0 &&
61 | boundingBox.width > 0 &&
62 | boundingBox.height > 0;
63 |
64 | const _clearCanvas = (context) =>
65 | context.clearRect(0, 0, context.canvas.width, context.canvas.height);
66 |
67 | const _drawImage = (video, context) =>
68 | context.drawImage(video, 0, 0, context.canvas.width, context.canvas.height);
69 |
70 | const _drawPrediction = (context, bb, emotionRecognizer, state) =>
71 | _drawEmotionPanel(
72 | context,
73 | bb,
74 | predict(emotionRecognizer, state, _getFace(context, bb))
75 | );
76 |
77 | const drawOnCanvas = (
78 | state,
79 | context,
80 | video,
81 | boundingBox,
82 | emotionRecognizer
83 | ) => {
84 | _clearCanvas(context);
85 | _drawImage(video, context);
86 | for (let bb of boundingBox) {
87 |     // one bounding box per detected face
88 | _drawRect(context, bb);
89 |     // predict the emotion only if the bounding box is valid and the model is loaded
90 | if (_isBoundingBoxPositive(bb) && state.isModelSet) {
91 | _drawPrediction(context, bb, emotionRecognizer, state);
92 | }
93 | }
94 | };
95 |
96 | export default drawOnCanvas;
97 |
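98 | // Note: drawOnCanvas runs once per animation frame (see ManageVideoOnCanvas):
99 | // it repaints the video frame, outlines each face, and overlays the emotion.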
--------------------------------------------------------------------------------
/src/Common/magnifyResults.js:
--------------------------------------------------------------------------------
1 | import * as R from "ramda";
2 |
3 | const getPercentage = R.pipe(R.multiply(100), Math.trunc); // 0.73 -> 73
4 |
5 | const getScoreInPercentage = R.map(getPercentage);
6 |
7 | const getEmotionNearToItsScore = (listOfEmotions) => (pred) =>
8 | R.transpose([listOfEmotions, pred]);
9 |
10 | const getListOfEmotionsSorted = R.sortBy(R.prop(1));
11 |
12 | const magnifyResults = (listOfEmotions) =>
13 | R.pipe(
14 | getScoreInPercentage,
15 | getEmotionNearToItsScore(listOfEmotions),
16 | getListOfEmotionsSorted,
17 | R.reverse,
18 | R.nth(0),
19 | R.append(" %"),
20 | R.join("")
21 | );
22 |
23 | export default magnifyResults;
24 |
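25 | // Example with hypothetical scores:
26 | //   magnifyResults(EMOTIONS)([0.03, 0.01, 0.02, 0.73, 0.1, 0.06, 0.05])
27 | // pairs each label with its integer percentage, keeps the highest-scoring
28 | // pair, and joins it into a string such as "😄 happy : 73 %".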
--------------------------------------------------------------------------------
/src/Common/tensorflowImages.js:
--------------------------------------------------------------------------------
1 | import * as tf from "@tensorflow/tfjs";
2 | import {
3 | PRED_RESIZE_SHAPE,
4 | RESIZE_SHAPE,
5 | } from "../Constants/emotionRecognizer.constant";
6 |
7 | const _resizeImg = (img) =>
8 | tf.image.resizeBilinear(img, RESIZE_SHAPE).reshape(PRED_RESIZE_SHAPE);
9 |
10 | const _convertImgToTensor = (img) =>
11 | tf.browser.fromPixels(img, 3).expandDims(0);
12 |
13 | const treatImg = (img) => _resizeImg(_convertImgToTensor(img));
14 |
15 | export { treatImg };
16 |
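17 | // treatImg converts raw canvas ImageData into the [1, 80, 80, 3] tensor the
18 | // model expects: 3 RGB channels, a batch dimension, then a bilinear resize.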
--------------------------------------------------------------------------------
/src/Common/tensorflowModel.js:
--------------------------------------------------------------------------------
1 | import * as tf from "@tensorflow/tfjs";
2 |
3 | const loadModel = async (link: string, setState: Function, state) => {
4 | console.log("loading model...");
5 | try {
6 | setState({
7 | ...state,
8 | model: await tf.loadLayersModel(link),
9 | isModelSet: true,
10 | });
11 | console.log("load model success");
12 | } catch (err) {
13 | console.log("couldn't load model : ", err);
14 | }
15 | };
16 |
17 | export { loadModel };
18 |
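19 | // Usage (see ManageVideoOnCanvas.jsx):
20 | //   tf.ready().then(() => loadModel(URL_EMOTION_RECOGNITION_MODEL, setState, state));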
--------------------------------------------------------------------------------
/src/Common/tensorflowPredictions.js:
--------------------------------------------------------------------------------
1 | import { EMOTIONS, NO_MODEL } from "../Constants/emotionRecognizer.constant";
2 | import * as tf from "@tensorflow/tfjs";
3 | import magnifyResults from "./magnifyResults";
4 | import { treatImg } from "./tensorflowImages";
5 |
6 | const _predictTensor = (state, model, tfResizedImage) => {
7 | if (state.isModelSet) {
8 | let predict = Array.from(model.predict(tfResizedImage).dataSync());
9 | tfResizedImage.dispose();
10 | return magnifyResults(EMOTIONS)(predict);
11 | } else {
12 | return NO_MODEL;
13 | }
14 | };
15 | const _predictImg = (emotionRecognizer, state, face) =>
16 | _predictTensor(state, emotionRecognizer, treatImg(face));
17 |
18 | const predict = (emotionRecognizer, state, face) => {
19 | let prediction = "";
20 | tf.engine().startScope();
21 | tf.tidy(() => {
22 | prediction = _predictImg(emotionRecognizer, state, face);
23 | });
24 |   // end the scope so every intermediate tensor is disposed (no memory leak)
25 | tf.engine().endScope();
26 | return prediction;
27 | };
28 |
29 | export { predict };
30 |
--------------------------------------------------------------------------------
/src/Constants/canvas.constant.js:
--------------------------------------------------------------------------------
1 | const SIZE_EMOTION_PANEL: number = 50;
2 |
3 | const EMOTION_PANEL_COLOR: string = "#000000";
4 |
5 | const EMOTION_PANEL_BG_COLOR: string = "#FFFFFF";
6 |
7 | export { SIZE_EMOTION_PANEL, EMOTION_PANEL_COLOR, EMOTION_PANEL_BG_COLOR };
8 |
--------------------------------------------------------------------------------
/src/Constants/emotionRecognizer.constant.js:
--------------------------------------------------------------------------------
1 | const EMOTIONS = [
2 | "😡 angry : ",
3 | "🤮 disgust : ",
4 | "😨 fear : ",
5 | "😄 happy : ",
6 | "😐 neutral : ",
7 | "😭 sad : ",
8 | "😯 surprise : ",
9 | ];
10 |
11 | const NO_MODEL: string = "❌ model not loaded yet";
12 |
13 | const PRED_RESIZE_SHAPE: number[] = [1, 80, 80, 3];
14 |
15 | const RESIZE_SHAPE: number[] = [80, 80];
16 |
17 | export { EMOTIONS, NO_MODEL, PRED_RESIZE_SHAPE, RESIZE_SHAPE };
18 |
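19 | // The order of EMOTIONS must match the order of the scores produced by the
20 | // model's output layer: magnifyResults pairs labels and scores index by index.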
--------------------------------------------------------------------------------
/src/Constants/faceDetection.constant.js:
--------------------------------------------------------------------------------
1 | import FaceDetection from "@mediapipe/face_detection";
2 | import { Camera } from "@mediapipe/camera_utils";
3 | import { URL_JS_DELIVR } from "./url.constant";
4 |
5 | const locateFaceDetectionFile = (file: string): string =>
6 | `${URL_JS_DELIVR}${file}`;
7 |
8 | const FACE_DETECTION_PROPS = {
9 | faceDetectionOptions: {
10 | model: "short",
11 | },
12 | faceDetection: new FaceDetection.FaceDetection({
13 | locateFile: (file) => locateFaceDetectionFile(file),
14 | }),
15 | camera: ({ mediaSrc, onFrame, width, height }) =>
16 | new Camera(mediaSrc, {
17 | onFrame,
18 | width,
19 | height,
20 | }),
21 | };
22 |
23 | export { FACE_DETECTION_PROPS };
24 |
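25 | // locateFile redirects MediaPipe's requests for its runtime assets (wasm and
26 | // model files) to the jsDelivr CDN defined in url.constant.js.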
--------------------------------------------------------------------------------
/src/Constants/url.constant.js:
--------------------------------------------------------------------------------
1 | const URL_EMOTION_RECOGNITION_MODEL: string =
2 | "https://raw.githubusercontent.com/clementreiffers/emotion-recognition-website/main/resnet50js_ferplus/model.json";
3 |
4 | const URL_JS_DELIVR: string =
5 | "https://cdn.jsdelivr.net/npm/@mediapipe/face_detection/";
6 |
7 | const URL_GITHUB_CODE_SOURCE: string =
8 | "https://github.com/clementreiffers/emotion-recognition-website-react";
9 |
10 | export { URL_EMOTION_RECOGNITION_MODEL, URL_JS_DELIVR, URL_GITHUB_CODE_SOURCE };
11 |
--------------------------------------------------------------------------------
/src/components/GithubLink.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import { URL_GITHUB_CODE_SOURCE } from "../Constants/url.constant";
3 | import GitHubIcon from "@mui/icons-material/GitHub";
4 | export const GithubLink = () => (
5 |   <>
6 |     Emotion Recognition
7 |     <a href={URL_GITHUB_CODE_SOURCE}>
8 |       <GitHubIcon />
9 |     </a>
10 |   </>
11 | );
12 |
--------------------------------------------------------------------------------
/src/components/Loading.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import SyncIcon from "@mui/icons-material/Sync";
3 | import "../stylesheet/loading.css";
4 |
5 | export const Loading = () => (
6 |   <div>
7 |     loading, please wait ... <SyncIcon className="loading-icon" />
8 |   </div>
9 | );
10 |
--------------------------------------------------------------------------------
/src/components/ManageVideoOnCanvas.jsx:
--------------------------------------------------------------------------------
1 | import { useFaceDetection } from "react-use-face-detection";
2 | import React, { useEffect, useRef, useState } from "react";
3 | import * as tf from "@tensorflow/tfjs";
4 | import "../stylesheet/WebcamModified.css";
5 | import { URL_EMOTION_RECOGNITION_MODEL } from "../Constants/url.constant";
6 | import drawOnCanvas from "../Common/canvas";
7 | import VideoOnCanvas from "./VideoOnCanvas";
8 | import SwitchCamera from "./SwitchCamera";
9 | import { FACE_DETECTION_PROPS } from "../Constants/faceDetection.constant";
10 | import { loadModel } from "../Common/tensorflowModel";
11 |
12 | type stateType = { model: any, isModelSet: boolean };
13 |
14 | const _init_state = {
15 | model: null,
16 | isModelSet: false,
17 | };
18 |
19 | const ManageVideoOnCanvas = () => {
20 | const { webcamRef, boundingBox } = useFaceDetection(FACE_DETECTION_PROPS);
21 | let canvasRef = useRef(null);
22 |
23 | const [state, setState]: [stateType, Function] = useState(_init_state);
24 | const [constraints, setConstraints] = useState({
25 | facingMode: "user",
26 | });
27 |
28 | useEffect(() => {
29 | const context = canvasRef.current.getContext("2d");
30 | let animationFrameId;
31 | const render = () => {
32 | drawOnCanvas(
33 | state,
34 | context,
35 | webcamRef.current.video,
36 | boundingBox,
37 | state.model
38 | );
39 | animationFrameId = window.requestAnimationFrame(render);
40 | };
41 | render();
42 |     return () => window.cancelAnimationFrame(animationFrameId);
43 | }, [canvasRef, webcamRef, boundingBox, state]);
44 |
45 | useEffect(() => {
46 | if (!state.isModelSet) {
47 | // MODEL EMOTION RECOGNITION
48 | tf.ready().then(() =>
49 | loadModel(URL_EMOTION_RECOGNITION_MODEL, setState, state)
50 | );
51 | }
52 | }, [state, setState]);
53 |
54 |   return (
55 |     <div>
56 |       <SwitchCamera
57 |         isModelLoaded={state.isModelSet}
58 |         setConstraints={setConstraints}
59 |       />
60 |       <VideoOnCanvas
61 |         canvasRef={canvasRef}
62 |         webcamRef={webcamRef}
63 |         constraints={constraints}
64 |       />
65 |     </div>
66 |   );
67 | };
68 |
69 | export default ManageVideoOnCanvas;
70 |
--------------------------------------------------------------------------------
/src/components/SwitchCamera.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState } from "react";
2 | import { Loading } from "./Loading";
3 | import { GithubLink } from "./GithubLink";
4 |
5 | type switchCameraProps = {
6 | isModelLoaded: boolean,
7 | setConstraints: Function,
8 | };
9 |
10 | const SwitchCamera = (props: switchCameraProps) => {
11 | const [devices, setDevices] = useState([]);
12 |
13 | const handleDevices = React.useCallback(
14 | (mediaDevices) =>
15 | setDevices(mediaDevices.filter(({ kind }) => kind === "videoinput")),
16 | [setDevices]
17 | );
18 |
19 | React.useEffect(() => {
20 | navigator.mediaDevices.enumerateDevices().then(handleDevices);
21 | }, [handleDevices]);
22 |
23 |   return (
24 |     <div className="info">
25 |       {!props.isModelLoaded ? <Loading /> : <GithubLink />}
26 |       <select
27 |         className="cameraSelector"
28 |         onChange={(e) =>
29 |           props.setConstraints({ deviceId: { exact: e.target.value } })
30 |         }
31 |       >
32 |         {devices.map((device, key) => (
33 |           <option value={device.deviceId} key={key}>
34 |             {device.label}
35 |           </option>
36 |         ))}
37 |       </select>
38 |     </div>
39 |   );
40 | };
41 |
42 | export default SwitchCamera;
43 |
--------------------------------------------------------------------------------
/src/components/VideoOnCanvas.jsx:
--------------------------------------------------------------------------------
1 | import Webcam from "react-webcam";
2 | import React from "react";
3 |
4 | type videoOnCanvasProps = {
5 | canvasRef: any,
6 | webcamRef: any,
7 | constraints: any,
8 | };
9 |
10 | const VideoOnCanvas = (props: videoOnCanvasProps) => (
11 |   <div>
12 |     <Webcam
13 |       ref={props.webcamRef}
14 |       audio={false}
15 |       videoConstraints={props.constraints}
16 |     />
17 |     <canvas
18 |       ref={props.canvasRef}
19 |       className="canvas"
20 |       width={window.innerWidth}
21 |       height={window.innerHeight}
22 |     />
23 |   </div>
24 | );
25 | export default VideoOnCanvas;
26 | 
--------------------------------------------------------------------------------
/src/index.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import ReactDOM from "react-dom/client";
3 | import "./stylesheet/index.css";
4 | import reportWebVitals from "./reportWebVitals";
5 | import ManageVideoOnCanvas from "./components/ManageVideoOnCanvas";
6 |
7 | const root = ReactDOM.createRoot(document.getElementById("root"));
8 |
9 | root.render(<ManageVideoOnCanvas />);
10 |
11 | // If you want to start measuring performance in your app, pass a function
12 | // to log results (for example: reportWebVitals(console.log))
13 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
14 | reportWebVitals(console.log);
15 |
--------------------------------------------------------------------------------
/src/reportWebVitals.jsx:
--------------------------------------------------------------------------------
1 | const reportWebVitals = onPerfEntry => {
2 | if (onPerfEntry && onPerfEntry instanceof Function) {
3 | import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {
4 | getCLS(onPerfEntry);
5 | getFID(onPerfEntry);
6 | getFCP(onPerfEntry);
7 | getLCP(onPerfEntry);
8 | getTTFB(onPerfEntry);
9 | });
10 | }
11 | };
12 |
13 | export default reportWebVitals;
14 |
--------------------------------------------------------------------------------
/src/setupTests.jsx:
--------------------------------------------------------------------------------
1 | // jest-dom adds custom jest matchers for asserting on DOM nodes.
2 | // allows you to do things like:
3 | // expect(element).toHaveTextContent(/react/i)
4 | // learn more: https://github.com/testing-library/jest-dom
5 | import '@testing-library/jest-dom';
6 |
--------------------------------------------------------------------------------
/src/stylesheet/App.css:
--------------------------------------------------------------------------------
1 | video {
2 | display: none;
3 | }
--------------------------------------------------------------------------------
/src/stylesheet/WebcamModified.css:
--------------------------------------------------------------------------------
1 | body {
2 | margin: 0;
3 | }
4 |
5 | .canvas {
6 | position: fixed;
7 | height: 100%;
8 | width: 100%;
9 | }
10 |
11 | .info {
12 | text-align: center;
13 | font-size: 200%;
14 | position: fixed;
15 | z-index: 2;
16 | color: white;
17 | background-color: black;
18 | border-bottom-right-radius: 20px;
19 | padding: 2%;
20 | opacity: 0.7;
21 | }
22 |
23 | .info button{
24 | border-radius: 40px;
25 | font-size: 110%;
26 | }
27 |
28 | .cameraSelector{
29 | width:100%;
30 | font-size: 80%;
31 | }
32 |
33 | @media only screen and (max-device-width: 640px) {
34 | }
35 |
36 | @media only screen and (max-device-width: 768px) {
37 | .info{
38 | font-size: 120%;
39 | }
40 |
41 | }
42 |
--------------------------------------------------------------------------------
/src/stylesheet/index.css:
--------------------------------------------------------------------------------
1 | body {
2 | margin: 0;
3 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
4 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
5 | sans-serif;
6 | -webkit-font-smoothing: antialiased;
7 | -moz-osx-font-smoothing: grayscale;
8 | }
9 |
10 | code {
11 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
12 | monospace;
13 | }
14 |
--------------------------------------------------------------------------------
/src/stylesheet/loading.css:
--------------------------------------------------------------------------------
1 | @keyframes rotation {
2 | from {
3 | transform: rotate(359deg);
4 | }
5 | to {
6 | transform: rotate(0deg);
7 | }
8 | }
9 |
10 | .loading-icon {
11 | animation: rotation 2s infinite linear;
12 | vertical-align: middle;
13 | }
14 |
--------------------------------------------------------------------------------