├── logo.ico
├── logofm.png
├── 192x192px.png
├── 512x512px.png
├── 796x626.png
├── 799x595.png
├── 802x600.png
├── layarui.png
├── tests
│   ├── test_facemind.py
│   └── __init__.py
├── requirements.txt
├── mental_health_app
│   ├── test.py
│   ├── requirements.txt
│   ├── __init__.py
│   └── main.py
├── .gitignore
├── .github
│   ├── dependabot.yml
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── feature_request.md
│   │   └── bug_report.md
│   ├── workflows
│   │   ├── dc.yml
│   │   ├── python-package.yml
│   │   ├── update-nonce.yml
│   │   ├── deploy.yml
│   │   ├── jekyll-gh-pages.yml
│   │   ├── pb.yml
│   │   └── requirements.txt
│   └── PULL_REQUEST_TEMPLATE.md
├── package.json
├── Dockerfile
├── download.html
├── SECURITY.md
├── pyproject.toml
├── setup.py
├── README.md
├── CONTRIBUTING.md
├── dfs.js
├── manifest.json
├── sty.css
├── inst.js
├── CODE_OF_CONDUCT.md
├── st.js
├── jws.js
├── index.html
├── LICENSE
├── sc.js
├── app.py
└── facemind
    └── index.html
/logo.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/galihru/facemind/HEAD/logo.ico
--------------------------------------------------------------------------------
/logofm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/galihru/facemind/HEAD/logofm.png
--------------------------------------------------------------------------------
/192x192px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/galihru/facemind/HEAD/192x192px.png
--------------------------------------------------------------------------------
/512x512px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/galihru/facemind/HEAD/512x512px.png
--------------------------------------------------------------------------------
/796x626.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/galihru/facemind/HEAD/796x626.png
--------------------------------------------------------------------------------
/799x595.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/galihru/facemind/HEAD/799x595.png
--------------------------------------------------------------------------------
/802x600.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/galihru/facemind/HEAD/802x600.png
--------------------------------------------------------------------------------
/layarui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/galihru/facemind/HEAD/layarui.png
--------------------------------------------------------------------------------
/tests/test_facemind.py:
--------------------------------------------------------------------------------
1 | def test_example_function():
2 |     assert True
3 |
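4 | # Hedged sketch of a slightly more useful smoke test. It assumes the
5 | # mental_health_app package in this repository is importable and re-exports
6 | # MentalHealthPredictor, as its __init__.py does.
7 | def test_package_exports():
8 |     import mental_health_app
9 |     assert hasattr(mental_health_app, "MentalHealthPredictor")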
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | opencv-python
2 | numpy
3 | pandas
4 | matplotlib
5 | mediapipe
6 | selenium
7 | PyQt5
8 | pytest
9 |
--------------------------------------------------------------------------------
/mental_health_app/test.py:
--------------------------------------------------------------------------------
1 | # Test module
2 | def test_import():
3 |     import mental_health_app.main
4 |     assert True
5 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Package marker for the tests suite. There is no tests/main module;
2 | # the package re-exports live in mental_health_app/__init__.py.
3 |
--------------------------------------------------------------------------------
/mental_health_app/requirements.txt:
--------------------------------------------------------------------------------
1 | opencv-python
2 | pandas
3 | matplotlib
4 | numpy
5 | mediapipe
6 | selenium
7 | PyQt5
8 | pytest-qt
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.pyc
3 | *.pyo
4 | *.pyd
5 | *.db
6 | *.sqlite3
7 | *.log
8 | *.env
9 | *.venv
10 | venv/
11 | env/
12 | dist/
13 | build/
14 | *.egg-info/
15 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "pip"
4 | directory: "/"
5 | schedule:
6 | interval: "weekly"
7 | open-pull-requests-limit: 5
8 |
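9 | # Note: this config only watches pip manifests in the repository root;
10 | # package.json and the GitHub Actions workflows are not covered here.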
--------------------------------------------------------------------------------
/mental_health_app/__init__.py:
--------------------------------------------------------------------------------
1 | from .main import save_to_database, show_graph, SplashScreen, LoginWindow, MentalHealthPredictor
2 |
3 | __all__ = ["save_to_database", "show_graph", "SplashScreen", "LoginWindow", "MentalHealthPredictor"]
4 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Facemind",
3 | "version": "1.0.0",
4 | "description": "Facemind Mental Health",
5 | "main": "sc.js",
6 | "scripts": {
7 | "start": "node sc.js"
8 | },
9 | "dependencies": {
10 | "crypto": "^1.0.1",
11 | "html-minifier": "^4.0.0",
12 | "terser": "^5.37.0",
13 | "clean-css": "^5.3.0"
14 | },
15 | "author": "Galih Ridho Utomo",
16 | "license": "MIT",
17 | "type": "module"
18 | }
19 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Dockerfile
2 | FROM python:3.9-slim
3 |
4 | # Set the working directory inside the container
5 | WORKDIR /app
6 |
7 | # Copy requirements.txt into the working directory
8 | COPY requirements.txt .
9 |
10 | # Install the Python dependencies
11 | RUN pip install --no-cache-dir -r requirements.txt
12 |
13 | # Copy the whole project into the working directory
14 | COPY . .
15 |
16 | # Expose the port the app will use (if needed)
17 | EXPOSE 8080
18 |
19 | # Command to run the application
20 | CMD ["python", "mental_health_app/main.py"]
21 |
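22 | # Example usage (hypothetical tag name): build and run the image locally:
23 | #   docker build -t facemind-app .
24 | #   docker run --rm -p 8080:8080 facemind-app
25 | # Note: the PyQt5 GUI needs access to a display server to render.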
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: [4211421036]
4 | patreon: GalihRidhoUtomo
5 | open_collective:
6 | ko_fi: # Replace with a single Ko-fi username
7 | tidelift: #facemind
8 | community_bridge: #facemind
9 | liberapay: galihridhoutomo
10 | issuehunt: # Replace with a single IssueHunt username
11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
12 | polar: # Replace with a single Polar username
13 | buy_me_a_coffee: # Replace with a single Buy Me a Coffee username
14 | thanks_dev: # Replace with a single thanks.dev username
15 | custom:
16 | - 'https://www.paypal.me/GALIHRIDHOUTOMO'
17 |
--------------------------------------------------------------------------------
/download.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
13 |
14 |
15 | You will be redirected to download Face Mind!
16 |
17 |
18 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | Use this section to tell people about which versions of your project are
6 | currently being supported with security updates.
7 |
8 | | Version | Supported |
9 | | ------- | ------------------ |
10 | | 5.1.x | :white_check_mark: |
11 | | 5.0.x | :x: |
12 | | 4.0.x | :white_check_mark: |
13 | | < 4.0 | :x: |
14 |
15 | ## Reporting a Vulnerability
16 |
17 | Use this section to tell people how to report a vulnerability.
18 |
19 | Tell them where to go, how often they can expect to get an update on a
20 | reported vulnerability, what to expect if the vulnerability is accepted or
21 | declined, etc.
22 |
--------------------------------------------------------------------------------
/.github/workflows/dc.yml:
--------------------------------------------------------------------------------
1 | name: Publish Docker Image
2 |
3 | on:
4 | push:
5 | branches:
6 | - main # Run this workflow on every push to the main branch
7 |
8 | jobs:
9 | build-and-publish:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - name: Checkout code
14 | uses: actions/checkout@v3
15 |
16 | - name: Log in to GitHub Container Registry
17 | uses: docker/login-action@v2
18 | with:
19 | registry: ghcr.io
20 | username: ${{ github.actor }}
21 | password: ${{ secrets.GITHUB_TOKEN }}
22 |
23 | - name: Build Docker image
24 | run: |
25 | docker build -t ghcr.io/${{ github.repository_owner }}/mental_health_app:latest .
26 |
27 | - name: Push Docker image to GitHub Container Registry
28 | run: |
29 | docker push ghcr.io/${{ github.repository_owner }}/mental_health_app:latest
30 |
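31 | # The published image can then be pulled with, e.g.:
32 | #   docker pull ghcr.io/galihru/mental_health_app:latest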
--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | name: Python CI
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 |
11 | jobs:
12 | test:
13 | runs-on: ubuntu-latest # You can also use windows-latest or macos-latest
14 |
15 | steps:
16 | - name: Checkout code
17 | uses: actions/checkout@v2
18 |
19 | - name: Set up Python
20 | uses: actions/setup-python@v2
21 | with:
22 | python-version: '3.8' # Set your desired Python version
23 |
24 | - name: Install dependencies
25 | run: |
26 | python -m pip install --upgrade pip
27 | pip install pytest
28 | pip install -r mental_health_app/requirements.txt
30 |
31 |
32 | - name: Run tests
33 | run: |
34 | pytest mental_health_app/test.py --maxfail=1 --disable-warnings -q
35 |
36 |
37 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: '[Bug] '
5 | labels: bug
6 | assignees: '4211421036'
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is:
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Smartphone (please complete the following information):**
32 | - Device: [e.g. iPhone6]
33 | - OS: [e.g. iOS8.1]
34 | - Browser [e.g. stock browser, safari]
35 | - Version [e.g. 22]
36 |
37 | **Additional context**
38 | Add any other context about the problem here.
39 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=42", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "mental_health_app"
7 | version = "0.6"
8 | authors = [
9 | {name = "GALIH RIDHO UTOMO", email = "g4lihru@students.unnes.ac.id"},
10 | {name = "Ana Maulida", email = "anamaulida@students.unnes.ac.id"},
11 | ]
12 | description = "A mental health prediction app using facial analysis."
13 | readme = "README.md"
14 | requires-python = ">=3.6"
15 | classifiers = [
16 | "Programming Language :: Python :: 3",
17 | "License :: OSI Approved :: MIT License",
18 | "Operating System :: OS Independent",
19 | ]
20 | dependencies = [
21 | "opencv-python",
22 | "numpy",
23 | "pandas",
24 | "matplotlib",
25 | "mediapipe",
26 | "selenium",
27 | "PyQt5",
28 | ]
29 |
30 | [project.urls]
31 | Documentation = "https://github.com/galihru/facemind#readme"
32 | Source = "https://github.com/galihru/facemind"
33 | Homepage = "https://galihru.github.io/facemind/"
34 |
35 | [project.scripts]
36 | mental_health_app = "mental_health_app.main:main"
37 |
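38 | # With this [project.scripts] entry, `pip install .` installs a
39 | # `mental_health_app` console command that invokes mental_health_app.main:main.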
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # setup.py
2 | from setuptools import setup, find_packages
3 |
4 | setup(
5 | name='mental_health_app',
6 | version='0.6',
7 | packages=find_packages(),
8 | install_requires=[
9 | 'opencv-python',
10 | 'numpy',
11 | 'pandas',
12 | 'matplotlib',
13 | 'mediapipe',
14 | 'selenium',
15 | 'PyQt5',
16 | ],
17 | entry_points={
18 | 'console_scripts': [
19 | 'mental_health_app=mental_health_app.main:main',
20 | ],
21 | },
22 | author='GALIH RIDHO UTOMO, Ana Maulida',
23 | author_email='g4lihru@students.unnes.ac.id, anamaulida@students.unnes.ac.id',
24 | description='A mental health prediction app using facial analysis.',
25 | long_description=open('README.md').read(),
26 | long_description_content_type='text/markdown',
27 | url='https://github.com/galihru/facemind',
28 | classifiers=[
29 | 'Programming Language :: Python :: 3',
30 | 'License :: OSI Approved :: MIT License',
31 | 'Operating System :: OS Independent',
32 | ],
33 | python_requires='>=3.6',
34 | )
35 |
--------------------------------------------------------------------------------
/.github/workflows/update-nonce.yml:
--------------------------------------------------------------------------------
1 | name: Update HTML with Dynamic Nonce
2 |
3 | on:
4 | schedule:
5 | - cron: '*/5 * * * *' # Runs every 5 minutes, adjust as needed.
6 | push:
7 | branches:
8 | - main
9 |
10 | jobs:
11 | update-html:
12 | runs-on: ubuntu-latest
13 |
14 | steps:
15 | - name: Checkout repository
16 | uses: actions/checkout@v3
17 |
18 | - name: Setup Node.js
19 | uses: actions/setup-node@v3
20 | with:
21 | node-version: '16'
22 |
23 | - name: Clean previous installations
24 | run: rm -rf node_modules package-lock.json
25 |
26 | - name: Install dependencies
27 | run: npm install
28 |
29 | - name: Run script to update HTML
30 | run: node sc.js
31 |
32 | - name: Commit and push changes
33 | run: |
34 | git config --global user.name "GitHub Actions"
35 | git config --global user.email "actions@github.com"
36 |
37 | # Stage all modified files
38 | git add .
39 |
40 | # Commit changes
41 | git commit -m "Update index.html with dynamic nonce" || echo "No changes to commit"
42 |
43 | # Pull remote changes to avoid conflicts
44 | git pull --rebase origin main
45 |
46 | # Push changes to the remote repository
47 | git push
48 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Mental Health App
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 | This application uses computer vision and machine learning to analyze mental health based on facial expressions. The app includes a login system and real-time mental health analysis through facial landmarks, using OpenCV, Mediapipe, and PyQt5.
15 |
16 | ## Requirements
17 |
18 | Before running the app, make sure you have the following Python packages installed:
19 |
20 | - `opencv-python`
21 | - `numpy`
22 | - `mediapipe`
23 | - `selenium`
24 | - `PyQt5`
25 | - `pandas`
26 |
27 | You can also run the app without installing anything locally by pulling the prebuilt Docker image:
28 |
29 | ```bash
30 | docker pull ghcr.io/galihru/mental_health_app:latest
31 | ```
32 |
33 | ## Authors
34 | 1. Ana Maulida
35 | 2. GALIH RIDHO UTOMO
36 |
37 | Developed as a delegation entry for a paper competition.
38 |
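39 | ## Local installation
40 |
41 | A minimal sketch for a pip-based setup, assuming you run it from the repository root:
42 |
43 | ```bash
44 | pip install -r requirements.txt
45 | python mental_health_app/main.py
46 | ```
47 |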
--------------------------------------------------------------------------------
/.github/workflows/deploy.yml:
--------------------------------------------------------------------------------
1 | name: Auto Deployment
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | steps:
12 | # Checkout the repository
13 | - name: Checkout repository
14 | uses: actions/checkout@v4
15 |
16 | # Set up Git configuration (user info)
17 | - name: Set up Git config
18 | run: |
19 | git config user.name "github-actions"
20 | git config user.email "github-actions@github.com"
21 |
22 | # Install dependencies if needed (if your project uses npm, yarn, etc.)
23 | - name: Install dependencies
24 | run: npm install
25 |
26 | # Stage changes (e.g., package-lock.json)
27 | - name: Stage changes
28 | run: git add .
29 |
30 | # Commit the changes if there are any changes to commit
31 | - name: Commit changes
32 | run: |
33 | git commit -m "Automatic deployment commit" || echo "No changes to commit"
34 | continue-on-error: true # In case there's nothing to commit, continue without error
35 |
36 | # Pull the latest changes from the remote repository
37 | - name: Pull latest changes
38 | run: |
39 | git pull origin main --rebase
40 |
41 | # Push the changes back to the repository
42 | - name: Push changes
43 | env:
44 | GH_TOKEN: ${{ secrets.GH_TOKEN }}
45 | run: |
46 | git pull
47 | git push origin main
48 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # Description:
2 |
3 | Please provide a concise explanation of the modification being implemented, along with a rationale justifying its necessity and the manner in which it addresses the identified issue.
4 |
5 | ## Nature of Changes
6 |
7 | Please specify the type of change you are making. (Delete the options that do not apply.)
8 |
9 | - [ ] Bug fix (non-breaking change that fixes an issue)
10 | - [ ] New feature (non-breaking change that adds functionality)
11 | - [ ] Breaking change (a fix or feature that affects compatibility with existing software)
12 | - [ ] Documentation improvement
13 | - [ ] Other (specify below)
14 |
15 | ## Checklist
16 |
17 | - [ ] I have read and understood [CONTRIBUTING.md](CONTRIBUTING.md).
18 | - [ ] I have run the `pre-commit` checks on all files and they pass.
19 | - [ ] I have added tests covering the changes.
20 | - [ ] I have updated the documentation as necessary.
21 | - [ ] The test suite passes with `pytest`.
22 |
23 | ## Additional Information
24 |
25 | Please submit any additional information that you believe may be useful to reviewers. This may include screenshots, links to related issues, or special notes.
26 |
27 | ## Related Issue
28 |
29 | Fixes # (insert issue number if any)
30 |
31 | ## Screenshots (if required)
32 |
33 | Add screenshots if the change involves UI or visuals.
34 |
35 | ---
36 |
37 | **Note:** Please be sure to delete any irrelevant sections before submitting the PR.
38 |
--------------------------------------------------------------------------------
/.github/workflows/jekyll-gh-pages.yml:
--------------------------------------------------------------------------------
1 | name: Deploy Jekyll to GitHub Pages
2 |
3 | on:
4 | schedule:
5 | - cron: '*/5 * * * *' # This runs every 5 minutes
6 | push:
7 | branches:
8 | - main # Ensures the action is triggered when pushing to the main branch
9 |
10 | # Allows you to run this workflow manually from the Actions tab
11 | workflow_dispatch:
12 |
13 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
14 | permissions:
15 | contents: read
16 | pages: write
17 | id-token: write
18 |
19 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
20 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
21 | concurrency:
22 | group: "pages"
23 | cancel-in-progress: false
24 |
25 | jobs:
26 | # Build job
27 | build:
28 | runs-on: ubuntu-latest
29 | steps:
30 | - name: Checkout
31 | uses: actions/checkout@v4
32 | - name: Setup Pages
33 | uses: actions/configure-pages@v5
34 | - name: Build with Jekyll
35 | uses: actions/jekyll-build-pages@v1
36 | with:
37 | source: ./ # Root of the repository
38 | destination: ./_site # Destination folder for the built site
39 | - name: Upload artifact
40 | uses: actions/upload-pages-artifact@v3
41 |
42 | # Deployment job
43 | deploy:
44 | environment:
45 | name: Download Apps
46 | url: ${{ steps.deployment.outputs.page_url }}
47 | runs-on: ubuntu-latest
48 | needs: build # This ensures the deploy job will only run after the build job finishes
49 | steps:
50 | - name: Deploy to GitHub Pages
51 | id: deployment
52 | uses: actions/deploy-pages@v4
53 | with:
54 | token: ${{ secrets.GITHUB_TOKEN }} # The action's input is named `token`; the default GitHub token suffices
55 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contributing
2 |
3 | Hi there! We're thrilled that you'd like to contribute to this project. Your help is essential for keeping it great.
4 |
5 | Contributions to this project are [released](https://docs.github.com/en/github/site-policy/github-terms-of-service#6-contributions-under-repository-license)
6 | to the public under the [project's open source license](LICENSE).
7 |
8 | ## Submitting a pull request
9 |
10 | 1. [Fork](https://github.com/4211421036/facemind/fork) and clone the repository
11 | 2. Configure and install the dependencies: `yarn install`
12 | 3. Create a new branch: `git checkout -b my-branch-name`
13 | 4. Make your change
14 | 5. Make sure the tests pass: `docker buildx bake test`
15 | 6. Format code and build javascript artifacts: `docker buildx bake pre-checkin`
16 | 7. Validate all code has correctly formatted and built: `docker buildx bake validate`
17 | 8. Push to your fork and [submit a pull request](https://github.com/4211421036/facemind/compare)
18 | 9. Pat yourself on the back and wait for your pull request to be reviewed and merged.
19 |
20 | Here are a few things you can do that will increase the likelihood of your pull request being accepted:
21 |
22 | - Write tests.
23 | - Make sure the `README.md` and any other relevant **documentation are kept up-to-date**.
24 | - We try to follow [SemVer v2.0.0](https://semver.org/). Randomly breaking public APIs is not an option.
25 | - Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as **separate pull requests**.
26 | - Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
27 |
28 | ## Resources
29 |
30 | - [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
31 | - [Using Pull Requests](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests)
32 | - [GitHub Help](https://docs.github.com/en)
33 |
--------------------------------------------------------------------------------
/dfs.js:
--------------------------------------------------------------------------------
1 | document.addEventListener('DOMContentLoaded', function () {
2 | const installButton = document.getElementById('installButton');
3 | const modal = document.getElementById('swipeableModal');
4 | const cancelInstall = document.getElementById('cancelInstall');
5 |
6 | if (!modal) {
7 | console.error('Element with id "swipeableModal" not found.');
8 | return;
9 | }
10 |
11 | const modalContent = modal.querySelector('.modal-content');
12 | if (!modalContent) {
13 | console.error('Element ".modal-content" not found inside #swipeableModal.');
14 | return;
15 | }
16 |
17 | let startY;
18 |
19 | installButton.onclick = function () {
20 | modal.classList.add('show');
21 | document.body.classList.add('modal-open');
22 | modalContent.classList.add('translate-reset');
23 | };
24 |
25 | cancelInstall.onclick = function () {
26 | closeModal();
27 | };
28 |
29 | modal.addEventListener('touchstart', function (e) {
30 | startY = e.touches[0].clientY;
31 | });
32 |
33 | modal.addEventListener('touchmove', function (e) {
34 | const currentY = e.touches[0].clientY;
35 | const diffY = currentY - startY;
36 |
37 | if (diffY > 0) {
38 | modalContent.classList.add('translate-drag');
39 | modalContent.style.setProperty('--dragY', `${diffY}px`);
40 | }
41 | });
42 |
43 | modal.addEventListener('touchend', function (e) {
44 | const endY = e.changedTouches[0].clientY;
45 | const diffY = endY - startY;
46 |
47 | modalContent.classList.remove('translate-drag');
48 |
49 | if (diffY > 100) {
50 | closeModal();
51 | } else {
52 | modalContent.classList.add('translate-reset');
53 | modalContent.style.setProperty('--dragY', `0px`);
54 | }
55 | });
56 |
57 | function closeModal() {
58 | modalContent.classList.remove('translate-reset', 'translate-drag');
59 | modalContent.classList.add('translate-hide');
60 |
61 | setTimeout(() => {
62 | modal.classList.remove('show');
63 | document.body.classList.remove('modal-open');
64 | modalContent.classList.remove('translate-hide');
65 | modalContent.classList.add('translate-reset');
66 | modalContent.style.removeProperty('--dragY');
67 | }, 300);
68 | }
69 | });
70 |
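71 | // Design note: a downward drag past 100px dismisses the sheet; shorter drags
72 | // snap it back into place via the .translate-reset class.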
--------------------------------------------------------------------------------
/.github/workflows/pb.yml:
--------------------------------------------------------------------------------
1 | name: Release to PyPI & GitHub
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | permissions:
9 | contents: write
10 | deployments: write
11 | id-token: write
12 |
13 | jobs:
14 | build_publish_and_release:
15 | runs-on: ubuntu-latest
16 | environment: pypi
17 |
18 | steps:
19 | - name: Check out code
20 | uses: actions/checkout@v4
21 | with:
22 | fetch-depth: 0
23 |
24 | - name: Read package metadata
25 | id: metadata
26 | run: |
27 | VERSION=$(grep '^version' pyproject.toml | head -1 | sed -E 's/version *= *"(.*)"/\1/')
28 | NAME=$(grep '^name' pyproject.toml | head -1 | sed -E 's/name *= *"(.*)"/\1/')
29 | echo "VERSION=$VERSION" >> $GITHUB_OUTPUT
30 | echo "PACKAGE_NAME=$NAME" >> $GITHUB_OUTPUT
31 |
32 | - name: Start PyPI Deployment
33 | id: create-deployment
34 | uses: actions/github-script@v7
35 | with:
36 | script: |
37 | const deployment = await github.rest.repos.createDeployment({
38 | owner: context.repo.owner,
39 | repo: context.repo.repo,
40 | ref: context.ref,
41 | environment: 'pypi',
42 | auto_merge: false,
43 | required_contexts: [],
44 | transient_environment: true,
45 | });
46 | return deployment.data.id;
47 | result-encoding: string
48 |
49 | - name: Set up Python
50 | uses: actions/setup-python@v5
51 | with:
52 | python-version: '3.x'
53 |
54 | - name: Install build & twine
55 | run: |
56 | python -m pip install --upgrade pip build twine
57 |
58 | - name: Build sdist & wheel
59 | run: python -m build --sdist --wheel
60 |
61 | - name: Publish to PyPI
62 | env:
63 | TWINE_USERNAME: __token__
64 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
65 | run: |
66 | python -m twine upload --skip-existing dist/*
67 |
68 | - name: Update Deployments Status
69 | uses: actions/github-script@v7
70 | with:
71 | script: |
72 | github.rest.repos.createDeploymentStatus({
73 | owner: context.repo.owner,
74 | repo: context.repo.repo,
75 | deployment_id: ${{ steps.create-deployment.outputs.result }},
76 | state: 'success',
77 | environment_url: 'https://pypi.org/project/${{ steps.metadata.outputs.PACKAGE_NAME }}/'
78 | });
79 |
--------------------------------------------------------------------------------
/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "FcaeMind",
3 | "short_name": "Face Mind",
4 | "description": "Application uses computer vision and machine learning to analyze mental health based on facial expressions. The app includes login system, and real-time mental health analysis through facial landmarks, using OpenCV, Mediapipe, and PyQt5. This Application Delegation Paper Competition for",
5 | "start_url": "./facemind",
6 | "display": "standalone",
7 | "orientation": "portrait",
8 | "background_color": "#fff",
9 | "theme_color": "#1A1A3D",
10 | "icons": [
11 | {
12 | "src": "192x192px.png",
13 | "sizes": "192x192",
14 | "type": "image/png"
15 | },
16 | {
17 | "src": "512x512px.png",
18 | "sizes": "512x512",
19 | "type": "image/png"
20 | }
21 | ],
22 | "screenshots": [
23 | {
24 | "src": "799x595.png",
25 | "sizes": "799x595",
26 | "type": "image/png",
27 | "form_factor": "wide",
28 | "label": "Splash Screen view displaying FaceMind App"
29 | },
30 | {
31 | "src": "799x595.png",
32 | "sizes": "799x595",
33 | "type": "image/png",
34 | "platform": "android",
35 | "label": "Splash Screen view displaying FaceMind App"
36 | },
37 | {
38 | "src": "802x600.png",
39 | "sizes": "802x630",
40 | "type": "image/png",
41 | "platform": "android",
42 | "label": "Dashboard view displaying FaceMind App"
43 | },
44 | {
45 | "src": "796x626.png",
46 | "sizes": "796x626",
47 | "type": "image/png",
48 | "platform": "android",
49 | "label": "Emotional analysis based on face recognition"
50 | }
51 | ],
52 | "shortcuts": [
53 | {
54 | "name": "Download Apps",
55 | "short_name": "Download Apps",
56 | "description": "Download Apps Face Mind",
57 | "url": "./download.html",
58 | "icons": [
59 | {
60 | "src": "192x192px.png",
61 | "sizes": "192x192"
62 | }
63 | ]
64 | }
65 | ],
66 | "manifest_version": 3,
67 | "version": "1.0.0",
68 | "author": "GALIH RIDHO UTOMO",
69 | "editor": "GALIH RIDHO UTOMO",
70 | "scope": "/",
71 | "display_override": [
72 | "window-control-overlay",
73 | "minimal-ui"
74 | ],
75 | "edgesidepanel": {
76 | "preferredwidth": 748
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/sty.css:
--------------------------------------------------------------------------------
1 | body {
2 | margin: 0;
3 | font-family: Arial, sans-serif;
4 | background-color: #1A1A3D;
5 | color: white;
6 | display: flex;
7 | justify-content: center;
8 | align-items: center;
9 | height: 100vh;
10 | flex-direction: column;
11 | }
12 |
13 | .container {
14 | text-align: center;
15 | max-width: 400px;
16 | }
17 |
18 | .header-image {
19 | margin-top: 20px;
20 | }
21 |
22 | .header-image img {
23 | width: 100%;
24 | height: auto;
25 | }
26 |
27 | .content {
28 | background-color: #2C2C54;
29 | padding: 20px;
30 | border-radius: 20px;
31 | margin-top: 20px;
32 | }
33 |
34 | .content h2 {
35 | margin: 10px 0;
36 | font-size: 24px;
37 | }
38 |
39 | .content p {
40 | font-size: 16px;
41 | margin: 10px 0;
42 | }
43 |
44 | button {
45 | margin-top: 20px;
46 | display: flex;
47 | flex-direction: column;
48 | gap: 15px;
49 | }
50 |
51 | button {
52 | padding: 15px;
53 | border: none;
54 | border-radius: 25px;
55 | font-size: 16px;
56 | cursor: pointer;
57 | transition: all 0.3s ease;
58 | width: 100%;
59 | align-items: center;
60 | }
61 |
62 | button .start-checkin {
63 | background-color: #4A4AFF;
64 | color: white;
65 | box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
66 | }
67 |
68 | button .start-checkin:hover {
69 | background-color: #3C3CFF;
70 | transform: translateY(-2px);
71 | box-shadow: 0 6px 12px rgba(0, 0, 0, 0.3);
72 | }
73 |
74 | button .start-checkin:active {
75 | background-color: #2C2CFF;
76 | transform: translateY(1px);
77 | }
78 |
79 | button .later {
80 | background-color: #E0E0E0;
81 | color: #4A4AFF;
82 | border: 2px solid #4A4AFF;
83 | font-weight: bold;
84 | }
85 |
86 | button .later:hover {
87 | background-color: #D0D0D0;
88 | transform: translateY(-2px);
89 | }
90 |
91 | button .later:active {
92 | background-color: #B0B0B0;
93 | transform: translateY(1px);
94 | }
95 |
96 | .note {
97 | font-size: 12px;
98 | color: #B0B0B0;
99 | margin-top: 10px;
100 | }
101 |
102 | /* Modal Styles */
103 | .modal {
104 | position: fixed;
105 | bottom: 0;
106 | left: 0;
107 | width: 100%;
108 | height: auto;
109 | display: none;
110 | justify-content: center;
111 | align-items: flex-end;
112 | background: rgba(0, 0, 0, 0.2);
113 | transition: transform 0.3s ease;
114 | }
115 |
116 | .modal.show {
117 | display: flex;
118 | transform: translateY(0);
119 | }
120 |
121 | .modal-content {
122 | background: #2c2c54;
123 | width: 100%;
124 | padding: 20px;
125 | border-radius: 20px 20px 0 0;
126 | box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.1);
127 | position: relative;
128 | transition: transform 0.3s ease;
129 | }
130 |
131 | .modal h2 {
132 | font-size: 20px;
133 | margin: 10px 0;
134 | }
135 |
136 | .modal button {
137 | width: 100%;
138 | padding: 15px;
139 | border: none;
140 | border-radius: 25px;
141 | font-size: 16px;
142 | cursor: pointer;
143 | }
144 |
145 | .modal button#confirmInstall {
146 | background-color: #4A4AFF;
147 | color: white;
148 | margin-bottom: 10px;
149 | }
150 |
151 | .modal button#cancelInstall {
152 | background-color: #E0E0E0;
153 | color: #4A4AFF;
154 | }
155 |
156 | /* Swipe indicator */
157 | .swipe-indicator {
158 | width: 50px;
159 | height: 5px;
160 | background-color: white;
161 | border-radius: 5px;
162 | position: absolute;
163 | top: 10px;
164 | left: 50%;
165 | transform: translateX(-50%);
166 | }
167 |
168 | /* Lock scroll on body when modal is open */
169 | body.modal-open {
170 | overflow: hidden;
171 | }
172 | img {
173 | width: inherit;
174 | background: #fff;
175 | border-radius: 15px;
176 | }
177 |
--------------------------------------------------------------------------------
/inst.js:
--------------------------------------------------------------------------------
1 | if ('serviceWorker' in navigator) {
2 | window.addEventListener('load', function() {
3 | navigator.serviceWorker.register('https://apps.ukmpenelitianunnes.com/sw.js').then(function(registration) {
4 | console.log('ServiceWorker registration successful with scope: ', registration.scope);
5 | }, function(err) {
6 | console.log('ServiceWorker registration failed: ', err);
7 | });
8 | });
9 | }
10 |
11 | let deferredInstallPrompt = null;
12 |
13 | window.addEventListener('beforeinstallprompt', function(event) {
14 | event.preventDefault();
15 | deferredInstallPrompt = event;
16 | showInstallPrompt();
17 | });
18 |
19 | function showInstallPrompt() {
20 | document.body.style.overflow = 'hidden';
21 |
22 | // Create the modal container
23 | const modal = document.createElement('div');
24 | modal.style.position = 'fixed';
25 | modal.style.top = '0';
26 | modal.style.left = '0';
27 | modal.style.width = '100%';
28 | modal.style.height = '100%';
29 | modal.style.background = 'rgba(0,0,0,0.5)';
30 | modal.style.display = 'flex';
31 | modal.style.justifyContent = 'center';
32 | modal.style.alignItems = 'center';
33 | modal.style.zIndex = '1000';
34 | modal.style.transition = 'transform 0.3s ease-out';
35 |
36 | // Create the install prompt box
37 | const installPrompt = document.createElement('div');
38 | installPrompt.style.padding = '20px';
39 | installPrompt.style.borderRadius = '30px';
40 | installPrompt.style.width = '80%';
41 | installPrompt.style.bottom = '70px';
42 | installPrompt.style.position = 'absolute';
43 | installPrompt.style.boxShadow = '0 0 10px rgba(0,0,0,0.2)';
44 | installPrompt.style.transition = 'opacity 0.3s ease-out';
45 |
46 | // Create the style tag and add styles
47 | const style = document.createElement('style');
48 | style.innerHTML = `
49 | @media (prefers-color-scheme: dark) {
50 | div {
51 | background-color: #1a202c;
52 | color: #fff;
53 | }
54 | }
55 |
56 | @media (prefers-color-scheme: light) {
57 | div {
58 | background: radial-gradient(100% 193.51% at 100% 0%, #EDF4F8 0%, #EFF2FA 16.92%, #FAEFF6 34.8%, #FAE6F2 48.8%, #FAF0F7 63.79%, #F1F1FB 81.34%, #F0F4F8 100%);
59 | color: #000;
60 | }
61 | }
62 |
63 | h2 {
64 | margin-bottom: 10px;
65 | font-size: 1.5em;
66 | }
67 |
68 | p {
69 | font-size: 1em;
70 | margin-bottom: 20px;
71 | }
72 |
73 | button {
74 | padding: 10px 20px;
75 | border: none;
76 | border-radius: 50px;
77 | background-color: #0050A8;
78 | color: white;
79 | cursor: pointer;
80 | width: 100%;
81 | }
82 |
83 | button:hover {
84 | background-color: #0056b3;
85 | }
86 |
87 | .slide-top {
88 | width: 50px;
89 | height: 5px;
90 | background-color: white;
91 | border-radius: 5px;
92 | position: absolute;
93 | top: 10px;
94 | left: 50%;
95 | transform: translateX(-50%);
96 | }
97 |
98 | img {
99 | position: relative;
100 | left: 50%;
101 | transform: translate(-50%, 10px);
102 | background-size: cover;
103 | width: 85px;
104 | margin: 0;
105 | padding: 0;
106 | }
107 | `;
108 | document.head.appendChild(style); // Append styles to the head
109 |
110 | // Add the inner HTML to the install prompt
111 | installPrompt.innerHTML = `
112 |
113 |
114 | Face Mind
115 | Application uses computer vision and machine learning to analyze mental health based on facial expressions
116 | Install
117 | `;
118 |
119 | // Add event listener to the install button
120 | installPrompt.querySelector('#install-button').addEventListener('click', function() {
121 | if (deferredInstallPrompt) {
122 | deferredInstallPrompt.prompt();
123 | deferredInstallPrompt.userChoice.then(function(choiceResult) {
124 | if (choiceResult.outcome === 'accepted') {
125 | console.log('User accepted the install prompt');
126 | } else {
127 | console.log('User dismissed the install prompt');
128 | }
129 | });
130 | }
131 | });
132 |
133 | // Handling swipe events for closing the modal
134 | let swipeStartY = 0;
135 | let deltaY = 0;
136 |
137 | modal.addEventListener('touchstart', function(event) {
138 | swipeStartY = event.touches[0].clientY;
139 | });
140 |
141 | modal.addEventListener('touchmove', function(event) {
142 | let currentY = event.touches[0].clientY;
143 | deltaY = currentY - swipeStartY;
144 | if (deltaY > 0) {
145 | modal.style.transform = `translateY(${deltaY}px)`;
146 | }
147 | });
148 |
149 | modal.addEventListener('touchend', function() {
150 | if (deltaY > 100) {
151 | modal.style.transform = `translateY(100%)`;
152 | setTimeout(function() {
153 | modal.remove();
154 | document.body.style.overflow = '';
155 | }, 300);
156 | } else {
157 | modal.style.transform = 'translateY(0)';
158 | }
159 | deltaY = 0;
160 | });
161 |
162 | // Append the modal and install prompt to the body
163 | document.body.appendChild(modal);
164 | modal.appendChild(installPrompt);
165 | }
166 |
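167 | // Note: 'beforeinstallprompt' is a non-standard, Chromium-only event; in
168 | // browsers that never fire it, this install prompt simply does not appear.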
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | E-Mail [g4lihru@students.unnes.ac.id](mailto:g4lihru@students.unnes.ac.id) (GALIH RIDHO UTOMO).
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/st.js:
--------------------------------------------------------------------------------
1 | // Simple SHA-256 hash function for hashing the CSS content
2 | function sha256(str) {
3 | const buffer = new TextEncoder("utf-8").encode(str);
4 | return window.crypto.subtle.digest("SHA-256", buffer).then(hashBuffer => {
5 | return Array.from(new Uint8Array(hashBuffer)).map(byte => byte.toString(16).padStart(2, '0')).join('');
6 | });
7 | }
8 |
9 | // Function to hash the CSS and dynamically apply it
10 | async function hashAndApplyCSS() {
11 | const cssContent = `
12 | body {
13 | margin: 0;
14 | font-family: Arial, sans-serif;
15 | background-color: #1A1A3D;
16 | color: white;
17 | display: flex;
18 | justify-content: center;
19 | align-items: center;
20 | height: 100vh;
21 | flex-direction: column;
22 | }
23 |
24 | .container {
25 | text-align: center;
26 | max-width: 400px;
27 | }
28 |
29 | .header-image {
30 | margin-top: 20px;
31 | }
32 |
33 | .header-image img {
34 | width: 100%;
35 | height: auto;
36 | }
37 |
38 | .content {
39 | background-color: #2C2C54;
40 | padding: 20px;
41 | border-radius: 20px;
42 | margin-top: 20px;
43 | }
44 |
45 | .content h2 {
46 | margin: 10px 0;
47 | font-size: 24px;
48 | }
49 |
50 | .content p {
51 | font-size: 16px;
52 | margin: 10px 0;
53 | }
54 |
55 | button {
56 | margin-top: 20px;
57 | display: flex;
58 | flex-direction: column;
59 | gap: 15px;
60 | }
61 |
62 | button {
63 | padding: 15px;
64 | border: none;
65 | border-radius: 25px;
66 | font-size: 16px;
67 | cursor: pointer;
68 | transition: all 0.3s ease;
69 | width: 100%;
70 | align-items: center;
71 | }
72 |
73 | button .start-checkin {
74 | background-color: #4A4AFF;
75 | color: white;
76 | box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
77 | }
78 |
79 | button .start-checkin:hover {
80 | background-color: #3C3CFF;
81 | transform: translateY(-2px);
82 | box-shadow: 0 6px 12px rgba(0, 0, 0, 0.3);
83 | }
84 |
85 | button .start-checkin:active {
86 | background-color: #2C2CFF;
87 | transform: translateY(1px);
88 | }
89 |
90 | button .later {
91 | background-color: #E0E0E0;
92 | color: #4A4AFF;
93 | border: 2px solid #4A4AFF;
94 | font-weight: bold;
95 | }
96 |
97 | button .later:hover {
98 | background-color: #D0D0D0;
99 | transform: translateY(-2px);
100 | }
101 |
102 | button .later:active {
103 | background-color: #B0B0B0;
104 | transform: translateY(1px);
105 | }
106 |
107 | .note {
108 | font-size: 12px;
109 | color: #B0B0B0;
110 | margin-top: 10px;
111 | }
112 |
113 | /* Modal Styles */
114 | .modal {
115 | position: fixed;
116 | bottom: 0;
117 | left: 0;
118 | width: 100%;
119 | height: auto;
120 | display: none;
121 | justify-content: center;
122 | align-items: flex-end;
123 | background: rgba(0, 0, 0, 0.2);
124 | transition: transform 0.3s ease;
125 | }
126 |
127 | .modal.show {
128 | display: flex;
129 | transform: translateY(0);
130 | }
131 |
132 | .modal-content {
133 | background: #2c2c54;
134 | width: 100%;
135 | padding: 20px;
136 | border-radius: 20px 20px 0 0;
137 | box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.1);
138 | position: relative;
139 | transition: transform 0.3s ease;
140 | }
141 |
142 | .modal h2 {
143 | font-size: 20px;
144 | margin: 10px 0;
145 | }
146 |
147 | .modal button {
148 | width: 100%;
149 | padding: 15px;
150 | border: none;
151 | border-radius: 25px;
152 | font-size: 16px;
153 | cursor: pointer;
154 | }
155 |
156 | .modal button#confirmInstall {
157 | background-color: #4A4AFF;
158 | color: white;
159 | margin-bottom: 10px;
160 | }
161 |
162 | .modal button#cancelInstall {
163 | background-color: #E0E0E0;
164 | color: #4A4AFF;
165 | }
166 |
167 | /* Swipe indicator */
168 | .swipe-indicator {
169 | width: 50px;
170 | height: 5px;
171 | background-color: white;
172 | border-radius: 5px;
173 | position: absolute;
174 | top: 10px;
175 | left: 50%;
176 | transform: translateX(-50%);
177 | }
178 |
179 | /* Lock scroll on body when modal is open */
180 | body.modal-open {
181 | overflow: hidden;
182 | }
183 | `;
184 |
185 | // Hash the CSS content
186 | const cssHash = await sha256(cssContent);
187 |
188 | // Add the hash to the <style> tag
--------------------------------------------------------------------------------
/jws.js:
--------------------------------------------------------------------------------
105 |
106 |
107 | Face Mind
108 | Application uses computer vision and machine learning to analyze mental health based on facial expressions
109 | Install
110 | `;
111 |
112 | // Add event listener to install button
113 | installPrompt.querySelector('#install-button').addEventListener('click', function () {
114 | if (deferredInstallPrompt) {
115 | deferredInstallPrompt.prompt();
116 | deferredInstallPrompt.userChoice
117 | .then(function (choiceResult) {
118 | if (choiceResult.outcome === 'accepted') {
119 | console.log('User accepted the install prompt');
120 | } else {
121 | console.log('User dismissed the install prompt');
122 | }
123 | });
124 | }
125 | });
126 |
127 | // Add swipe event listener to modal with dynamic translateY
128 | let swipeStartY = 0;
129 | let deltaY = 0;
130 | modal.addEventListener('touchstart', function (event) {
131 | swipeStartY = event.touches[0].clientY;
132 | console.log('touchstart: ', swipeStartY);
133 | });
134 | modal.addEventListener('touchmove', function (event) {
135 | let currentY = event.touches[0].clientY;
136 | deltaY = currentY - swipeStartY;
137 | console.log('touchmove: deltaY = ', deltaY);
138 |
139 | // Prevent modal from moving up
140 | if (deltaY > 0) {
141 | modal.style.transform = `translateY(${deltaY}px)`;
142 | }
143 | });
144 | modal.addEventListener('touchend', function () {
145 | console.log('touchend: deltaY = ', deltaY);
146 | if (deltaY > 100) { // Threshold untuk swipe
147 | // Animate modal sliding down and remove
148 | modal.style.transform = `translateY(100%)`;
149 | setTimeout(function () {
150 | modal.remove();
151 | // Unlock body scroll
152 | document.body.style.overflow = '';
153 | // Reload the page
154 | }, 300);
155 | } else {
156 | // If swipe is not enough, reset position
157 | modal.style.transform = 'translateY(0)';
158 | }
159 | // Reset deltaY
160 | deltaY = 0;
161 | });
162 | // Append modal and install prompt to body
163 | document.body.appendChild(modal);
164 | modal.appendChild(installPrompt);
165 | }
166 |
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 | Facemind Face Mind Face the Future of Mental Health Solution Application based on Computer Vision, and Integrated with Instagram Apps
2 | Install Application Close Apps Note that you won't be able to proceed to the next touch session unless you complete the check-in
3 | Install Application Yes, Install Cancel
--------------------------------------------------------------------------------
/.github/workflows/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==2.0.0
2 | Adafruit-Blinka==8.24.0
3 | adafruit-board-toolkit==1.1.1
4 | Adafruit-CharLCD==1.1.1
5 | adafruit-circuitpython-74hc595==1.4.3
6 | adafruit-circuitpython-busdevice==5.2.6
7 | adafruit-circuitpython-charlcd==3.4.9
8 | adafruit-circuitpython-dht==4.0.3
9 | adafruit-circuitpython-mcp230xx==2.5.12
10 | adafruit-circuitpython-requests==2.0.2
11 | adafruit-circuitpython-typing==1.9.5
12 | Adafruit-GPIO==1.0.3
13 | Adafruit-PlatformDetect==3.53.0
14 | Adafruit-PureIO==1.1.11
15 | affine==2.4.0
16 | aiohttp==3.9.5
17 | aiosignal==1.3.1
18 | altgraph==0.17.4
19 | anyio==4.0.0
20 | argon2-cffi==23.1.0
21 | argon2-cffi-bindings==21.2.0
22 | arrow==1.3.0
23 | asttokens==2.4.1
24 | astunparse==1.6.3
25 | async-lru==2.0.4
26 | async-timeout==4.0.3
27 | attrs==24.3.0
28 | Babel==2.15.0
29 | beautifulsoup4==4.12.3
30 | bidict==0.22.1
31 | binaryornot==0.4.4
32 | bitarray==2.8.5
33 | bitstring==4.1.4
34 | bleach==6.1.0
35 | blinker==1.7.0
36 | board==1.0
37 | briefcase==0.3.16
38 | build==1.0.3
39 | buildozer==1.5.0
40 | CacheControl==0.14.0
41 | cachetools==5.3.2
42 | certifi==2023.7.22
43 | cffi==1.16.0
44 | chardet==5.2.0
45 | charset-normalizer==3.3.2
46 | click==8.1.7
47 | click-plugins==1.1.1
48 | cligj==0.7.2
49 | clr-loader==0.2.6
50 | colorama==0.4.6
51 | comm==0.2.2
52 | comtypes==1.2.0
53 | contextily==1.6.0
54 | contourpy==1.2.0
55 | controller==0.1.0
56 | cookiecutter==2.5.0
57 | cryptography==41.0.7
58 | cvzone==1.6.1
59 | cycler==0.12.1
60 | Cython==3.0.10
61 | debugpy==1.8.1
62 | decorator==5.1.1
63 | deepface==0.0.90
64 | defusedxml==0.7.1
65 | distlib==0.3.8
66 | docutils==0.20.1
67 | ecdsa==0.18.0
68 | embedbase==1.2.8
69 | esptool==4.6.2
70 | et-xmlfile==1.1.0
71 | exceptiongroup==1.1.3
72 | executing==2.0.1
73 | fastapi==0.95.2
74 | fastjsonschema==2.19.1
75 | filelock==3.13.1
76 | fiona==1.9.6
77 | fire==0.6.0
78 | firebase==4.0.1
79 | firebase-admin==6.5.0
80 | Flask==3.0.3
81 | flatbuffers==24.12.23
82 | fonttools==4.44.0
83 | fqdn==1.5.1
84 | frozenlist==1.4.1
85 | fsspec==2024.3.1
86 | future==0.18.3
87 | fuzzywuzzy==0.18.0
88 | gast==0.5.4
89 | gdown==5.1.0
90 | geographiclib==2.0
91 | geopandas==0.14.4
92 | geopy==2.4.1
93 | georinex==1.16.1
94 | gitdb==4.0.11
95 | GitPython==3.1.40
96 | gnss==0.3.1
97 | gnss-lib-py==1.0.2
98 | google-api-core==2.18.0
99 | google-api-python-client==2.125.0
100 | google-auth==2.29.0
101 | google-auth-httplib2==0.2.0
102 | google-auth-oauthlib==1.0.0
103 | google-cloud-core==2.4.1
104 | google-cloud-firestore==2.16.0
105 | google-cloud-storage==2.16.0
106 | google-crc32c==1.5.0
107 | google-pasta==0.2.0
108 | google-resumable-media==2.7.0
109 | googleapis-common-protos==1.63.0
110 | gpiod==1.5.4
111 | gps3==0.33.3
112 | grpcio==1.62.1
113 | grpcio-status==1.62.1
114 | gunicorn==22.0.0
115 | h11==0.14.0
116 | h5py==3.12.1
117 | hatanaka==2.8.1
118 | httpcore==1.0.2
119 | httplib2==0.22.0
120 | httptools==0.6.1
121 | httpx==0.25.1
122 | idna==3.4
123 | imageio==2.34.0
124 | imbalanced-learn==0.11.0
125 | importlib-metadata==6.8.0
126 | importlib-resources==6.1.1
127 | iniconfig==2.0.0
128 | ipykernel==6.29.4
129 | ipython==8.18.1
130 | ipywidgets==8.1.2
131 | iso8601==2.1.0
132 | isoduration==20.11.0
133 | itsdangerous==2.2.0
134 | jax==0.4.26
135 | jedi==0.19.1
136 | Jinja2==3.1.2
137 | joblib==1.3.2
138 | json5==0.9.25
139 | jsonpointer==2.4
140 | jsonschema==4.22.0
141 | jsonschema-specifications==2023.12.1
142 | jupyter==1.0.0
143 | jupyter-console==6.6.3
144 | jupyter-events==0.10.0
145 | jupyter-lsp==2.2.5
146 | jupyter_client==8.6.2
147 | jupyter_core==5.7.2
148 | jupyter_server==2.14.1
149 | jupyter_server_terminals==0.5.3
150 | jupyterlab==4.2.1
151 | jupyterlab_pygments==0.3.0
152 | jupyterlab_server==2.27.2
153 | jupyterlab_widgets==3.0.10
154 | kaleido==0.2.1
155 | keras==3.8.0
156 | keyboard==0.13.5
157 | Kivy==2.2.1
158 | kivy-deps.angle==0.3.3
159 | kivy-deps.glew==0.3.1
160 | kivy-deps.sdl2==0.6.0
161 | Kivy-examples==2.2.1
162 | Kivy-Garden==0.1.5
163 | kivymd==1.2.0
164 | kiwisolver==1.4.5
165 | labyrinth-py==1.0.4
166 | Levenshtein==0.25.1
167 | libclang==16.0.6
168 | Markdown==3.5.1
169 | markdown-it-py==3.0.0
170 | MarkupSafe==2.1.3
171 | matplotlib==3.8.1
172 | matplotlib-inline==0.1.6
173 | mdurl==0.1.2
174 | mediapipe==0.10.11
175 | mercantile==1.2.1
176 | mistune==3.0.2
177 | ml-dtypes==0.4.1
178 | mpmath==1.3.0
179 | msgpack==1.0.8
180 | mtcnn==0.1.1
181 | multidict==6.0.5
182 | namex==0.0.8
183 | nbclient==0.10.0
184 | nbconvert==7.16.4
185 | nbformat==5.10.4
186 | ncompress==1.0.2
187 | nest-asyncio==1.6.0
188 | networkx==3.2.1
189 | notebook==7.2.1
190 | notebook_shim==0.2.4
191 | numpy==1.26.2
192 | oauthlib==3.2.2
193 | openai==0.27.10
194 | opencv-contrib-python==4.8.1.78
195 | opencv-python==4.9.0.80
196 | opencv-python-headless==4.9.0.80
197 | openpyxl==3.1.2
198 | OPi.GPIO==0.5.2
199 | opt-einsum==3.3.0
200 | opticalmaterialspy==0.3.3
201 | optree==0.11.0
202 | outcome==1.3.0.post0
203 | overrides==7.7.0
204 | packaging==23.2
205 | pandas==2.2.2
206 | pandocfilters==1.5.1
207 | parso==0.8.3
208 | pefile==2023.2.7
209 | pexpect==4.9.0
210 | Pillow==10.1.0
211 | platformdirs==3.11.0
212 | plotly==5.20.0
213 | pluggy==1.4.0
214 | prometheus_client==0.20.0
215 | prompt-toolkit==3.0.43
216 | proto-plus==1.23.0
217 | protobuf==3.20.3
218 | psutil==5.9.7
219 | ptyprocess==0.7.0
220 | pure-eval==0.2.2
221 | py-cpuinfo==9.0.0
222 | pyasn1==0.5.0
223 | pyasn1-modules==0.3.0
224 | PyAudio==0.2.14
225 | pycparser==2.21
226 | pycryptodome==3.19.0
227 | pydantic==1.10.15
228 | pydantic-yaml==0.4.3
229 | pyFirmata==1.1.0
230 | pyftdi==0.55.0
231 | pygame==2.5.2
232 | pygame-ce==2.4.1
233 | pygame_gui==0.6.10
234 | Pygments==2.17.2
235 | pygpiod==0.2
236 | pyinstaller==6.2.0
237 | pyinstaller-hooks-contrib==2023.10
238 | PyJWT==2.8.0
239 | pynmea2==1.19.0
240 | pyOpenSSL==24.1.0
241 | pyparsing==3.1.1
242 | pypiwin32==223
243 | pyproj==3.6.1
244 | pyproject_hooks==1.0.0
245 | PyQt5==5.15.10
246 | PyQt5-Qt5==5.15.2
247 | PyQt5-sip==12.13.0
248 | PyQtWebEngine==5.15.6
249 | PyQtWebEngine-Qt5==5.15.2
250 | pyserial==3.5
251 | PySocks==1.7.1
252 | pyTelegramBotAPI==4.14.0
253 | pytest==8.1.1
254 | pytest-lazy-fixture==0.6.3
255 | python-dateutil==2.8.2
256 | python-dotenv==1.0.1
257 | python-engineio==4.8.0
258 | python-i18n==0.3.9
259 | python-json-logger==2.0.7
260 | python-Levenshtein==0.25.1
261 | python-slugify==8.0.1
262 | python-socketio==5.10.0
263 | python-telegram-bot==20.6
264 | pythonnet==3.0.3
265 | pyttsx3==2.90
266 | pytz==2023.3.post1
267 | pyusb==1.2.1
268 | pywin32==306
269 | pywin32-ctypes==0.2.2
270 | pywinpty==2.0.13
271 | PyYAML==6.0.1
272 | pyzmq==26.0.3
273 | qtconsole==5.5.2
274 | QtPy==2.4.1
275 | rapidfuzz==3.9.1
276 | rasterio==1.3.10
277 | reedsolo==1.7.0
278 | referencing==0.35.1
279 | regex==2024.4.16
280 | requests==2.31.0
281 | requests-oauthlib==1.3.1
282 | retina-face==0.0.17
283 | rfc3339-validator==0.1.4
284 | rfc3986-validator==0.1.1
285 | rich==13.7.0
286 | rpds-py==0.18.1
287 | RPLCD==1.3.1
288 | rsa==4.9
289 | scikit-fuzzy==0.4.2
290 | scikit-learn==1.3.2
291 | scipy==1.11.3
292 | seaborn==0.13.0
293 | selenium==4.27.1
294 | semver==3.0.2
295 | Send2Trash==1.8.3
296 | serial==0.0.97
297 | sh==2.0.6
298 | shapely==2.0.4
299 | simple-websocket==1.0.0
300 | simplekml==1.3.6
301 | six==1.16.0
302 | smbus2==0.4.3
303 | smmap==5.0.1
304 | sniffio==1.3.0
305 | snuggs==1.4.7
306 | sortedcontainers==2.4.0
307 | sounddevice==0.4.6
308 | soupsieve==2.5
309 | SpeechRecognition==3.10.0
310 | stack-data==0.6.3
311 | starlette==0.27.0
312 | stem==1.8.2
313 | supervision==0.19.0
314 | sympy==1.12
315 | tenacity==8.2.3
316 | tensorboard==2.18.0
317 | tensorboard-data-server==0.7.2
318 | tensorflow==2.16.1
319 | tensorflow-estimator==2.14.0
320 | tensorflow-intel==2.16.1
321 | tensorflow-io-gcs-filesystem==0.31.0
322 | termcolor==2.3.0
323 | terminado==0.18.1
324 | text-unidecode==1.3
325 | thop==0.1.1.post2209072238
326 | threadpoolctl==3.2.0
327 | tiktoken==0.3.3
328 | tinycss2==1.3.0
329 | toga==0.4.2
330 | toga-core==0.4.2
331 | toga-winforms==0.4.2
332 | tomli==2.0.1
333 | tomli_w==1.0.0
334 | torch==2.2.2
335 | torchvision==0.17.2
336 | tornado==6.4.1
337 | tqdm==4.66.2
338 | traitlets==5.14.2
339 | travertino==0.3.0
340 | trio==0.28.0
341 | trio-websocket==0.11.1
342 | types-python-dateutil==2.8.19.14
343 | types-PyYAML==6.0.12.20240311
344 | typing_extensions==4.12.2
345 | tzdata==2023.3
346 | ultralytics==8.1.45
347 | unlzw3==0.2.2
348 | uri-template==1.3.0
349 | uritemplate==4.1.1
350 | urllib3==2.0.7
351 | uvicorn==0.22.0
352 | virtualenv==20.25.0
353 | waitress==2.1.2
354 | watchfiles==0.21.0
355 | wcwidth==0.2.13
356 | webcolors==24.6.0
357 | webencodings==0.5.1
358 | websocket-client==1.8.0
359 | websockets==12.0
360 | Werkzeug==3.0.1
361 | widgetsnbextension==4.0.10
362 | wrapt==1.14.1
363 | wsproto==1.2.0
364 | xarray==2024.5.0
365 | xyzservices==2024.6.0
366 | yarl==1.9.4
367 | zipp==3.17.0
368 |
--------------------------------------------------------------------------------
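
The freeze above pins a complete development environment. A minimal sketch (standard library only) for auditing drift between such a pin list and the active interpreter; the check_pins helper and the pins.txt filename are illustrative assumptions, not part of this repo:

# Sketch: report packages whose installed version differs from a pin list.
# check_pins and the "pins.txt" path are illustrative, not part of this repo.
from importlib.metadata import version, PackageNotFoundError

def check_pins(path="pins.txt"):
    for raw in open(path, encoding="utf-8"):
        line = raw.strip()
        if not line or line.startswith("#") or "==" not in line:
            continue  # skip blanks, comments, and unpinned entries
        name, _, pinned = line.partition("==")
        try:
            installed = version(name)
        except PackageNotFoundError:
            print(f"{name}: not installed (pinned {pinned})")
            continue
        if installed != pinned:
            print(f"{name}: installed {installed}, pinned {pinned}")

if __name__ == "__main__":
    check_pins()

Each missing or mismatched pin prints one line; a silent run means the environment matches the freeze.
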
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/sc.js:
--------------------------------------------------------------------------------
1 | import fs from 'fs';
2 | import path from 'path';
3 | import crypto from 'crypto';
4 | import { minify } from 'html-minifier';
5 |
6 | // Generate a simple random nonce (note: Math.random is not cryptographically strong)
7 | function generateNonce() {
8 | return Math.random().toString(36).substring(2, 15) +
9 | Math.random().toString(36).substring(2, 15);
10 | }
11 |
12 | // Compute a file's SHA-384 hash for Subresource Integrity (SRI)
13 | function generateIntegrityHash(filePath) {
14 | const fileBuffer = fs.readFileSync(filePath);
15 | const hash = crypto.createHash('sha384');
16 | hash.update(fileBuffer);
17 | return hash.digest('base64');
18 | }
19 |
20 | // Return the current time as a locale-formatted string
21 | function getCurrentTime() {
22 | const now = new Date();
23 | return now.toLocaleString(); // formatted according to the system locale
24 | }
25 |
26 | async function generateHtml() {
27 | // Generate the nonce applied to each element in the page
28 | const nonce = generateNonce();
29 |
30 | const jsFiles = ['jws.js','dfs.js'];
31 |
32 | const cspContent = [
33 | `default-src 'self' https://4211421036.github.io`,
34 | `script-src 'self' 'nonce-${nonce}' https://4211421036.github.io`,
35 | `style-src 'self' 'nonce-${nonce}' https://4211421036.github.io`,
36 | "object-src 'none'",
37 | "base-uri 'self'",
38 | "img-src 'self' data: https://4211421036.github.io",
39 | "font-src 'self' https://4211421036.github.io",
40 | "media-src 'self' https://4211421036.github.io",
41 | "connect-src 'self' https://4211421036.github.io",
42 | "form-action 'self'",
43 | "manifest-src 'self' https://4211421036.github.io",
44 | "worker-src 'self' blob: https://4211421036.github.io"
45 | ].join('; ');
46 |
47 | let htmlContent = `
48 | <!-- [head markup stripped in this dump (original lines 48-71): doctype, meta tags including the
49 |      Content-Security-Policy built from cspContent, nonce'd style and link tags, and icon/manifest
50 |      references; only the title text "Facemind" survived extraction] -->
72 | `;
73 | // Append each JavaScript file with integrity and crossorigin attributes
74 | jsFiles.forEach(file => {
75 | const filePath = path.join(process.cwd(), file);
76 | const integrityHash = generateIntegrityHash(filePath);
77 | // [script tag reconstructed from context; the original markup was stripped in this dump]
78 | htmlContent += `
79 | <script src="${file}" integrity="sha384-${integrityHash}" crossorigin="anonymous" nonce="${nonce}"></script>`;
80 | });
81 | htmlContent += `
82 | <!-- [body markup stripped in this dump (original lines 82-316). Recoverable text: the "Face Mind"
83 |      hero with tagline "Face the Future of Mental Health"; the description "Solution Application
84 |      based on Computer Vision, and Integrated with Instagram Apps"; "Install Application" and
85 |      "Close Apps" buttons; the notice "Note that you won't be able to proceed to the next touch
86 |      session unless you complete the check-in"; and an install dialog with "Install Application",
87 |      "Yes, Install", and "Cancel" actions.] -->
317 | `;
318 |
319 | try {
320 | // Minify the generated HTML
321 | const minifiedHtml = await minify(htmlContent, {
322 | collapseWhitespace: true, // strip whitespace and blank lines
323 | removeComments: true, // strip comments
324 | removeRedundantAttributes: true, // drop attributes with default values
325 | useShortDoctype: true, // use the short HTML5 doctype
326 | minifyJS: true, // minify inline JS
327 | minifyCSS: true // minify inline CSS
328 | });
329 |
330 | // Output path for the generated HTML file
331 | const outputPath = path.join(process.cwd(), 'index.html');
332 |
333 | // Write the minified HTML to disk
334 | fs.writeFileSync(outputPath, minifiedHtml);
335 | console.log('HTML file generated and minified at:', outputPath);
337 | } catch (error) {
338 | console.error('Error during minification:', error);
339 | }
340 | }
341 |
342 | // Generate HTML
343 | generateHtml();
344 |
--------------------------------------------------------------------------------
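
generateIntegrityHash above produces the value browsers verify for Subresource Integrity: a SHA-384 digest of the exact file bytes, base64-encoded and prefixed with the algorithm name in the script tag's integrity attribute. A minimal Python sketch of the same computation (the sri_sha384 name is an illustrative assumption), useful for cross-checking the values sc.js embeds:

import base64
import hashlib

def sri_sha384(path):
    # Same digest sc.js computes: SHA-384 over the raw file bytes, base64-encoded.
    # Browsers compare it against the script tag's integrity="sha384-..." value.
    with open(path, "rb") as f:
        digest = hashlib.sha384(f.read()).digest()
    return "sha384-" + base64.b64encode(digest).decode("ascii")

# Prints "sha384-<base64 digest>" for one of the bundled scripts.
print(sri_sha384("jws.js"))

If the printed value matches the integrity attribute in the generated index.html, the file on disk is byte-identical to what the page will accept.
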
/app.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import cv2
3 | import numpy as np
4 | import mediapipe as mp
5 | import csv
6 | import datetime
7 | import pandas as pd
8 | import matplotlib.pyplot as plt
9 | from selenium import webdriver
10 | from selenium.webdriver.common.by import By
11 | from selenium.webdriver.common.keys import Keys
12 | from selenium.webdriver.support.ui import WebDriverWait
13 | from selenium.webdriver.support import expected_conditions as EC
14 | from PyQt5.QtWidgets import (QApplication, QLabel, QPushButton, QVBoxLayout,
15 | QWidget, QMainWindow, QLineEdit, QMessageBox, QAction)
16 | from PyQt5.QtCore import QTimer, Qt
17 | from PyQt5.QtGui import QPixmap, QImage
18 | import subprocess
19 | import pkg_resources
20 | required = {'opencv-python', 'pandas', 'matplotlib', 'numpy', 'mediapipe', 'selenium', 'PyQt5'}  # csv and datetime are stdlib modules, not pip packages
21 | installed = {pkg.key for pkg in pkg_resources.working_set}
22 | missing = required - installed
23 |
24 | if missing:
25 | python = sys.executable
26 | subprocess.check_call([python, '-m', 'pip', 'install', *missing])
27 |
28 | def save_to_database(prediction):
29 | filename = "mental_health_predictions.csv"
30 | current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
31 | with open(filename, mode='a', newline='') as file:
32 | writer = csv.writer(file)
33 | writer.writerow([current_time, prediction])
34 |
35 | def show_graph(date_filter=None):
36 | # Read logged predictions from the CSV file
37 | data = pd.read_csv("mental_health_predictions.csv", names=["Time", "Prediction"])
38 | data["Time"] = pd.to_datetime(data["Time"])
39 |
40 | if date_filter:
41 | data = data[data["Time"].dt.date == date_filter]
42 |
43 | # Map prediction labels to numeric values for plotting
44 | predictions_map = {"Neutral": 0, "Fatigue Detected": 1, "Positive Mood": 2, "Stressed/Concerned": 3}
45 | data["Prediction Value"] = data["Prediction"].map(predictions_map)
46 |
47 | # Plot each prediction class in its own color
48 | plt.figure(figsize=(12, 8))
49 | colors = {'Neutral': 'blue', 'Fatigue Detected': 'red', 'Positive Mood': 'green', 'Stressed/Concerned': 'orange'}
50 | for prediction, group_data in data.groupby("Prediction"):
51 | plt.plot_date(group_data["Time"], group_data["Prediction Value"], linestyle='solid', marker='o', color=colors[prediction], label=prediction)
52 |
53 | plt.xlabel("Time")
54 | plt.ylabel("Prediction")
55 | plt.title("Mental Health Prediction Monitoring")
56 | plt.xticks(rotation=45)
57 | plt.tight_layout()
58 | plt.legend()
59 |
60 | # Display the chart
61 | plt.show()
62 |
63 | # Share of the most frequent prediction (no ground-truth labels, so this is a consistency score rather than true accuracy)
64 | total_predictions = len(data)
65 | correct_predictions = data["Prediction"].value_counts().max()
66 | accuracy = (correct_predictions / total_predictions) * 100
67 |
68 | print(f"Total Predictions: {total_predictions}")
69 | print(f"Most Frequent Prediction Count: {correct_predictions}")
70 | print(f"Majority Share: {accuracy:.2f}%")
71 |
72 | # Final emotion prediction (most recent log entry)
73 | final_prediction = data["Prediction"].iloc[-1]
74 | print(f"Final emotion prediction: {final_prediction}")
75 |
76 | # Suggestions for managing or easing each detected emotion
77 | suggestions = {
78 | "Neutral": "Maintain your current state and keep doing positive activities.",
79 | "Fatigue Detected": "Get enough rest, prioritize quality sleep, and avoid excessive stress.",
80 | "Positive Mood": "Keep doing the activities that make you happy and share that happiness with others.",
81 | "Stressed/Concerned": "Try relaxation, meditation, or other calming activities. Do not hesitate to seek professional help if needed."
82 | }
83 | print(f"Suggestion: {suggestions.get(final_prediction, 'No suggestion available.')}")
84 |
85 | # Detected values overlaid with a rolling-mean trend
86 | plt.figure(figsize=(12, 8))
87 | plt.plot(data["Time"], data["Prediction Value"], label="Detected Emotion", color='blue', linestyle='solid', marker='o')
88 | plt.plot(data["Time"], data["Prediction Value"].rolling(window=10).mean(), label="Predicted Emotion", color='red', linestyle='dashed')
89 | plt.xlabel("Time")
90 | plt.ylabel("Prediction Value")
91 | plt.title("Detected vs Predicted Emotion")
92 | plt.xticks(rotation=45)
93 | plt.tight_layout()
94 | plt.legend()
95 | plt.show()
96 |
97 | class SplashScreen(QMainWindow):
98 | def __init__(self):
99 | super().__init__()
100 | self.setWindowTitle("Splash Screen")
101 | self.setFixedSize(800, 600)
102 | self.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
103 | layout = QVBoxLayout()
104 | logo = QLabel()
105 | pixmap = QPixmap("logofm.png").scaled(400, 136, Qt.KeepAspectRatio, Qt.SmoothTransformation)
106 | logo.setPixmap(pixmap)
107 | logo.setAlignment(Qt.AlignCenter)
108 | label = QLabel("Welcome to Mental Health App")
109 | label.setAlignment(Qt.AlignCenter)
110 | layout.addStretch()
111 | layout.addWidget(logo)
112 | layout.addWidget(label)
113 | layout.addStretch()
114 | container = QWidget()
115 | container.setObjectName("container")
116 | container.setLayout(layout)
117 | self.setStyleSheet("""
118 | QMainWindow {
119 | border-radius: 20px;
120 | background-color: white;
121 | }
122 | """)
123 | self.setCentralWidget(container)
124 |
125 | def showSplash(self):
126 | self.show()
127 | QTimer.singleShot(3000, self.close)
128 |
129 |
130 | class LoginWindow(QMainWindow):
131 | def __init__(self):
132 | super().__init__()
133 | self.setWindowTitle("Login")
134 | self.setFixedSize(800, 600)
135 | self.setStyleSheet("""
136 | QMainWindow {
137 | background-color: #f0f0f0;
138 | }
139 | QLabel {
140 | font-size: 18px;
141 | color: #333;
142 | }
143 | QLineEdit {
144 | font-size: 16px;
145 | padding: 10px;
146 | border: 1px solid #ccc;
147 | border-radius: 5px;
148 | }
149 | QPushButton {
150 | font-size: 16px;
151 | padding: 10px;
152 | background-color: #007BFF;
153 | color: white;
154 | border: none;
155 | border-radius: 5px;
156 | }
157 | QPushButton:disabled {
158 | background-color: #cccccc;
159 | }
160 | QPushButton:hover {
161 | background-color: #0056b3;
162 | }
163 | QMenuBar {
164 | background-color: #f0f0f0;
165 | }
166 | QMenuBar::item {
167 | background-color: #f0f0f0;
168 | color: #333;
169 | }
170 | QMenuBar::item:selected {
171 | background-color: #e0e0e0;
172 | color: #333;
173 | }
174 | QMenu {
175 | background-color: #f0f0f0;
176 | color: #333;
177 | }
178 | QMenu::item:selected {
179 | background-color: #e0e0e0;
180 | color: #333;
181 | }
182 | """)
183 | menubar = self.menuBar()
184 | file_menu = menubar.addMenu('File')
185 | help_menu = menubar.addMenu('Help')
186 | about_menu = menubar.addMenu('About')
187 | monitoring_menu = menubar.addMenu('Monitoring')
188 |
189 | exit_action = QAction('Exit', self)
190 | exit_action.triggered.connect(QApplication.instance().quit)
191 | file_menu.addAction(exit_action)
192 |
193 | help_action = QAction('Help', self)
194 | help_action.triggered.connect(self.show_help)
195 | help_menu.addAction(help_action)
196 |
197 | about_action = QAction('About', self)
198 | about_action.triggered.connect(self.show_about)
199 | about_menu.addAction(about_action)
200 |
201 | show_graph_action = QAction('Show Monitoring Graph', self)
202 | show_graph_action.triggered.connect(self.show_graph)
203 | monitoring_menu.addAction(show_graph_action)
204 |
205 | layout = QVBoxLayout()
206 | logo = QLabel()
207 | pixmap = QPixmap("layarui.png").scaledToWidth(self.width(), Qt.SmoothTransformation)
208 | logo.setPixmap(pixmap)
209 | logo.setAlignment(Qt.AlignCenter)
210 | layout.addWidget(logo)
211 | label = QLabel("Login to your account")
212 | layout.addWidget(label)
213 | label.setAlignment(Qt.AlignCenter)
214 | self.username_input = QLineEdit()
215 | self.username_input.setPlaceholderText('Username')
216 | self.password_input = QLineEdit()
217 | self.password_input.setPlaceholderText('Password')
218 | self.password_input.setEchoMode(QLineEdit.Password)
219 | self.login_button = QPushButton('Login to Instagram')
220 | self.login_button.clicked.connect(self.login_instagram)
221 | self.start_analysis_button = QPushButton('Start Mental Health Analysis')
222 | self.start_analysis_button.clicked.connect(self.start_analysis)
223 | self.start_analysis_button.setEnabled(False)
224 | self.status_label = QLabel('')
225 | layout.addWidget(self.username_input)
226 | layout.addWidget(self.password_input)
227 | layout.addWidget(self.login_button)
228 | layout.addWidget(self.start_analysis_button)
229 | layout.addWidget(self.status_label)
230 | container = QWidget()
231 | container.setLayout(layout)
232 | self.setCentralWidget(container)
233 | self.driver = None
234 |
235 | def show_graph(self):
236 | # Delegate to the module-level plotting function
237 | show_graph()
238 |
239 | def show_help(self):
240 | QMessageBox.information(self, "Help", "This is the help section.")
241 |
242 | def show_about(self):
243 | QMessageBox.information(self, "About", "Mental Health App v1.0")
244 |
245 | def login_instagram(self):
246 | self.status_label.setText('Logging in...')
247 | self.driver = webdriver.Chrome()
248 | self.driver.get('https://www.instagram.com/accounts/login/')
249 | wait = WebDriverWait(self.driver, 10)
250 | username_field = wait.until(EC.presence_of_element_located((By.NAME, 'username')))
251 | username_field.send_keys(self.username_input.text())
252 | password_field = self.driver.find_element(By.NAME, 'password')
253 | password_field.send_keys(self.password_input.text())
254 | password_field.send_keys(Keys.RETURN)
255 | wait.until(EC.url_contains('instagram.com'))
256 | if 'login' not in self.driver.current_url:
257 | self.status_label.setText('Login successful! You can now start the analysis.')
258 | self.start_analysis_button.setEnabled(True)
259 | self.login_button.setEnabled(False)
260 | else:
261 | self.status_label.setText('Login failed. Please check your credentials.')
262 |
263 | def start_analysis(self):
264 | if self.driver:
265 | self.main_window = MentalHealthPredictor()
266 | self.main_window.show()
267 | self.hide()
268 | self.monitor_instagram_posts()
269 | else:
270 | self.status_label.setText('Please login first')
271 |
272 | def monitor_instagram_posts(self):
273 | # Monitor Instagram posts and analyze user's emotion
274 | self.timer = QTimer()
275 | self.timer.timeout.connect(self.check_posts)
276 | self.timer.start(1000) # Check every second
277 |
278 | def check_posts(self):
279 | self.driver.get('https://www.instagram.com/')
280 | wait = WebDriverWait(self.driver, 10)
281 | posts = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'article div div div div a')))
282 | for post in posts:
283 | post.click()
284 | wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.C4VMK span')))
285 | self.main_window.update_frame()
286 | self.driver.back()
287 | # Check the latest prediction
288 | latest_prediction = self.main_window.prediction_label.text().split(': ')[-1]
289 | if latest_prediction in ["Fatigue Detected", "Stressed/Concerned"]:
290 | self.driver.quit()
291 | QMessageBox.warning(self, "Warning", "Detected mental health issue. Instagram will be closed.")
292 | break
293 |
294 | def closeEvent(self, event):
295 | if self.driver:
296 | self.driver.quit()
297 | event.accept()
298 |
299 |
300 | class MentalHealthPredictor(QWidget):
301 | def __init__(self):
302 | super().__init__()
303 | self.initUI()
304 | self.setupFaceDetection()
305 | self.startVideo()
306 |
307 | def setupFaceDetection(self):
308 | self.mp_face_mesh = mp.solutions.face_mesh
309 | self.face_mesh = self.mp_face_mesh.FaceMesh(
310 | max_num_faces=1,
311 | min_detection_confidence=0.5,
312 | min_tracking_confidence=0.5
313 | )
314 | self.mp_drawing = mp.solutions.drawing_utils
315 | self.drawing_spec = self.mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
316 | self.cap = cv2.VideoCapture(0)
317 |
318 | def initUI(self):
319 | self.setWindowTitle('Mental Health Analysis')
320 | self.setMinimumSize(800, 600)
321 | layout = QVBoxLayout()
322 | self.image_label = QLabel()
323 | self.image_label.setAlignment(Qt.AlignCenter)
324 | self.prediction_label = QLabel('Analyzing...')
325 | self.prediction_label.setAlignment(Qt.AlignCenter)
326 | self.back_button = QPushButton('Back to Login')
327 | self.back_button.clicked.connect(self.back_to_login)
328 | layout.addWidget(self.image_label)
329 | layout.addWidget(self.prediction_label)
330 | layout.addWidget(self.back_button)
331 | self.setLayout(layout)
332 |
333 | def back_to_login(self):
334 | for widget in QApplication.topLevelWidgets():
335 | if isinstance(widget, LoginWindow):
336 | widget.show()
337 | self.close()
338 |
339 | def startVideo(self):
340 | self.timer = QTimer()
341 | self.timer.timeout.connect(self.update_frame)
342 | self.timer.start(30)
343 |
344 | def update_frame(self):
345 | ret, frame = self.cap.read()
346 | if ret:
347 | frame = cv2.flip(frame, 1)
348 | rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
349 | results = self.face_mesh.process(rgb_frame)
350 | if results.multi_face_landmarks:
351 | for face_landmarks in results.multi_face_landmarks:
352 | self.mp_drawing.draw_landmarks(
353 | image=frame,
354 | landmark_list=face_landmarks,
355 | connections=self.mp_face_mesh.FACEMESH_TESSELATION,
356 | landmark_drawing_spec=self.drawing_spec,
357 | connection_drawing_spec=self.drawing_spec)
358 | mental_state = self.analyze_mental_state(face_landmarks, frame.shape)
359 | self.prediction_label.setText(f'Mental State: {mental_state}')
360 | save_to_database(mental_state)
361 | rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
362 | h, w, ch = rgb_frame.shape
363 | bytes_per_line = ch * w
364 | qt_image = QImage(rgb_frame.data, w, h, bytes_per_line, QImage.Format_RGB888)
365 | self.image_label.setPixmap(QPixmap.fromImage(qt_image))
366 |
367 | def analyze_mental_state(self, landmarks, frame_shape):
368 | height, width = frame_shape[:2]
369 | points = np.array([(lm.x * width, lm.y * height) for lm in landmarks.landmark])
370 | eye_ratio = self.calculate_eye_ratio(points)
371 | mouth_ratio = self.calculate_mouth_ratio(points)
372 | brow_ratio = self.calculate_brow_ratio(points)
373 | if eye_ratio < 0.2:
374 | return "Fatigue Detected"
375 | elif mouth_ratio > 0.5:
376 | return "Positive Mood"
377 | elif brow_ratio > 0.3:
378 | return "Stressed/Concerned"
379 | else:
380 | return "Neutral"
381 |
382 |
383 | def calculate_eye_ratio(self, points):
384 | left_eye = [33, 160, 158, 133, 153, 144]
385 | right_eye = [362, 385, 387, 263, 373, 380]
386 | left_eye_points = points[left_eye]
387 | right_eye_points = points[right_eye]
388 | def eye_aspect_ratio(eye_points):
389 | vertical_dist = np.linalg.norm(eye_points[1] - eye_points[5]) + \
390 | np.linalg.norm(eye_points[2] - eye_points[4])
391 | horizontal_dist = np.linalg.norm(eye_points[0] - eye_points[3]) * 2
392 | return vertical_dist / horizontal_dist if horizontal_dist != 0 else 0
393 | left_ear = eye_aspect_ratio(left_eye_points)
394 | right_ear = eye_aspect_ratio(right_eye_points)
395 | return (left_ear + right_ear) / 2
396 |
397 | def calculate_mouth_ratio(self, points):
398 | mouth_points = [61, 291, 0, 17]
399 | mouth_pts = points[mouth_points]
400 | vertical_dist = np.linalg.norm(mouth_pts[2] - mouth_pts[3])
401 | horizontal_dist = np.linalg.norm(mouth_pts[0] - mouth_pts[1])
402 | return vertical_dist / horizontal_dist if horizontal_dist != 0 else 0
403 |
404 | def calculate_brow_ratio(self, points):
405 | left_brow = [70, 63, 105, 66, 107]
406 | left_eye = [159, 145, 133]
407 | brow_points = points[left_brow]
408 | eye_points = points[left_eye]
409 | brow_height = np.mean(brow_points[:, 1])
410 | eye_height = np.mean(eye_points[:, 1])
411 | return (eye_height - brow_height) / (points[152, 1] - points[10, 1])
412 |
413 | def closeEvent(self, event):
414 | self.cap.release()
415 | self.face_mesh.close()
416 | event.accept()
417 |
418 |
419 | if __name__ == '__main__':
420 | app = QApplication(sys.argv)
421 | splash = SplashScreen()
422 | login_window = LoginWindow()
423 | splash.showSplash()
424 | QTimer.singleShot(3000, login_window.show)
425 | sys.exit(app.exec_())
426 |
--------------------------------------------------------------------------------
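
analyze_mental_state in app.py hangs its fatigue decision on the eye aspect ratio: the summed vertical lid distances divided by twice the horizontal eye width, averaged over both eyes, with values below 0.2 labeled "Fatigue Detected". A self-contained sketch using made-up 2-D landmark coordinates (illustrative values, not real MediaPipe output) that shows how the threshold separates open from nearly closed eyes:

import numpy as np

def eye_aspect_ratio(eye):
    # eye: six (x, y) points ordered [outer, top1, top2, inner, bottom2, bottom1],
    # mirroring the index order used in calculate_eye_ratio.
    vertical = np.linalg.norm(eye[1] - eye[5]) + np.linalg.norm(eye[2] - eye[4])
    horizontal = 2 * np.linalg.norm(eye[0] - eye[3])
    return vertical / horizontal if horizontal else 0.0

# Toy eyes: same width, different lid openings (illustrative coordinates).
open_eye = np.array([(0, 0), (1, -0.5), (2, -0.5), (3, 0), (2, 0.5), (1, 0.5)], float)
closed_eye = np.array([(0, 0), (1, -0.1), (2, -0.1), (3, 0), (2, 0.1), (1, 0.1)], float)

for label, eye in [("open", open_eye), ("nearly closed", closed_eye)]:
    ear = eye_aspect_ratio(eye)
    state = "Fatigue Detected" if ear < 0.2 else "eyes open"
    print(f"{label}: EAR={ear:.2f} -> {state}")

The open eye scores roughly 0.33 and the nearly closed one roughly 0.07, so the 0.2 cutoff splits the two cleanly; in the app the same arithmetic runs on pixel-scaled MediaPipe FaceMesh landmarks.
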
/facemind/index.html:
--------------------------------------------------------------------------------
1 | <!-- [Generated page; nearly all markup was stripped in this dump (original lines 1-483).
2 |      Recoverable content: title "Facemind"; the "Face Mind" hero with tagline "Face the Future
3 |      of Mental Health" and description "Solution Application based on Computer Vision, and
4 |      Integrated with Instagram Apps"; "Install Application" and "Close Apps" buttons; the notice
5 |      "Note that you won't be able to proceed to the next touch session unless you complete the
6 |      check-in"; and an install dialog ("Install Application", "Yes, Install", "Cancel").
7 |      This matches the template assembled by sc.js.] -->
--------------------------------------------------------------------------------
/mental_health_app/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import cv2
3 | import numpy as np
4 | import mediapipe as mp
5 | import csv
6 | import datetime
7 | import pandas as pd
8 | import matplotlib.pyplot as plt
9 | from selenium import webdriver
10 | from selenium.webdriver.common.by import By
11 | from selenium.webdriver.common.keys import Keys
12 | from selenium.webdriver.support.ui import WebDriverWait
13 | from selenium.webdriver.support import expected_conditions as EC
14 | from PyQt5.QtWidgets import (QApplication, QLabel, QPushButton, QVBoxLayout,
15 | QWidget, QMainWindow, QLineEdit, QMessageBox, QAction)
16 | from PyQt5.QtCore import QTimer, Qt
17 | from PyQt5.QtGui import QPixmap, QImage
18 | import subprocess
19 | import pkg_resources
20 | required = {'opencv-python', 'pandas', 'matplotlib', 'numpy', 'mediapipe', 'selenium', 'PyQt5'}
21 | installed = {pkg.key for pkg in pkg_resources.working_set}
22 | missing = required - installed
23 |
24 | if missing:
25 | python = sys.executable
26 | subprocess.check_call([python, '-m', 'pip', 'install', *missing])
27 |
28 | def save_to_database(prediction):
29 | filename = "mental_health_predictions.csv"
30 | current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
31 | with open(filename, mode='a', newline='') as file:
32 | writer = csv.writer(file)
33 | writer.writerow([current_time, prediction])
34 |
35 | # Display the mental health prediction monitoring graph and compute summary statistics
36 | def show_graph(date_filter=None):
37 | # Read logged predictions from the CSV file
38 | data = pd.read_csv("mental_health_predictions.csv", names=["Time", "Prediction"])
39 | data["Time"] = pd.to_datetime(data["Time"])
40 |
41 | if date_filter:
42 | data = data[data["Time"].dt.date == date_filter]
43 |
44 | # Map prediction labels to numeric values for plotting
45 | predictions_map = {"Neutral": 0, "Fatigue Detected": 1, "Positive Mood": 2, "Stressed/Concerned": 3}
46 | data["Prediction Value"] = data["Prediction"].map(predictions_map)
47 |
48 | # Plot each prediction class in its own color
49 | plt.figure(figsize=(12, 8))
50 | colors = {'Neutral': 'blue', 'Fatigue Detected': 'red', 'Positive Mood': 'green', 'Stressed/Concerned': 'orange'}
51 | for prediction, group_data in data.groupby("Prediction"):
52 | plt.plot_date(group_data["Time"], group_data["Prediction Value"], linestyle='solid', marker='o', color=colors[prediction], label=prediction)
53 |
54 | plt.xlabel("Time")
55 | plt.ylabel("Prediction")
56 | plt.title("Mental Health Prediction Monitoring")
57 | plt.xticks(rotation=45)
58 | plt.tight_layout()
59 | plt.legend()
60 |
61 | # Display the chart
62 | plt.show()
63 |
64 | # Share of the most frequent prediction (no ground-truth labels, so this is a consistency score rather than true accuracy)
65 | total_predictions = len(data)
66 | correct_predictions = data["Prediction"].value_counts().max()
67 | accuracy = (correct_predictions / total_predictions) * 100
68 |
69 | print(f"Total Predictions: {total_predictions}")
70 | print(f"Most Frequent Prediction Count: {correct_predictions}")
71 | print(f"Majority Share: {accuracy:.2f}%")
72 |
73 | # Final emotion prediction (most recent log entry)
74 | final_prediction = data["Prediction"].iloc[-1]
75 | print(f"Final emotion prediction: {final_prediction}")
76 |
77 | # Suggestions for managing or easing each detected emotion
78 | suggestions = {
79 | "Neutral": "Maintain your current state and keep doing positive activities.",
80 | "Fatigue Detected": "Get enough rest, prioritize quality sleep, and avoid excessive stress.",
81 | "Positive Mood": "Keep doing the activities that make you happy and share that happiness with others.",
82 | "Stressed/Concerned": "Try relaxation, meditation, or other calming activities. Do not hesitate to seek professional help if needed."
83 | }
84 | print(f"Suggestion: {suggestions.get(final_prediction, 'No suggestion available.')}")
85 |
86 | # Detected values overlaid with a rolling-mean trend
87 | plt.figure(figsize=(12, 8))
88 | plt.plot(data["Time"], data["Prediction Value"], label="Detected Emotion", color='blue', linestyle='solid', marker='o')
89 | plt.plot(data["Time"], data["Prediction Value"].rolling(window=10).mean(), label="Predicted Emotion", color='red', linestyle='dashed')
90 | plt.xlabel("Time")
91 | plt.ylabel("Prediction Value")
92 | plt.title("Detected vs Predicted Emotion")
93 | plt.xticks(rotation=45)
94 | plt.tight_layout()
95 | plt.legend()
96 | plt.show()
97 |
98 | class SplashScreen(QMainWindow):
99 | def __init__(self):
100 | super().__init__()
101 | self.setWindowTitle("Splash Screen")
102 | self.setFixedSize(800, 600)
103 | self.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
104 | layout = QVBoxLayout()
105 | logo = QLabel()
106 | pixmap = QPixmap("logofm.png").scaled(400, 136, Qt.KeepAspectRatio, Qt.SmoothTransformation)
107 | logo.setPixmap(pixmap)
108 | logo.setAlignment(Qt.AlignCenter)
109 | label = QLabel("Welcome to Mental Health App")
110 | label.setAlignment(Qt.AlignCenter)
111 | layout.addStretch()
112 | layout.addWidget(logo)
113 | layout.addWidget(label)
114 | layout.addStretch()
115 | container = QWidget()
116 | container.setObjectName("container")
117 | container.setLayout(layout)
118 | self.setStyleSheet("""
119 | QMainWindow {
120 | border-radius: 20px;
121 | background-color: white;
122 | }
123 | """)
124 | self.setCentralWidget(container)
138 |
139 | def showSplash(self):
140 | self.show()
141 | QTimer.singleShot(3000, self.close)
142 |
143 |
144 | class LoginWindow(QMainWindow):
145 | def __init__(self):
146 | super().__init__()
147 | self.setWindowTitle("Login")
148 | self.setFixedSize(800, 600)
149 | self.setStyleSheet("""
150 | QMainWindow {
151 | background-color: #f0f0f0;
152 | }
153 | QLabel {
154 | font-size: 18px;
155 | color: #333;
156 | }
157 | QLineEdit {
158 | font-size: 16px;
159 | padding: 10px;
160 | border: 1px solid #ccc;
161 | border-radius: 5px;
162 | }
163 | QPushButton {
164 | font-size: 16px;
165 | padding: 10px;
166 | background-color: #007BFF;
167 | color: white;
168 | border: none;
169 | border-radius: 5px;
170 | }
171 | QPushButton:disabled {
172 | background-color: #cccccc;
173 | }
174 | QPushButton:hover {
175 | background-color: #0056b3;
176 | }
177 | QMenuBar {
178 | background-color: #f0f0f0;
179 | }
180 | QMenuBar::item {
181 | background-color: #f0f0f0;
182 | color: #333;
183 | }
184 | QMenuBar::item:selected {
185 | background-color: #e0e0e0;
186 | color: #333;
187 | }
188 | QMenu {
189 | background-color: #f0f0f0;
190 | color: #333;
191 | }
192 | QMenu::item:selected {
193 | background-color: #e0e0e0;
194 | color: #333;
195 | }
196 | """)
197 | menubar = self.menuBar()
198 | file_menu = menubar.addMenu('File')
199 | help_menu = menubar.addMenu('Help')
200 | about_menu = menubar.addMenu('About')
201 | monitoring_menu = menubar.addMenu('Monitoring')
202 |
203 | exit_action = QAction('Exit', self)
204 | exit_action.triggered.connect(QApplication.instance().quit)
205 | file_menu.addAction(exit_action)
206 |
207 | help_action = QAction('Help', self)
208 | help_action.triggered.connect(self.show_help)
209 | help_menu.addAction(help_action)
210 |
211 | about_action = QAction('About', self)
212 | about_action.triggered.connect(self.show_about)
213 | about_menu.addAction(about_action)
214 |
215 | show_graph_action = QAction('Show Monitoring Graph', self)
216 | show_graph_action.triggered.connect(self.show_graph)
217 | monitoring_menu.addAction(show_graph_action)
218 |
219 | layout = QVBoxLayout()
220 | logo = QLabel()
221 | pixmap = QPixmap("layarui.png").scaledToWidth(self.width(), Qt.SmoothTransformation)
222 | logo.setPixmap(pixmap)
223 | logo.setAlignment(Qt.AlignCenter)
224 | layout.addWidget(logo)
225 | label = QLabel("Login to your account")
226 | layout.addWidget(label)
227 | label.setAlignment(Qt.AlignCenter)
228 | self.username_input = QLineEdit()
229 | self.username_input.setPlaceholderText('Username')
230 | self.password_input = QLineEdit()
231 | self.password_input.setPlaceholderText('Password')
232 | self.password_input.setEchoMode(QLineEdit.Password)
233 | self.login_button = QPushButton('Login to Instagram')
234 | self.login_button.clicked.connect(self.login_instagram)
235 | self.start_analysis_button = QPushButton('Start Mental Health Analysis')
236 | self.start_analysis_button.clicked.connect(self.start_analysis)
237 | self.start_analysis_button.setEnabled(False)
238 | self.status_label = QLabel('')
239 | layout.addWidget(self.username_input)
240 | layout.addWidget(self.password_input)
241 | layout.addWidget(self.login_button)
242 | layout.addWidget(self.start_analysis_button)
243 | layout.addWidget(self.status_label)
244 | container = QWidget()
245 | container.setLayout(layout)
246 | self.setCentralWidget(container)
247 | self.driver = None
248 |
249 | def show_graph(self):
250 | # Delegate to the module-level plotting function
251 | show_graph()
252 |
253 | def show_help(self):
254 | QMessageBox.information(self, "Help", "This is the help section.")
255 |
256 | def show_about(self):
257 | QMessageBox.information(self, "About", "Mental Health App v1.0")
258 |
259 | def login_instagram(self):
260 | self.status_label.setText('Logging in...')
261 | self.driver = webdriver.Chrome()
262 | self.driver.get('https://www.instagram.com/accounts/login/')
263 | wait = WebDriverWait(self.driver, 10)
264 | username_field = wait.until(EC.presence_of_element_located((By.NAME, 'username')))
265 | username_field.send_keys(self.username_input.text())
266 | password_field = self.driver.find_element(By.NAME, 'password')
267 | password_field.send_keys(self.password_input.text())
268 | password_field.send_keys(Keys.RETURN)
269 | wait.until(EC.url_contains('instagram.com'))
270 | if 'login' not in self.driver.current_url:
271 | self.status_label.setText('Login successful! You can now start the analysis.')
272 | self.start_analysis_button.setEnabled(True)
273 | self.login_button.setEnabled(False)
274 | else:
275 | self.status_label.setText('Login failed. Please check your credentials.')
279 |
280 | def start_analysis(self):
281 | if self.driver:
282 | self.main_window = MentalHealthPredictor()
283 | self.main_window.show()
284 | self.hide()
285 | self.monitor_instagram_posts()
286 | else:
287 | self.status_label.setText('Please login first')
288 |
289 | def monitor_instagram_posts(self):
290 | # Monitor Instagram posts and analyze user's emotion
291 | self.timer = QTimer()
292 | self.timer.timeout.connect(self.check_posts)
293 | self.timer.start(1000) # Check every second
294 |
295 | def check_posts(self):
296 | self.driver.get('https://www.instagram.com/')
297 | wait = WebDriverWait(self.driver, 10)
298 | posts = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'article div div div div a')))
299 | for post in posts:
300 | post.click()
301 | wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.C4VMK span')))
302 | self.main_window.update_frame()
303 | self.driver.back()
304 | # Check the latest prediction
305 | latest_prediction = self.main_window.prediction_label.text().split(': ')[-1]
306 | if latest_prediction in ["Fatigue Detected", "Stressed/Concerned"]:
307 | self.driver.quit()
308 | QMessageBox.warning(self, "Warning", "Detected mental health issue. Instagram will be closed.")
309 | break
310 |
311 | def closeEvent(self, event):
312 | if self.driver:
313 | self.driver.quit()
314 | event.accept()
315 |
316 |
317 | class MentalHealthPredictor(QWidget):
318 | def __init__(self):
319 | super().__init__()
320 | self.initUI()
321 | self.setupFaceDetection()
322 | self.startVideo()
323 |
324 | def setupFaceDetection(self):
325 | self.mp_face_mesh = mp.solutions.face_mesh
326 | self.face_mesh = self.mp_face_mesh.FaceMesh(
327 | max_num_faces=1,
328 | min_detection_confidence=0.5,
329 | min_tracking_confidence=0.5
330 | )
331 | self.mp_drawing = mp.solutions.drawing_utils
332 | self.drawing_spec = self.mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
333 | self.cap = cv2.VideoCapture(0)
334 |
335 | def initUI(self):
336 | self.setWindowTitle('Mental Health Analysis')
337 | self.setMinimumSize(800, 600)
338 | layout = QVBoxLayout()
339 | self.image_label = QLabel()
340 | self.image_label.setAlignment(Qt.AlignCenter)
341 | self.prediction_label = QLabel('Analyzing...')
342 | self.prediction_label.setAlignment(Qt.AlignCenter)
343 | self.back_button = QPushButton('Back to Login')
344 | self.back_button.clicked.connect(self.back_to_login)
345 | layout.addWidget(self.image_label)
346 | layout.addWidget(self.prediction_label)
347 | layout.addWidget(self.back_button)
348 | self.setLayout(layout)
349 |
350 | def back_to_login(self):
351 | for widget in QApplication.topLevelWidgets():
352 | if isinstance(widget, LoginWindow):
353 | widget.show()
354 | self.close()
355 |
356 | def startVideo(self):
357 | self.timer = QTimer()
358 | self.timer.timeout.connect(self.update_frame)
359 | self.timer.start(30)
360 |
361 | def update_frame(self):
362 | ret, frame = self.cap.read()
363 | if ret:
364 | frame = cv2.flip(frame, 1)
365 | rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
366 | results = self.face_mesh.process(rgb_frame)
367 | if results.multi_face_landmarks:
368 | for face_landmarks in results.multi_face_landmarks:
369 | self.mp_drawing.draw_landmarks(
370 | image=frame,
371 | landmark_list=face_landmarks,
372 | connections=self.mp_face_mesh.FACEMESH_TESSELATION,
373 | landmark_drawing_spec=self.drawing_spec,
374 | connection_drawing_spec=self.drawing_spec)
375 | mental_state = self.analyze_mental_state(face_landmarks, frame.shape)
376 | self.prediction_label.setText(f'Mental State: {mental_state}')
377 | save_to_database(mental_state)
378 | rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
379 | h, w, ch = rgb_frame.shape
380 | bytes_per_line = ch * w
381 | qt_image = QImage(rgb_frame.data, w, h, bytes_per_line, QImage.Format_RGB888)
382 | self.image_label.setPixmap(QPixmap.fromImage(qt_image))
383 |
384 | def analyze_mental_state(self, landmarks, frame_shape):
385 | height, width = frame_shape[:2]
386 | points = np.array([(lm.x * width, lm.y * height) for lm in landmarks.landmark])
387 | eye_ratio = self.calculate_eye_ratio(points)
388 | mouth_ratio = self.calculate_mouth_ratio(points)
389 | brow_ratio = self.calculate_brow_ratio(points)
390 | if eye_ratio < 0.2:
391 | return "Fatigue Detected"
392 | elif mouth_ratio > 0.5:
393 | return "Positive Mood"
394 | elif brow_ratio > 0.3:
395 | return "Stressed/Concerned"
396 | else:
397 | return "Neutral"
398 |
399 |
400 | def calculate_eye_ratio(self, points):
401 | left_eye = [33, 160, 158, 133, 153, 144]
402 | right_eye = [362, 385, 387, 263, 373, 380]
403 | left_eye_points = points[left_eye]
404 | right_eye_points = points[right_eye]
405 | def eye_aspect_ratio(eye_points):
406 | vertical_dist = np.linalg.norm(eye_points[1] - eye_points[5]) + \
407 | np.linalg.norm(eye_points[2] - eye_points[4])
408 | horizontal_dist = np.linalg.norm(eye_points[0] - eye_points[3]) * 2
409 | return vertical_dist / horizontal_dist if horizontal_dist != 0 else 0
410 | left_ear = eye_aspect_ratio(left_eye_points)
411 | right_ear = eye_aspect_ratio(right_eye_points)
412 | return (left_ear + right_ear) / 2
413 |
414 | def calculate_mouth_ratio(self, points):
415 | mouth_points = [61, 291, 0, 17]
416 | mouth_pts = points[mouth_points]
417 | vertical_dist = np.linalg.norm(mouth_pts[2] - mouth_pts[3])
418 | horizontal_dist = np.linalg.norm(mouth_pts[0] - mouth_pts[1])
419 | return vertical_dist / horizontal_dist if horizontal_dist != 0 else 0
420 |
421 | def calculate_brow_ratio(self, points):
422 | left_brow = [70, 63, 105, 66, 107]
423 | left_eye = [159, 145, 133]
424 | brow_points = points[left_brow]
425 | eye_points = points[left_eye]
426 | brow_height = np.mean(brow_points[:, 1])
427 | eye_height = np.mean(eye_points[:, 1])
428 | return (eye_height - brow_height) / (points[152, 1] - points[10, 1])
429 |
430 | def closeEvent(self, event):
431 | self.cap.release()
432 | self.face_mesh.close()
433 | event.accept()
434 |
435 |
436 | if __name__ == '__main__':
437 | app = QApplication(sys.argv)
438 | splash = SplashScreen()
439 | login_window = LoginWindow()
440 | splash.showSplash()
441 | QTimer.singleShot(3000, login_window.show)
442 | sys.exit(app.exec_())
443 |
--------------------------------------------------------------------------------
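
show_graph in both entry points summarizes the prediction log the same way: the share of the most frequent label serves as a rough consistency score, and a rolling mean over the numeric label values supplies the dashed trend line. A compact pandas sketch of those two computations (the sample frame is illustrative; the column names and label map follow the code above):

import pandas as pd

# Illustrative stand-in for mental_health_predictions.csv.
data = pd.DataFrame({
    "Time": pd.date_range("2024-01-01 09:00", periods=6, freq="min"),
    "Prediction": ["Neutral", "Neutral", "Positive Mood",
                   "Neutral", "Stressed/Concerned", "Neutral"],
})

# Same label-to-number map used by show_graph.
predictions_map = {"Neutral": 0, "Fatigue Detected": 1,
                   "Positive Mood": 2, "Stressed/Concerned": 3}
data["Prediction Value"] = data["Prediction"].map(predictions_map)

# Majority-class share, the quantity show_graph reports as a percentage.
share = data["Prediction"].value_counts().max() / len(data) * 100
print(f"Majority share: {share:.2f}%")  # 66.67% for this toy log

# Rolling mean behind the dashed trend line (window=10 in the app;
# window=3 here so the short toy series yields values).
print(data["Prediction Value"].rolling(window=3).mean())
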