├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   └── workflows
│       ├── python-app.yml
│       └── python-publish.yml
├── .gitignore
├── .vscode
│   └── launch.json
├── LICENSE
├── README.md
├── __init__.py
├── app.py
├── clear-commits.bat
├── config.json
├── demo_images
│   ├── cover.png
│   ├── cover_p.png
│   └── cursed-gpt-promo.png
├── link.bf
├── main.py
├── redirect.py
├── requirements.txt
├── run-server.bat
├── run.bat
├── scripts
│   ├── __init__.py
│   ├── install_dependencies.py
│   ├── system
│   │   ├── __init__.py
│   │   ├── capture_photo.py
│   │   ├── clean_text.py
│   │   ├── generate_text.py
│   │   ├── play_audio.py
│   │   ├── stt.py
│   │   └── tts.py
│   ├── transformer.py
│   ├── transformer_s2t2s.py
│   ├── transformer_t2s.py
│   └── transformer_webcam.py
├── settings.json
├── setup.bat
├── setup.sh
├── static
│   └── css
│       └── style.css
└── templates
    └── index.html
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.json linguist-vendored=false
2 | *.json linguist-generated=false
3 | *.json linguist-documentation=false
4 | *.json linguist-detectable=true
5 |
6 | *.ini linguist-vendored=false
7 | *.ini linguist-generated=false
8 | *.ini linguist-documentation=false
9 | *.ini linguist-detectable=true
10 |
11 | *.txt linguist-vendored=false
12 | *.txt linguist-generated=false
13 | *.txt linguist-documentation=false
14 | *.txt linguist-detectable=true
15 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | custom: [ buymeacoffee.com/cursedentertainment, ko-fi.com/cursedentertainment ]
2 |
--------------------------------------------------------------------------------
/.github/workflows/python-app.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: Python application
5 |
6 | on:
7 | push:
8 | branches: [ "main" ]
9 | pull_request:
10 | branches: [ "main" ]
11 |
12 | permissions:
13 | contents: read
14 |
15 | jobs:
16 | build:
17 |
18 | runs-on: ubuntu-latest
19 |
20 | steps:
21 | - uses: actions/checkout@v3
22 | - name: Set up Python 3.10
23 | uses: actions/setup-python@v3
24 | with:
25 | python-version: "3.10"
26 | - name: Install dependencies
27 | run: |
28 | python -m pip install --upgrade pip
29 | pip install flake8 pytest
30 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
31 | - name: Lint with flake8
32 | run: |
33 | # stop the build if there are Python syntax errors or undefined names
34 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
35 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
36 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
37 | - name: Test with pytest
38 | run: |
39 | pytest
40 |
--------------------------------------------------------------------------------
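The workflow's final step runs `pytest`, but the repository does not ship any tests, so that step has nothing to collect (pytest treats an empty collection as a failure). A minimal, hypothetical test for the pure helper in `scripts/system/clean_text.py` is sketched below; the file `test_clean_text.py` is an assumption, not part of the repo, and is placed at the repository root so `pytest` can import the `scripts` package without extra path setup.

```python
# test_clean_text.py -- hypothetical test file, not part of the repository.
# Placing it at the repo root lets the plain "pytest" step in python-app.yml
# discover it and import the scripts package without sys.path tweaks.
from scripts.system.clean_text import clean_text


def test_clean_text_strips_html_and_newlines():
    raw = "<p>Hello\ncursed world</p>"
    # clean_text removes the HTML tags and replaces the newline with a space
    assert clean_text(raw) == "Hello cursed world"
```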
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package
10 |
11 | on:
12 | release:
13 | types: [published]
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | deploy:
20 |
21 | runs-on: ubuntu-latest
22 |
23 | steps:
24 | - uses: actions/checkout@v3
25 | - name: Set up Python
26 | uses: actions/setup-python@v3
27 | with:
28 | python-version: '3.x'
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | pip install build
33 | - name: Build package
34 | run: python -m build
35 | - name: Publish package
36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
37 | with:
38 | user: __token__
39 | password: ${{ secrets.PYPI_API_TOKEN }}
40 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | torchvision/
2 | torch/
3 | 5.3.0/
4 | psdenv/
5 | output/
6 | models/
7 |
8 | # Byte-compiled / optimized / DLL files
9 | __pycache__/
10 | *.py[cod]
11 | *$py.class
12 |
13 | # C extensions
14 | *.so
15 |
16 | # Distribution / packaging
17 | .Python
18 | build/
19 | develop-eggs/
20 | dist/
21 | downloads/
22 | eggs/
23 | .eggs/
24 | lib/
25 | lib64/
26 | parts/
27 | sdist/
28 | var/
29 | wheels/
30 | share/python-wheels/
31 | *.egg-info/
32 | .installed.cfg
33 | *.egg
34 | MANIFEST
35 |
36 | # PyInstaller
37 | # Usually these files are written by a python script from a template
38 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
39 | *.manifest
40 | *.spec
41 |
42 | # Installer logs
43 | pip-log.txt
44 | pip-delete-this-directory.txt
45 |
46 | # Unit test / coverage reports
47 | htmlcov/
48 | .tox/
49 | .nox/
50 | .coverage
51 | .coverage.*
52 | .cache
53 | nosetests.xml
54 | coverage.xml
55 | *.cover
56 | *.py,cover
57 | .hypothesis/
58 | .pytest_cache/
59 | cover/
60 |
61 | # Translations
62 | *.mo
63 | *.pot
64 |
65 | # Django stuff:
66 | *.log
67 | local_settings.py
68 | db.sqlite3
69 | db.sqlite3-journal
70 |
71 | # Flask stuff:
72 | instance/
73 | .webassets-cache
74 |
75 | # Scrapy stuff:
76 | .scrapy
77 |
78 | # Sphinx documentation
79 | docs/_build/
80 |
81 | # PyBuilder
82 | .pybuilder/
83 | target/
84 |
85 | # Jupyter Notebook
86 | .ipynb_checkpoints
87 |
88 | # IPython
89 | profile_default/
90 | ipython_config.py
91 |
92 | # pyenv
93 | # For a library or package, you might want to ignore these files since the code is
94 | # intended to run in multiple environments; otherwise, check them in:
95 | # .python-version
96 |
97 | # pipenv
98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
101 | # install all needed dependencies.
102 | #Pipfile.lock
103 |
104 | # poetry
105 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
106 | # This is especially recommended for binary packages to ensure reproducibility, and is more
107 | # commonly ignored for libraries.
108 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
109 | #poetry.lock
110 |
111 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
112 | __pypackages__/
113 |
114 | # Celery stuff
115 | celerybeat-schedule
116 | celerybeat.pid
117 |
118 | # SageMath parsed files
119 | *.sage.py
120 |
121 | # Environments
122 | .env
123 | .venv
124 | env/
125 | venv/
126 | ENV/
127 | env.bak/
128 | venv.bak/
129 |
130 | # Spyder project settings
131 | .spyderproject
132 | .spyproject
133 |
134 | # Rope project settings
135 | .ropeproject
136 |
137 | # mkdocs documentation
138 | /site
139 |
140 | # mypy
141 | .mypy_cache/
142 | .dmypy.json
143 | dmypy.json
144 |
145 | # Pyre type checker
146 | .pyre/
147 |
148 | # pytype static type analyzer
149 | .pytype/
150 |
151 | # Cython debug symbols
152 | cython_debug/
153 |
154 | # PyCharm
155 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
156 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
157 | # and can be added to the global gitignore or merged into this file. For a more nuclear
158 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
159 | #.idea/
160 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 | {
8 | "name": "Python Debugger: Current File",
9 | "type": "debugpy",
10 | "request": "launch",
11 | "program": "${file}",
12 | "console": "integratedTerminal"
13 | }
14 | ]
15 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Cursed Entertainment
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 | # CursedGPT
11 |
12 |
18 |
19 |
20 | ```bash
21 | "you suck, but you're not going to do anything about it."
22 | ```
23 | CursedGPT uses the Hugging Face Transformers library to interact with a pre-trained GPT-2 model: the model is loaded with TensorFlow (`TFAutoModelForCausalLM`) and prompts are tokenized with `AutoTokenizer`. The scripts let you enter prompts interactively and print the GPT-2 responses; warning messages from the transformers library are suppressed to keep the output readable.
24 |
25 | ## Scripts:
26 |
27 | - **main.py:** The selection menu for CursedGPT
28 | - **app.py:** Run CursedGPT as a web-server
29 |
30 | ### /scripts/
31 |
32 | - **transformer.py:** Run CursedGPT
33 | - **transformer_t2s.py:** Run CursedGPT with text-to-speech functionality
34 | - **transformer_s2t2s.py:** Run CursedGPT with speech-to-text-to-speech functionality
35 | - **transformer_webcam.py:** Run CursedGPT with webcam functionality
36 | - **install_dependencies.py:** Install dependencies
37 |
38 | ### /scripts/system
39 |
40 | - **generate_text.py:** The GPT text generator
41 | - **clean_text.py:** Clean text from recorded voice audio
42 | - **stt.py:** Speech-to-text
43 | - **tts.py:** Text-to-speech
44 | - **capture_photo.py:** Capture photo with a webcam
45 | - **play_audio.py:** Play generated audio
46 |
47 | ## Requirements:
48 |
49 | ```bash
50 | transformers==4.37.1
51 | tensorflow==2.14.0
52 | torch==2.1.1
53 | torchvision==0.16.1
54 | torchaudio==2.1.1
55 | flask==3.0.0
56 | gtts==2.5.0
57 | pyaudio==0.2.14
58 | pydub==0.25.1
59 | beautifulsoup4==4.10.0
60 | SpeechRecognition==3.10.1
61 | pygame==2.5.2
62 | ```
63 | ## How to Run:
64 | ```bash
65 | pip install -r requirements.txt
66 | ```
67 | ```bash
68 | pip install torch torchvision torchaudio
69 | ```
70 | ```bash
71 | python main.py
72 | ```
73 | ```bash
74 | python app.py
75 | ```
76 |
77 |
78 |
84 |
85 |
86 | - [GloriosaAI Repository](https://github.com/CursedPrograms/GloriosaAI)
87 | - [Gender-Age-ID Repository](https://github.com/CursedPrograms/Gender-Age-ID)
88 | - [Detect-Face Repository](https://github.com/CursedPrograms/Detect-Face)
89 | - [Image-Generator Repository](https://github.com/CursedPrograms/Image-Generator)
90 |
91 |
92 |
98 |
99 |
105 |
--------------------------------------------------------------------------------
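The README's description boils down to a short Transformers/TensorFlow loop: load GPT-2 with `TFAutoModelForCausalLM`, tokenize the prompt with `AutoTokenizer`, call `generate`, and decode the result. A standalone sketch of that flow follows; the sampling parameters are illustrative, while the repo's actual values live in `scripts/system/generate_text.py` and `settings.json`.

```python
# Minimal sketch of the GPT-2 flow the README describes; sampling parameters
# are illustrative, not the exact ones used by scripts/system/generate_text.py.
import tensorflow as tf
from transformers import TFAutoModelForCausalLM, AutoTokenizer

model = TFAutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2", pad_token_id=50256)

prompt = "Tell me something cursed."
inputs = tokenizer.encode(prompt, return_tensors="tf")

outputs = model.generate(
    inputs,
    max_length=50,                        # same default as settings.json
    do_sample=True,
    top_k=50,
    attention_mask=tf.ones_like(inputs),  # explicit mask, as generate_text.py builds
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```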
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CursedPrograms/Cursed-GPT/5dfaad3c0e946dbb7c7c485d3bc34805f4b202e8/__init__.py
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, render_template, request
2 | import transformers
3 | from transformers import TFAutoModelForCausalLM, AutoTokenizer
4 | import tensorflow as tf
5 | import logging
6 | from scripts.system.generate_text import generate_text
7 | import webbrowser
8 |
9 | transformers.logging.set_verbosity_error()
10 | tf.get_logger().setLevel(logging.ERROR)
11 |
12 | app = Flask(__name__, static_url_path='/static')
13 |
14 | model_name = "gpt2"
15 | model = TFAutoModelForCausalLM.from_pretrained(model_name)
16 | tokenizer = AutoTokenizer.from_pretrained(model_name, pad_token_id=50256)
17 |
18 | @app.route('/')
19 | def home():
20 | return render_template('index.html')
21 |
22 | @app.route('/generate', methods=['POST'])
23 | def generate():
24 | prompt = request.form['prompt']
25 | generated_text = generate_text(prompt, model, tokenizer)
26 | return render_template('index.html', prompt=prompt, generated_text=generated_text)
27 |
28 | if __name__ == "__main__":
29 | webbrowser.open('http://127.0.0.1:5000/')
30 | app.run(debug=True, use_reloader=False)
31 |
--------------------------------------------------------------------------------
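With `app.py` running on the default `http://127.0.0.1:5000`, the `/generate` route expects a form-encoded POST containing a single `prompt` field and responds with the rendered page. A quick smoke test using only the standard library, assuming the server is already up:

```python
# Post a prompt to the running Flask app and print the HTML it returns.
# Assumes app.py is already running on the default http://127.0.0.1:5000.
import urllib.parse
import urllib.request

data = urllib.parse.urlencode({"prompt": "Say something cursed."}).encode()
with urllib.request.urlopen("http://127.0.0.1:5000/generate", data=data) as resp:
    print(resp.read().decode())  # rendered index.html containing the generated text
```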
/clear-commits.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | REM Switch to a new orphan branch
3 | git checkout --orphan new_branch
4 |
5 | REM Stage all changes
6 | git add .
7 |
8 | REM Commit changes
9 | git commit -m "new_commit"
10 |
11 | REM Delete the old main branch
12 | git branch -D main
13 |
14 | REM Rename the new branch to main
15 | git branch -m main
16 |
17 | REM Force push to the remote main branch
18 | git push -f origin main
19 |
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
1 | {
2 |   "Config": {
3 |     "AppName": "CursedGPT",
4 |     "Description": "CursedGPT leverages the Hugging Face Transformers library to interact with a pre-trained GPT-2 model. It employs TensorFlow for model management and AutoTokenizer for efficient tokenization. The script enables users to input prompts interactively, generating text responses from the GPT-2 model. To improve the user experience, unnecessary warning messages related to the transformers library are effectively suppressed.",
5 |     "ProjectStructure": {
6 |       "MainScript": "main.py",
7 |       "TransformerScript": "scripts/transformer.py",
8 |       "Transformer-t2s-Script": "scripts/transformer_t2s.py",
9 |       "Transformer-s2t2s-Script": "scripts/transformer_s2t2s.py",
10 |       "Transformer-WebcamScript": "scripts/transformer_webcam.py",
11 |       "DependenciesScript": "scripts/install_dependencies.py",
12 |       "AppScript": "app.py"
13 |     },
14 |     "Scripts": {
15 |       "MainScript": "main.py",
16 |       "TransformerScript": "scripts/transformer.py",
17 |       "Transformer-t2s-Script": "scripts/transformer_t2s.py",
18 |       "Transformer-s2t2s-Script": "scripts/transformer_s2t2s.py",
19 |       "Transformer-WebcamScript": "scripts/transformer_webcam.py",
20 |       "DependenciesScript": "scripts/install_dependencies.py",
21 |       "AppScript": "app.py"
22 |     }
23 |   }
24 | }
--------------------------------------------------------------------------------
/demo_images/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CursedPrograms/Cursed-GPT/5dfaad3c0e946dbb7c7c485d3bc34805f4b202e8/demo_images/cover.png
--------------------------------------------------------------------------------
/demo_images/cover_p.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CursedPrograms/Cursed-GPT/5dfaad3c0e946dbb7c7c485d3bc34805f4b202e8/demo_images/cover_p.png
--------------------------------------------------------------------------------
/demo_images/cursed-gpt-promo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CursedPrograms/Cursed-GPT/5dfaad3c0e946dbb7c7c485d3bc34805f4b202e8/demo_images/cursed-gpt-promo.png
--------------------------------------------------------------------------------
/link.bf:
--------------------------------------------------------------------------------
1 | ++++++++++[>+>+++>+++++++>++++++++++<<<<-]>>>>-.++++++++++++++++++.---.+.--------------.-.<<+++++++++++++++.>>+.+++++++++.++++++.---------------.+++++++++++++.++.-------------------.++++++++.+++++.-.--------.+++++++++.++++++.<<+.>>-----------.+++++++++++.-----------------.+++++.<<.>>+.++++++.
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import json
4 |
5 | def main():
6 | with open('config.json') as json_file:
7 | config_data = json.load(json_file)
8 |
9 | # Get the project name from the JSON data
10 | app_name = config_data.get('Config', {}).get('AppName', 'default_app')
11 |
12 | # Print the actual app name value
13 | print(app_name)
14 |
15 | scripts = {
16 | "1": {
17 | "name": "Run 'transformer.py'",
18 | "description": "Generate text using Cursed GPT",
19 | "file_name": "scripts/transformer.py"
20 | },
21 | "2": {
22 | "name": "Run 'transformer_t2s.py'",
23 | "description": "Run Cursed GPT with text to speech",
24 | "file_name": "scripts/transformer_t2s.py"
25 | },
26 | "3": {
27 | "name": "Run 'transformer_s2t2s.py'",
28 | "description": "Run Cursed GPT with speech to text to speech",
29 | "file_name": "scripts/transformer_s2t2s.py"
30 | },
31 | "4": {
32 | "name": "Run 'app.py'",
33 | "description": "Interact with Cursed GPT using HTML and Flask",
34 | "file_name": "app.py"
35 | },
36 | "5": {
37 | "name": "Run 'transformer_webcam.py'",
38 | "description": "Interact with Cursed GPT using a webcam and mic",
39 | "file_name": "scripts/transformer_webcam.py"
40 | },
41 | "00": {
42 | "name": "Run 'Install Dependencies'",
43 | "description": "Install dependencies",
44 | "file_name": "scripts/install_dependencies.py"
45 | },
46 | "default": {
47 | "name": "Redirect to Main",
48 | "description": "Redirect to Main",
49 | "file_name": "redirect.py"
50 | }
51 | }
52 |
53 | current_script_dir = os.path.dirname(os.path.abspath(__file__))
54 |
55 | while True:
56 | print("\nAvailable Scripts:")
57 | for key, script_info in scripts.items():
58 | print(f"{key}: {script_info['name']} - {script_info['description']}")
59 |
60 | user_choice = input("Enter the number of the script you want to run (or 'q' to quit): ").strip()
61 |
62 | if user_choice == 'q':
63 | break
64 |
65 | if user_choice in scripts:
66 | selected_script = scripts[user_choice]
67 | script_file_name = selected_script["file_name"]
68 | script_file_path = os.path.join(current_script_dir, script_file_name)
69 |
70 | if os.path.exists(script_file_path):
71 | try:
72 | subprocess.run(["python", script_file_path])
73 | except Exception as e:
74 | print(f"An error occurred while running the script: {e}")
75 | else:
76 | print(f"Script file '{script_file_name}' does not exist.")
77 | else:
78 | print("Invalid choice. Please select a valid script number.")
79 |
80 | if __name__ == "__main__":
81 | main()
--------------------------------------------------------------------------------
/redirect.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import os
3 |
4 | if __name__ == "__main__":
5 | script_path = os.path.join("scripts", "transformer.py")
6 | subprocess.run(["python", script_path])
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | transformers==4.37.1
2 | tensorflow==2.14.0
3 | torch==2.1.1
4 | torchvision==0.16.1
5 | torchaudio==2.1.1
6 | flask==3.0.0
7 | gtts==2.5.0
8 | pyaudio==0.2.14
9 | pydub==0.25.1
10 | beautifulsoup4==4.10.0
11 | SpeechRecognition==3.10.1
12 | pygame==2.5.2
13 | opencv-python
14 |
--------------------------------------------------------------------------------
/run-server.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | set "VENV_DIR=psdenv"
4 |
5 | rem Create the Python virtual environment if it does not exist yet
6 | if not exist "%VENV_DIR%" (
7 |     rem First run: create the venv
8 |     python -m venv "%VENV_DIR%"
9 | )
10 |
11 | rem Activate the virtual environment and start the Flask web server
12 | call "%VENV_DIR%\Scripts\activate" && python app.py
13 |
14 | rem Keep the console window open after the server stops
15 | pause
16 |
--------------------------------------------------------------------------------
/run.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | set "VENV_DIR=psdenv"
4 |
5 | rem Create the Python virtual environment if it does not exist yet
6 | if not exist "%VENV_DIR%" (
7 |     rem First run: create the venv
8 |     python -m venv "%VENV_DIR%"
9 | )
10 |
11 | rem Activate the virtual environment and launch the script selection menu
12 | call "%VENV_DIR%\Scripts\activate" && python main.py
13 |
14 | rem Keep the console window open after the menu exits
15 | pause
16 |
--------------------------------------------------------------------------------
/scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CursedPrograms/Cursed-GPT/5dfaad3c0e946dbb7c7c485d3bc34805f4b202e8/scripts/__init__.py
--------------------------------------------------------------------------------
/scripts/install_dependencies.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import os
3 |
4 | def install_dependencies():
5 | try:
6 | # Get the directory of the current script
7 | script_directory = os.path.dirname(os.path.abspath(__file__))
8 |
9 | # Specify the path to the requirements.txt file relative to the script's location
10 | requirements_file_path = os.path.join(script_directory, '../requirements.txt')
11 |
12 | # Activate the virtual environment if it exists
13 | venv_activate_path = os.path.join(script_directory, '../psdenv/Scripts/activate')
14 | if os.path.exists(venv_activate_path):
15 | subprocess.run([venv_activate_path], shell=True)
16 |
17 | # Install dependencies using pip
18 | subprocess.run(['pip', 'install', '-r', requirements_file_path])
19 |
20 | print("Dependencies installed successfully.")
21 |
22 | except Exception as e:
23 | print(f"Error installing dependencies: {e}")
24 |
25 | if __name__ == "__main__":
26 | install_dependencies()
27 |
--------------------------------------------------------------------------------
/scripts/system/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CursedPrograms/Cursed-GPT/5dfaad3c0e946dbb7c7c485d3bc34805f4b202e8/scripts/system/__init__.py
--------------------------------------------------------------------------------
/scripts/system/capture_photo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 |
4 | def capture_photo(output_dir="output/shot"):
5 | # Create the output directory if it doesn't exist
6 | os.makedirs(output_dir, exist_ok=True)
7 |
8 |     # Open a connection to the default webcam (device 0)
9 |     cap = cv2.VideoCapture(0)
10 |
11 |     # Capture a single frame
12 |     ret, frame = cap.read()
13 |
14 |     # Release the webcam
15 |     cap.release()
16 |
17 |     # Fail loudly if no frame could be read instead of writing an empty image
18 |     if not ret or frame is None:
19 |         raise RuntimeError("Could not capture a frame from the webcam.")
20 |
21 |     # Save the captured frame to the specified directory
22 |     image_path = os.path.join(output_dir, "captured_photo.jpg")
23 |     cv2.imwrite(image_path, frame)
24 |
25 |     return image_path
--------------------------------------------------------------------------------
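`capture_photo` grabs a single frame from the default webcam, writes `captured_photo.jpg` into `output_dir` (created if missing), and returns the saved path. A small usage sketch, assuming it is run from the repository root:

```python
# Grab one frame from the default webcam and report where it was written.
from scripts.system.capture_photo import capture_photo  # import path when run from the repo root

image_path = capture_photo(output_dir="output/shot")
print(f"Saved webcam frame to {image_path}")
```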
/scripts/system/clean_text.py:
--------------------------------------------------------------------------------
1 | from bs4 import BeautifulSoup
2 |
3 | def clean_text(text):
4 | """
5 | Cleans the given text by removing HTML tags and unwanted characters.
6 |
7 | Parameters:
8 | - text (str): The input text.
9 |
10 | Returns:
11 | - str: The cleaned text.
12 | """
13 | # Remove HTML tags
14 | soup = BeautifulSoup(text, "html.parser")
15 | cleaned_text = soup.get_text(separator=" ")
16 |
17 | # Remove unwanted characters
18 | cleaned_text = cleaned_text.replace("\n", " ").strip()
19 |
20 | return cleaned_text
--------------------------------------------------------------------------------
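`clean_text` runs before text-to-speech so that HTML fragments and stray newlines in generated text are not read aloud. A small usage sketch, assuming it is run from the repository root:

```python
# What clean_text does to markup and newlines before speech synthesis.
from scripts.system.clean_text import clean_text  # import path when run from the repo root

raw = "<p>Hello,\ncursed world!</p>"
print(clean_text(raw))  # -> "Hello, cursed world!"
```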
/scripts/system/generate_text.py:
--------------------------------------------------------------------------------
1 | import transformers
2 | import tensorflow as tf
3 | import logging
4 | import json
5 |
6 | transformers.logging.set_verbosity_error()
7 | tf.get_logger().setLevel(logging.ERROR)
8 |
9 | def generate_text(prompt, model, tokenizer, settings_path="settings.json"):
10 | with open(settings_path, "r") as settings_file:
11 | settings = json.load(settings_file)
12 |
13 | max_length = settings.get("max_length", 50)
14 |
15 | inputs = tokenizer.encode(prompt, return_tensors="tf", max_length=max_length, truncation=True)
16 | attention_mask = tf.ones_like(inputs)
17 | outputs = model.generate(
18 | inputs,
19 | max_length=max_length,
20 | num_beams=5,
21 | no_repeat_ngram_size=2,
22 | top_k=50,
23 | top_p=None,
24 | do_sample=True,
25 | attention_mask=attention_mask
26 | )
27 | generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
28 | return generated_text
--------------------------------------------------------------------------------
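`generate_text` re-reads `max_length` from `settings.json` on every call (defaulting to 50 if the key is missing) and uses it both to truncate the tokenized prompt and to cap the generated sequence, so very long prompts leave little room for new tokens. A usage sketch with an alternative settings file; `settings_long.json` is hypothetical and would simply contain something like `{"max_length": 120}`:

```python
# Calling generate_text directly with a custom settings file.
# settings_long.json is hypothetical; it would hold e.g. {"max_length": 120}.
from transformers import TFAutoModelForCausalLM, AutoTokenizer
from scripts.system.generate_text import generate_text  # import path when run from the repo root

model = TFAutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2", pad_token_id=50256)

text = generate_text(
    "Write a short cursed greeting.",
    model,
    tokenizer,
    settings_path="settings_long.json",
)
print(text)
```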
/scripts/system/play_audio.py:
--------------------------------------------------------------------------------
1 | import pygame
2 | import time
3 | import os
4 |
5 | def play_audio(audio_path):
6 | pygame.mixer.init()
7 | pygame.mixer.music.load(audio_path)
8 |
9 |     # Play the file once, blocking until playback finishes
10 |     pygame.mixer.music.play()
11 |     while pygame.mixer.music.get_busy():
12 |         time.sleep(1)
13 |     pygame.mixer.music.stop()
14 |
15 |     # Remove the temporary audio file once playback is done
16 |     try:
17 |         os.remove(audio_path)
18 |     except Exception as e:
19 |         print(f"Error while removing file: {e}")
20 |
--------------------------------------------------------------------------------
/scripts/system/stt.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 |
3 | def speech_to_text():
4 | recognizer = sr.Recognizer()
5 |
6 | with sr.Microphone() as source:
7 | print("Say something:")
8 | recognizer.adjust_for_ambient_noise(source, duration=1)
9 | audio = recognizer.listen(source)
10 |
11 | try:
12 | print("Recognizing...")
13 | prompt = recognizer.recognize_google(audio)
14 | print(f"You said: {prompt}")
15 | return prompt
16 | except sr.UnknownValueError:
17 | print("Could not understand audio.")
18 | return ""
19 | except sr.RequestError as e:
20 | print(f"Error with the speech recognition service; {e}")
21 | return ""
--------------------------------------------------------------------------------
/scripts/system/tts.py:
--------------------------------------------------------------------------------
1 | import os
2 | from gtts import gTTS
3 | from .clean_text import clean_text
4 | from .play_audio import play_audio
5 |
6 | def text_to_speech(text):
7 |     # Strip HTML tags and stray newlines before synthesis
8 |     cleaned_text = clean_text(text)
9 |     tts = gTTS(text=cleaned_text, lang='en', slow=False)
10 |
11 |     audio_path = "output/audio/output.mp3"
12 |     os.makedirs(os.path.dirname(audio_path), exist_ok=True)
13 |
14 |     # Save the synthesized speech once, then play it back
15 |     tts.save(audio_path)
16 |     play_audio(audio_path)
--------------------------------------------------------------------------------
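`text_to_speech` ties the audio pipeline together: it cleans the text, synthesizes it with gTTS (which needs an internet connection), saves `output/audio/output.mp3`, and hands the file to `play_audio`, which deletes it after playback. A usage sketch, assuming it is run from the repository root:

```python
# End-to-end TTS: clean the text, synthesize with gTTS, play, then delete the mp3.
from scripts.system.tts import text_to_speech  # import path when run from the repo root

text_to_speech("<p>Hello from CursedGPT</p>")  # markup is stripped before synthesis
```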
/scripts/transformer.py:
--------------------------------------------------------------------------------
1 | import transformers
2 | from transformers import TFAutoModelForCausalLM, AutoTokenizer
3 | import tensorflow as tf
4 | import logging
5 | from system.generate_text import generate_text
6 |
7 | transformers.logging.set_verbosity_error()
8 | tf.get_logger().setLevel(logging.ERROR)
9 |
10 | def main():
11 | model_name = "gpt2"
12 | model = TFAutoModelForCausalLM.from_pretrained(model_name)
13 | tokenizer = AutoTokenizer.from_pretrained(model_name, pad_token_id=50256)
14 |
15 | while True:
16 | prompt = input("Enter a prompt (type 'exit' to end): ")
17 |
18 | if prompt.lower() == 'exit':
19 | print("Exiting the program.")
20 | break
21 |
22 | generated_text = generate_text(prompt, model, tokenizer)
23 |
24 | print("Generated Text:")
25 | print(generated_text)
26 |
27 | if __name__ == "__main__":
28 | main()
29 |
--------------------------------------------------------------------------------
/scripts/transformer_s2t2s.py:
--------------------------------------------------------------------------------
1 | import transformers
2 | from transformers import TFAutoModelForCausalLM, AutoTokenizer
3 | import tensorflow as tf
4 | import logging
5 | from system.generate_text import generate_text
6 | from system.tts import text_to_speech
7 | from system.stt import speech_to_text
8 |
9 | transformers.logging.set_verbosity_error()
10 | tf.get_logger().setLevel(logging.ERROR)
11 |
12 | def main():
13 | model_name = "gpt2"
14 | model = TFAutoModelForCausalLM.from_pretrained(model_name)
15 | tokenizer = AutoTokenizer.from_pretrained(model_name, pad_token_id=50256)
16 |
17 | while True:
18 | choice = input("Choose input method (1 for text, 2 for speech, 'exit' to end): ")
19 |
20 | if choice == '1':
21 | prompt = input("Enter a prompt: ")
22 | elif choice == '2':
23 | prompt = speech_to_text()
24 | elif choice.lower() == 'exit':
25 | print("Exiting the program.")
26 | break
27 | else:
28 | print("Invalid choice. Please enter '1', '2', or 'exit'.")
29 | continue
30 |
31 | generated_text = generate_text(prompt, model, tokenizer)
32 |
33 | print("Generated Text:")
34 | print(generated_text)
35 |
36 | text_to_speech(generated_text)
37 |
38 | if __name__ == "__main__":
39 | main()
40 |
--------------------------------------------------------------------------------
/scripts/transformer_t2s.py:
--------------------------------------------------------------------------------
1 | import transformers
2 | from transformers import TFAutoModelForCausalLM, AutoTokenizer
3 | import tensorflow as tf
4 | import logging
5 | from system.generate_text import generate_text
6 | from system.tts import text_to_speech
7 |
8 | transformers.logging.set_verbosity_error()
9 | tf.get_logger().setLevel(logging.ERROR)
10 |
11 | def main():
12 | model_name = "gpt2"
13 | model = TFAutoModelForCausalLM.from_pretrained(model_name)
14 | tokenizer = AutoTokenizer.from_pretrained(model_name, pad_token_id=50256)
15 |
16 | while True:
17 | prompt = input("Enter a prompt (type 'exit' to end): ")
18 |
19 | if prompt.lower() == 'exit':
20 | print("Exiting the program.")
21 | break
22 |
23 | generated_text = generate_text(prompt, model, tokenizer)
24 |
25 | print("Generated Text:")
26 | print(generated_text)
27 |
28 | text_to_speech(generated_text)
29 |
30 | if __name__ == "__main__":
31 | main()
32 |
--------------------------------------------------------------------------------
/scripts/transformer_webcam.py:
--------------------------------------------------------------------------------
1 | import transformers
2 | from transformers import TFAutoModelForCausalLM, AutoTokenizer
3 | import tensorflow as tf
4 | import logging
5 | import os
6 | import cv2
7 | import time
8 | import numpy as np
9 | import pyaudio
10 | import speech_recognition as sr
11 | import wave
12 | from system.generate_text import generate_text
13 | from system.tts import text_to_speech
14 | from system.capture_photo import capture_photo
15 |
16 | transformers.logging.set_verbosity_error()
17 | tf.get_logger().setLevel(logging.ERROR)
18 |
19 | import urllib.request
20 |
21 | def download_file(url, save_path):
22 | urllib.request.urlretrieve(url, save_path)
23 |
24 | def download_caffe_model_files():
25 | # Create a directory to save the model files
26 | os.makedirs("models", exist_ok=True)
27 |
28 | # Download deploy.prototxt
29 | deploy_prototxt_url = "https://github.com/chuanqi305/MobileNet-SSD/raw/master/deploy.prototxt"
30 | deploy_prototxt_path = os.path.join("models", "deploy.prototxt")
31 | download_file(deploy_prototxt_url, deploy_prototxt_path)
32 |
33 | # Download mobilenet_iter_73000.caffemodel
34 | caffemodel_url = "https://github.com/chuanqi305/MobileNet-SSD/raw/master/mobilenet_iter_73000.caffemodel"
35 | caffemodel_path = os.path.join("models", "mobilenet_iter_73000.caffemodel")
36 | download_file(caffemodel_url, caffemodel_path)
37 |
38 | def capture_audio(duration=5, sample_rate=44100):
39 | p = pyaudio.PyAudio()
40 | stream = p.open(format=pyaudio.paInt16,
41 | channels=1,
42 | rate=sample_rate,
43 | input=True,
44 | frames_per_buffer=1024)
45 |
46 | frames = []
47 | print("Recording audio...")
48 |
49 | for i in range(0, int(sample_rate / 1024 * duration)):
50 | data = stream.read(1024)
51 | frames.append(data)
52 |
53 | print("Finished recording.")
54 | stream.stop_stream()
55 | stream.close()
56 | p.terminate()
57 |
58 | audio_data = np.frombuffer(b''.join(frames), dtype=np.int16)
59 |
60 | # Save audio data to a temporary file
61 |
62 | temp_audio_file = "output/audio/temp_audio.wav"
63 | os.makedirs(os.path.dirname(temp_audio_file), exist_ok=True)
64 | wf = wave.open(temp_audio_file, 'wb')
65 | wf.setnchannels(1)
66 | wf.setsampwidth(pyaudio.PyAudio().get_sample_size(pyaudio.paInt16))
67 | wf.setframerate(sample_rate)
68 | wf.writeframes(b''.join(frames))
69 | wf.close()
70 |
71 | return temp_audio_file
72 |
73 | def image_description(image_path):
74 | # Load the pre-trained MobileNet SSD model and its class labels
75 | net = cv2.dnn.readNetFromCaffe("models/deploy.prototxt", "models/mobilenet_iter_73000.caffemodel")
76 | classes = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair",
77 | "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
78 | "train", "tvmonitor"]
79 |
80 | # Load and preprocess the input image
81 | image = cv2.imread(image_path)
82 | h, w = image.shape[:2]
83 | blob = cv2.dnn.blobFromImage(image, 0.007843, (300, 300), 127.5)
84 |
85 | # Pass the blob through the network and obtain the detections
86 | net.setInput(blob)
87 | detections = net.forward()
88 |
89 | # Loop over the detections and get the description of the most confident prediction
90 | for i in range(detections.shape[2]):
91 | confidence = detections[0, 0, i, 2]
92 | if confidence > 0.5: # Confidence threshold
93 | class_id = int(detections[0, 0, i, 1])
94 | description = f"A photo containing a {classes[class_id]} with confidence {confidence:.2f}"
95 | return description
96 |
97 | return "Unable to identify the content of the photo."
98 |
99 | if not os.path.exists("models/deploy.prototxt") or not os.path.exists("models/mobilenet_iter_73000.caffemodel"):
100 | download_caffe_model_files()
101 |
102 | # Example usage:
103 | captured_image_path = capture_photo()
104 | print(f"Captured image saved at: {captured_image_path}")
105 |
106 | description = image_description(captured_image_path)
107 | print(description)
108 |
109 | def main():
110 | model_name = "gpt2"
111 | model = TFAutoModelForCausalLM.from_pretrained(model_name)
112 | tokenizer = AutoTokenizer.from_pretrained(model_name, pad_token_id=50256)
113 |
114 | while True:
115 | # Capture a photo and get image description
116 | photo = capture_photo()
117 | image_desc = image_description(photo)
118 | print(f"Image Description: {image_desc}")
119 |
120 | # Convert audio file to text
121 | recognizer = sr.Recognizer()
122 | audio_text = ""
123 |
124 | with sr.AudioFile(capture_audio()) as source:
125 | try:
126 | audio = recognizer.record(source)
127 | audio_text = recognizer.recognize_google(audio)
128 | print(f"Audio Text: {audio_text}")
129 | except sr.UnknownValueError:
130 | print("Could not understand audio.")
131 | except sr.RequestError as e:
132 | print(f"Error with the speech recognition service; {e}")
133 |
134 | # Determine the prompt based on available information
135 | if audio_text and image_desc:
136 | prompt = f"{audio_text} {image_desc}"
137 | elif audio_text:
138 | prompt = audio_text
139 | elif image_desc:
140 | prompt = image_desc
141 | else:
142 | prompt = ""
143 |
144 | # Generate text based on the prompt
145 | generated_text = generate_text(prompt, model, tokenizer)
146 |
147 | # Print and say the generated text
148 | if generated_text:
149 | print("Generated Text:")
150 | print(generated_text)
151 | text_to_speech(generated_text)
152 |
153 | # Sleep for 15 seconds before the next iteration
154 | time.sleep(15)
155 |
156 | if __name__ == "__main__":
157 | main()
--------------------------------------------------------------------------------
/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "max_length": 50
3 | }
--------------------------------------------------------------------------------
/setup.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | python -m venv psdenv
3 | cmd /k ".\psdenv\Scripts\activate & python main.py"
4 |
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Mirror setup.bat: create the virtual environment, activate it, and launch the menu
3 | python -m venv psdenv
4 | source psdenv/bin/activate && python main.py
--------------------------------------------------------------------------------
/static/css/style.css:
--------------------------------------------------------------------------------
1 | @import url('https://cursedprograms.github.io/cursedentertainment/styles/main-style.css');
--------------------------------------------------------------------------------
/templates/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html lang="en">
3 | <head>
4 |     <meta charset="UTF-8">
5 |     <title>CursedGPT</title>
6 |     <link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
7 | </head>
8 | <body>
9 |     <h1>CursedGPT</h1>
10 |
11 |     {% if prompt %}
12 |     <div>
13 |         <strong>You:</strong>
14 |         <p>{{ prompt }}</p>
15 |     </div>
16 |     {% endif %}
17 |
18 |     {% if generated_text %}
19 |     <div>
20 |         <strong>CursedGPT:</strong>
21 |         <p>{{ generated_text }}</p>
22 |     </div>
23 |     {% endif %}
24 |
25 |     <form action="/generate" method="post">
26 |         <textarea name="prompt" placeholder="Enter a prompt" required></textarea>
27 |         <button type="submit">Generate</button>
28 |     </form>
29 | </body>
30 | </html>
--------------------------------------------------------------------------------