├── gpt_code_ui
├── __init__.py
├── webapp
│ ├── __init__.py
│ ├── static
│ │ └── .gitignore
│ └── main.py
├── kernel_program
│ ├── __init__.py
│ ├── launch_kernel.py
│ ├── config.py
│ ├── utils.py
│ ├── main.py
│ └── kernel_manager.py
└── main.py
├── workspace
└── .gitignore
├── frontend
├── src
│ ├── vite-env.d.ts
│ ├── config.tsx
│ ├── main.tsx
│ ├── App.css
│ ├── index.css
│ ├── components
│ │ ├── Input.css
│ │ ├── Sidebar.css
│ │ ├── Chat.css
│ │ ├── Sidebar.tsx
│ │ ├── Input.tsx
│ │ └── Chat.tsx
│ └── App.tsx
├── vite.config.ts
├── tsconfig.node.json
├── public
│ └── assets
│ │ └── assistant.svg
├── .gitignore
├── index.html
├── .eslintrc.cjs
├── tsconfig.json
└── package.json
├── notes
├── blog-post-notes.txt
└── todo.txt
├── .github
├── ISSUE_TEMPLATE
│ ├── config.yml
│ ├── feature-request.yaml
│ └── bug-report.yaml
├── PULL_REQUEST_TEMPLATE
│ └── pull_request_template_simple.md
├── CONTRIBUTING.md
└── CODE_OF_CONDUCT.md
├── .gitignore
├── .env.example
├── .env.azure-example
├── scripts
└── create_release.sh
├── setup.py
├── LICENSE
├── .devcontainer
└── devcontainer.json
├── Makefile
└── README.md
/gpt_code_ui/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/gpt_code_ui/webapp/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/gpt_code_ui/kernel_program/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/workspace/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
--------------------------------------------------------------------------------
/gpt_code_ui/webapp/static/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
--------------------------------------------------------------------------------
/frontend/src/vite-env.d.ts:
--------------------------------------------------------------------------------
1 | ///
2 |
--------------------------------------------------------------------------------
/gpt_code_ui/kernel_program/launch_kernel.py:
--------------------------------------------------------------------------------
# Entry point run as a subprocess to start an IPython kernel.
if __name__ == "__main__":
    # Imported lazily so importing this module has no side effects;
    # ipykernel is only needed when actually launching a kernel.
    from ipykernel import kernelapp as app

    # Starts the kernel's event loop; blocks until the kernel shuts down.
    app.launch_new_instance()
--------------------------------------------------------------------------------
/notes/blog-post-notes.txt:
--------------------------------------------------------------------------------
1 | - Show trick of install_requires with pip output
2 | - Explain prompt injection trick for file upload
3 | - Jupyter Kernel manager
4 |
--------------------------------------------------------------------------------
/frontend/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vite'
2 | import react from '@vitejs/plugin-react-swc'
3 |
4 | // https://vitejs.dev/config/
5 | export default defineConfig({
6 | plugins: [react()],
7 | })
8 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: Ask a question
4 | url: https://github.com/ricklamers/gpt-code-ui/discussions/categories/q-a
5 | about: Ask questions and discuss with other community members
--------------------------------------------------------------------------------
/frontend/src/config.tsx:
--------------------------------------------------------------------------------
1 | let resolvedWebAddress = import.meta.env.VITE_WEB_ADDRESS ? import.meta.env.VITE_WEB_ADDRESS : "";
2 |
3 | const Config = {
4 | WEB_ADDRESS: resolvedWebAddress,
5 | API_ADDRESS: resolvedWebAddress + "/api"
6 | }
7 |
8 | export default Config;
--------------------------------------------------------------------------------
/frontend/tsconfig.node.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "composite": true,
4 | "skipLibCheck": true,
5 | "module": "ESNext",
6 | "moduleResolution": "bundler",
7 | "allowSyntheticDefaultImports": true
8 | },
9 | "include": ["vite.config.ts"]
10 | }
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Secrets
2 | .env
3 | .dev.env
4 |
5 | # Program related
6 | process_pids/
7 | kernel_connection_file.json
8 |
9 | # Python stuff
10 | *.egg-info/
11 | venv
12 | .venv
13 | __pycache__/
14 |
15 | # IDE
16 | .vscode/
17 |
18 | # Logs
19 | *.log
20 |
21 | dist/
22 | build/
--------------------------------------------------------------------------------
/frontend/src/main.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react'
2 | import ReactDOM from 'react-dom/client'
3 | import App from './App.tsx'
4 | import './index.css'
5 |
6 | ReactDOM.createRoot(document.getElementById('root') as HTMLElement).render(
7 |
8 |
9 | ,
10 | )
11 |
--------------------------------------------------------------------------------
/frontend/src/App.css:
--------------------------------------------------------------------------------
1 | #root {
2 | width: 100%;
3 | margin: 0 auto;
4 | }
5 |
6 | .app {
7 | height: 100vh;
8 | width: 100%;
9 | display: flex;
10 | flex-direction: row;
11 | }
12 |
13 | .main {
14 | display: flex;
15 | flex-direction: column;
16 | flex: 1;
17 | overflow: hidden;
18 | height: 100vh;
19 | }
--------------------------------------------------------------------------------
/frontend/public/assets/assistant.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/frontend/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 |
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 |
15 | # Editor directories and files
16 | .vscode/*
17 | !.vscode/extensions.json
18 | .idea
19 | .DS_Store
20 | *.suo
21 | *.ntvs*
22 | *.njsproj
23 | *.sln
24 | *.sw?
25 |
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=sk-XXXX
2 | OPENAI_API_TYPE=open_ai
3 | OPENAI_API_BASE=https://api.openai.com/v1
4 | OPENAI_API_VERSION=2023-03-15-preview
5 | # OPENAI_EXTRA_HEADERS={"key": "value"}
6 | OPENAI_MODELS=[{"displayName": "GPT-3.5", "name": "gpt-3.5-turbo"}, {"displayName": "GPT-4", "name": "gpt-4"}]
7 | # OPENAI_API_LOGLEVEL=debug
8 | API_PORT=5010
9 | WEB_PORT=8080
10 | SNAKEMQ_PORT=8765
11 |
--------------------------------------------------------------------------------
/notes/todo.txt:
--------------------------------------------------------------------------------
1 | - (Done) Kernel restart
2 | - (Done) Sidebar
3 | - (Done) Pass in OpenAI key
4 | - (Done) Choose model
5 | - (Done) Add prior chat message as context
6 | - (Done) Add download file support
7 | - (Done) Test bundling
8 | - (Done) Test uploading to PyPI
9 | - (Done) Create decent README
10 |
11 | Nice-to-haves
12 | - GitHub action for PyPI publish on release
13 | - Dark mode
14 | - Support image outputs
15 |
--------------------------------------------------------------------------------
/frontend/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | GPT-Code UI
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.env.azure-example:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=XXXX
2 | OPENAI_API_TYPE=azure
3 | OPENAI_API_BASE=https://your-resource-name.openai.azure.com
4 | OPENAI_API_VERSION=2023-03-15-preview
5 | # OPENAI_EXTRA_HEADERS={"key": "value"}
6 | AZURE_OPENAI_DEPLOYMENTS=[{"displayName": "GPT-3.5", "name": "your-gpt-3.5-deployment"}, {"displayName": "GPT-4", "name": "your-gpt-4-deployment"}]
7 | # OPENAI_API_LOGLEVEL=debug
8 | API_PORT=5010
9 | WEB_PORT=8080
10 | SNAKEMQ_PORT=8765
11 |
--------------------------------------------------------------------------------
/frontend/.eslintrc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | env: { browser: true, es2020: true },
3 | extends: [
4 | 'eslint:recommended',
5 | 'plugin:@typescript-eslint/recommended',
6 | 'plugin:react-hooks/recommended',
7 | ],
8 | parser: '@typescript-eslint/parser',
9 | parserOptions: { ecmaVersion: 'latest', sourceType: 'module' },
10 | plugins: ['react-refresh'],
11 | rules: {
12 | 'react-refresh/only-export-components': 'warn',
13 | },
14 | }
15 |
--------------------------------------------------------------------------------
/gpt_code_ui/kernel_program/config.py:
--------------------------------------------------------------------------------
import logging
import os

# snakemq identities used to address the two cooperating processes.
IDENT_KERNEL_MANAGER = "kernel_manager"
IDENT_MAIN = "main"
# Directory where kernel child-process PIDs are recorded.
KERNEL_PID_DIR = "process_pids"
# Port for the snakemq link; overridable via the SNAKEMQ_PORT env var.
SNAKEMQ_PORT = int(os.environ.get("SNAKEMQ_PORT", 8765))

# Shared log-line layout applied by get_logger().
_LOG_FORMAT = "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s"


def get_logger():
    """Return this module's logger, initialising root logging on first use.

    ``logging.basicConfig`` is a no-op once the root logger has handlers,
    so repeated calls are harmless. The logger is switched to DEBUG level
    whenever a ``DEBUG`` environment variable is present (any value).
    """
    logging.basicConfig(format=_LOG_FORMAT)
    logger = logging.getLogger(__name__)
    if "DEBUG" in os.environ:
        logger.setLevel(logging.DEBUG)
    return logger
--------------------------------------------------------------------------------
/frontend/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ESNext",
4 | "lib": ["DOM", "DOM.Iterable", "ESNext"],
5 | "module": "ESNext",
6 | "skipLibCheck": true,
7 |
8 | /* Bundler mode */
9 | "moduleResolution": "bundler",
10 | "allowImportingTsExtensions": true,
11 | "resolveJsonModule": true,
12 | "isolatedModules": true,
13 | "noEmit": true,
14 | "jsx": "react-jsx",
15 |
16 | /* Linting */
17 | "strict": true,
18 | "noUnusedLocals": true,
19 | "noUnusedParameters": true,
20 | "noFallthroughCasesInSwitch": true
21 | },
22 | "include": ["src"],
23 | "references": [{ "path": "./tsconfig.node.json" }]
24 | }
25 |
--------------------------------------------------------------------------------
/scripts/create_release.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Create a GitHub release for the version currently declared in setup.py.
# Requires a GH_API_TOKEN environment variable with repo scope.

# NOTE: grep -P is GNU-specific; this script expects GNU grep (Linux/CI).
VERSION=$(grep -oP "(?<=version=')[^']*" setup.py)
if [ -z "$VERSION" ]; then
    echo "Error: could not extract version from setup.py." >&2
    exit 1
fi

TAG="v$VERSION"
TITLE="$TAG"
DESCRIPTION="Install with \`pip install gpt-code-ui\` or download bundle and run \`pip install -e .\`."

# Abort early when the GitHub token is not set.
if [ -z "$GH_API_TOKEN" ]; then
    echo "Error: Please set the GH_API_TOKEN environment variable."
    exit 1
fi

API_JSON=$(printf '{"tag_name": "%s", "target_commitish": "main", "name": "%s", "body": "%s", "draft": false, "prerelease": false}' "$TAG" "$TITLE" "$DESCRIPTION")

# Create the release and fail the script on a non-2xx response instead of
# only printing the status code.
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token $GH_API_TOKEN" --data "$API_JSON" https://api.github.com/repos/ricklamers/gpt-code-ui/releases)
echo "$HTTP_CODE"
case "$HTTP_CODE" in
    2*) ;;
    *)
        echo "Error: GitHub API returned HTTP $HTTP_CODE." >&2
        exit 1
        ;;
esac
--------------------------------------------------------------------------------
/frontend/src/index.css:
--------------------------------------------------------------------------------
1 | :root {
2 | font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif;
3 | line-height: 1.5;
4 | font-weight: 400;
5 |
6 | color: #213547;
7 | background-color: #ffffff;
8 |
9 | font-synthesis: none;
10 | text-rendering: optimizeLegibility;
11 | -webkit-font-smoothing: antialiased;
12 | -moz-osx-font-smoothing: grayscale;
13 | -webkit-text-size-adjust: 100%;
14 | }
15 |
16 | a {
17 | font-weight: 500;
18 | color: #000;
19 | text-decoration: inherit;
20 | }
21 |
22 | * {
23 | box-sizing: border-box;
24 | }
25 |
26 | body {
27 | margin: 0;
28 | padding: 0;
29 | }
30 |
31 | h1 {
32 | font-size: 3.2em;
33 | line-height: 1.1;
34 | }
35 |
36 |
37 | @media (prefers-color-scheme: light) {
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE/pull_request_template_simple.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Description
4 |
5 |
6 |
7 | ## Motivation and Context
8 |
9 |
10 |
11 |
12 | ## How has this been tested?
13 |
14 |
15 |
16 |
17 |
18 | ## Screenshots (if appropriate)
19 |
20 | ## Types of changes
21 |
22 |
--------------------------------------------------------------------------------
/gpt_code_ui/kernel_program/utils.py:
--------------------------------------------------------------------------------
1 | import re
2 | import json
3 | import snakemq.link
4 | import snakemq.packeter
5 | import snakemq.messaging
6 | import snakemq.message
7 |
8 | import gpt_code_ui.kernel_program.config as config
9 |
# Compiled once at import time instead of on every call; matches two-byte
# (ESC + final byte) and CSI-style ANSI terminal escape sequences.
_ANSI_ESCAPE_RE = re.compile(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]")


def escape_ansi(line):
    """Return *line* with ANSI terminal escape sequences stripped.

    Used to clean colourised kernel/terminal output before forwarding it.
    """
    return _ANSI_ESCAPE_RE.sub("", line)
13 |
14 |
def send_json(messaging, message, identity):
    """Serialise *message* as UTF-8 JSON and send it to *identity*.

    The snakemq message is given a 600-second TTL.
    """
    payload = json.dumps(message).encode("utf-8")
    wrapped = snakemq.message.Message(payload, ttl=600)
    messaging.send_message(identity, wrapped)
18 |
def init_snakemq(ident, init_type="listen"):
    """Build a snakemq messaging stack for identity *ident*.

    Depending on *init_type*, the link either listens on or connects to
    ``localhost:config.SNAKEMQ_PORT``.

    Returns:
        tuple: ``(messaging, link)``.

    Raises:
        ValueError: if *init_type* is neither ``"listen"`` nor ``"connect"``.
    """
    # Validate up front so a bad argument fails fast, before a Link (and its
    # underlying resources) is allocated. ValueError is a subclass of
    # Exception, so existing broad handlers still catch it.
    if init_type not in ("listen", "connect"):
        raise ValueError("Unsupported init type.")

    link = snakemq.link.Link()
    packeter = snakemq.packeter.Packeter(link)
    messaging = snakemq.messaging.Messaging(ident, "", packeter)

    if init_type == "listen":
        link.add_listener(("localhost", config.SNAKEMQ_PORT))
    else:
        link.add_connector(("localhost", config.SNAKEMQ_PORT))

    return messaging, link
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages
from os import path

# Use the repository README as the PyPI long description.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='gpt_code_ui',
    # NOTE: the Makefile's increment_version target rewrites this line with
    # sed, and scripts/create_release.sh greps it — keep the single-quoted
    # version='X.Y.Z' format intact.
    version='0.42.40',
    description="An Open Source version of ChatGPT Code Interpreter",
    long_description=long_description,
    long_description_content_type='text/markdown',  # This field specifies the format of the `long_description`.
    packages=find_packages(),
    # Ship the compiled frontend bundle (copied into webapp/static by
    # `make compile_frontend`).
    package_data={'gpt_code_ui.webapp': ['static/*', 'static/assets/*']},
    install_requires=[
        'ipykernel>=6,<7',
        'snakemq>=1,<2',
        'requests>=2,<3',
        'Flask>=2,<3',
        'flask-cors>=3,<4',
        'python-dotenv>=0.18,<2',
        'pandas>=1.3,<2',
        'openai>=0.25,<1',
    ],
    entry_points={
        'console_scripts': [
            # `gptcode` command-line entry point.
            'gptcode = gpt_code_ui.main:main',
        ],
    },
)
32 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Rick Lamers
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/frontend/src/components/Input.css:
--------------------------------------------------------------------------------
1 | textarea {
2 | padding: 0.9rem 1rem;
3 | border: none;
4 | font-size: 1rem;
5 | border-radius: .375rem;
6 | flex: 1;
7 | font: inherit;
8 | font-size: 1rem;
9 | line-height: 1.5rem;
10 | resize: none;
11 | max-height: 300px;
12 | }
13 |
14 | textarea:focus {
15 | outline: none
16 | }
17 |
18 | form.file-upload button, button.send {
19 | border: 0;
20 | background: #eee;
21 | margin: 0.5rem;
22 | border-radius: 0.5rem;
23 | padding: 0.3rem 0.5rem;
24 | cursor: pointer;
25 | height: fit-content;
26 | }
27 |
28 | form.file-upload button {
29 | margin-right: 0;
30 | }
31 |
32 | form.file-upload svg, button.send svg {
33 | margin-top: 3px;
34 | width: 20px;
35 | height: auto;
36 | }
37 |
38 | .input-parent {
39 | padding: 1rem;
40 | }
41 |
42 | svg {
43 | color: #000;
44 | }
45 |
46 | .input-holder {
47 | display: flex;
48 | justify-items: center;
49 | box-shadow: 0 0 10px rgba(0,0,0,.1);
50 | border: rgba(0,0,0,.1) 1px solid;
51 | border-radius: .375rem;
52 | }
53 |
54 | .input-holder.focused {
55 | border: rgba(0,0,0,.3) 1px solid;
56 | }
--------------------------------------------------------------------------------
/frontend/src/components/Sidebar.css:
--------------------------------------------------------------------------------
1 | .sidebar {
2 | min-width: 260px;
3 | background: #202123;
4 | height: 100%;
5 | display: flex;
6 | flex-direction: column;
7 | justify-content: space-between;
8 | padding: 0.75rem;
9 | color: #fff;
10 | }
11 |
12 | .logo {
13 | text-align: center;
14 | font-weight: bold;
15 | font-size: 1.5rem;
16 | margin-top: 1rem
17 | }
18 |
19 | .github a {
20 | color: #999;
21 | font-size: 0.8rem;
22 | }
23 |
24 | a:hover {
25 | color: #fff;
26 | }
27 |
28 | .logo svg {
29 | color: #fff;
30 | width: 35px;
31 | height: 35px;
32 | margin-right: 5px;
33 | margin-bottom: -8px;
34 | }
35 |
36 | .settings {
37 | display: flex;
38 | flex-direction: column;
39 | }
40 |
41 | button, select {
42 | padding: 0.8rem;
43 | border: none;
44 | border-radius: .25rem;
45 | background-color: #333;
46 | color: #fff;
47 | }
48 |
49 | select option {
50 | padding: 0.8rem;
51 | }
52 |
53 | button {
54 | cursor: pointer;
55 | margin-bottom: 0.5rem;
56 | background: #74a89b;
57 | }
58 |
59 | label {
60 | font-size: 0.9rem;
61 | color: #999;
62 | margin-top: .5rem;
63 | margin-bottom: .25rem;
64 | }
65 |
66 | label.header {
67 | font-size: 1.2rem;
68 | }
--------------------------------------------------------------------------------
/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "gpt-code-frontend",
3 | "private": true,
4 | "version": "0.0.0",
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "echo Building version $VITE_APP_VERSION && tsc && vite build",
9 | "lint": "eslint src --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
10 | "preview": "vite preview"
11 | },
12 | "dependencies": {
13 | "@emotion/react": "^11.11.0",
14 | "@emotion/styled": "^11.11.0",
15 | "@mui/icons-material": "^5.11.16",
16 | "@mui/material": "^5.13.1",
17 | "react": "^18.2.0",
18 | "react-dom": "^18.2.0",
19 | "react-markdown": "^8.0.7",
20 | "react-syntax-highlighter": "^15.5.0",
21 | "react-textarea-autosize": "^8.4.1",
22 | "remark-gfm": "^3.0.1",
23 | "usehooks-ts": "^2.9.1"
24 | },
25 | "devDependencies": {
26 | "@types/react": "^18.0.28",
27 | "@types/react-dom": "^18.0.11",
28 | "@types/react-syntax-highlighter": "^15.5.6",
29 | "@typescript-eslint/eslint-plugin": "^5.57.1",
30 | "@typescript-eslint/parser": "^5.57.1",
31 | "@vitejs/plugin-react-swc": "^3.0.0",
32 | "eslint": "^8.38.0",
33 | "eslint-plugin-react-hooks": "^4.6.0",
34 | "eslint-plugin-react-refresh": "^0.3.4",
35 | "typescript": "^5.0.2",
36 | "vite": "^4.3.9"
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the
2 | // README at: https://github.com/devcontainers/templates/tree/main/src/python
3 | {
4 | "name": "GPT-Code-UI",
5 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
6 | "image": "mcr.microsoft.com/devcontainers/python:0-3.11",
7 | // Features to add to the dev container. More info: https://containers.dev/features.
8 | "features": {
9 | "ghcr.io/jungaretti/features/make:1": {},
10 | "ghcr.io/akhildevelops/devcontainer-features/pip:0": {},
11 | "ghcr.io/devcontainers/features/node:1": {}
12 | },
13 |
14 | // Use 'forwardPorts' to make a list of ports inside the container available locally.
15 | "forwardPorts": [8080],
16 |
17 | "postCreateCommand": "make compile_frontend",
18 | "postStartCommand": "pip3 install --user gpt-code-ui",
19 | "postAttachCommand": "gptcode",
20 |
21 | // Configure tool-specific properties.
22 | // "customizations": {},
23 |
24 | // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
25 | // Is there a way to allow the make command to execute without being root in the container?
26 | // An error occurs when attempting to rsync as anything other than root user.
27 | "remoteUser": "root"
28 | }
29 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# `build` added to .PHONY so a stray file/dir named "build" (which
# bundle_pypi creates and removes) can never satisfy the target.
.PHONY: all build compile_frontend bundle_pypi upload_pypi increment_version release check_env_var

all: check_env_var build upload_pypi

build: check_env_var compile_frontend bundle_pypi

# Bump the patch component of version='X.Y.Z' in setup.py in place.
increment_version:
	@VERSION=$$(grep -e "^\s*version='[^']*'" setup.py | cut -d "'" -f 2) && \
	MAJOR=$$(echo $$VERSION | cut -d. -f1) && \
	MINOR=$$(echo $$VERSION | cut -d. -f2) && \
	PATCH=$$(echo $$VERSION | cut -d. -f3) && \
	NEW_PATCH=$$((PATCH + 1)) && \
	NEW_VERSION="$$MAJOR.$$MINOR.$$NEW_PATCH" && \
	sed -i.bak "s/version='[^']*'/version='$$NEW_VERSION'/" setup.py && \
	rm setup.py.bak && \
	echo "Updated version to $$NEW_VERSION"


release:
	bash scripts/create_release.sh

# Build the React frontend and sync the bundle into the Python package's
# static directory, keeping the placeholder .gitignore.
compile_frontend:
	cd frontend && \
	npm install && \
	VITE_APP_VERSION=$$(grep -e "^\s*version='[^']*'" ../setup.py | cut -d "'" -f 2) npm run build && \
	find ../gpt_code_ui/webapp/static -mindepth 1 ! -name '.gitignore' -delete && \
	rsync -av dist/ ../gpt_code_ui/webapp/static

bundle_pypi:
	rm -rf dist build && \
	python3 setup.py sdist bdist_wheel

upload_pypi:
	twine upload dist/*

# Refuse to build a distributable with a baked-in VITE_WEB_ADDRESS.
check_env_var:
ifeq ($(VITE_WEB_ADDRESS),)
	@echo "VITE_WEB_ADDRESS not set, proceeding..."
else
	$(error "VITE_WEB_ADDRESS is set, aborting...")
endif
--------------------------------------------------------------------------------
/frontend/src/components/Chat.css:
--------------------------------------------------------------------------------
1 | div.message.generator {
2 | background: rgba(247,247,248);
3 | }
4 |
5 | div.message.system {
6 | background: rgba(247,247,248);
7 | }
8 |
9 | div.avatar {
10 | background: rgb(194, 142, 210);
11 | padding: 5px;
12 | color: #fff;
13 | border-radius: 2px;
14 | }
15 |
16 | div.avatar svg {
17 | display: block;
18 | color: #fff;
19 | }
20 |
21 | .chat-messages {
22 | flex: 1;
23 | overflow-y: auto;
24 | overflow-x: hidden;
25 | }
26 |
27 | div.message.generator div.avatar {
28 | background: #74a89b;
29 | }
30 |
31 | div.message.system div.avatar {
32 | background: #74a89b;
33 | }
34 |
35 | .cell-output {
36 | white-space: pre-wrap
37 | }
38 |
39 | div.message {
40 | padding: 1rem;
41 | border-bottom: rgba(0,0,0,.1);
42 | flex-direction: row;
43 | width: 100%;
44 | display: flex;
45 | }
46 |
47 | div.message a:hover {
48 | color: #000;
49 | text-decoration: underline;
50 | }
51 |
52 | div.message-body {
53 | padding-left: 1rem;
54 | width: 100%;
55 | }
56 |
57 | .loader {
58 | border: 5px solid #74a89b;
59 | border-radius: 50%;
60 | border-top: 5px solid #457e70;
61 | width: 20px;
62 | height: 20px;
63 | -webkit-animation: spin 2s linear infinite; /* Safari */
64 | animation: spin 2s linear infinite;
65 | margin: 0 0.5rem;
66 | margin-bottom: -5px;
67 | display: inline-block;
68 | }
69 |
70 | /* Define spin animation */
71 | @keyframes spin {
72 | 0% { transform: rotate(0deg); }
73 | 100% { transform: rotate(360deg); }
74 | }
75 |
76 |
--------------------------------------------------------------------------------
/frontend/src/components/Sidebar.tsx:
--------------------------------------------------------------------------------
1 | import AssistantIcon from '@mui/icons-material/Assistant';
2 |
3 | import "./Sidebar.css";
4 |
5 | export default function Sidebar(props: {
6 | models: Array<{ name: string; displayName: string }>;
7 | selectedModel: string;
8 | onSelectModel: any;
9 | setOpenAIKey: any;
10 | openAIKey: string;
11 | }) {
12 | const handleOpenAIButtonClick = () => {
13 | const key = prompt("Please enter your OpenAI key", props.openAIKey);
14 | if (key != null) {
15 | props.setOpenAIKey(key);
16 | }
17 | };
18 | return (
19 | <>
20 |
21 |
22 |
GPT-Code UI
23 |
24 |
27 |
28 |
29 | Settings
30 | Model
31 | props.onSelectModel(event.target.value)}
34 | >
35 | {props.models.map((model, index) => {
36 | return (
37 |
38 | {model.displayName}
39 |
40 | );
41 | })}
42 |
43 | Credentials
44 | Set OpenAI key
45 |
46 |
47 | >
48 | );
49 | }
50 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.yaml:
--------------------------------------------------------------------------------
1 | name: Feature Request / Enhancement
2 | description: Suggest a new feature or feature enhancement for the project
3 | labels: ["enhancement", "needs triage"]
4 | body:
5 | - type: checkboxes
6 | id: no-duplicate-issues
7 | attributes:
8 | label: "⚠️ Please check that this feature request hasn't been suggested before."
9 | description: "There are two locations for previous feature requests. Please search in both. Thank you. The **Label filters** may help make your search more focussed."
10 | options:
11 |       - label: "I searched previous [Ideas in Discussions](https://github.com/ricklamers/gpt-code-ui/discussions/categories/ideas) and didn't find any similar feature requests."
12 |         required: true
13 |       - label: "I searched previous [Issues](https://github.com/ricklamers/gpt-code-ui/labels/enhancement) and didn't find any similar feature requests."
14 | required: true
15 |
16 | - type: textarea
17 | id: feature-description
18 | validations:
19 | required: true
20 | attributes:
21 | label: "🔖 Feature description"
22 | description: "A clear and concise description of what the feature request is."
23 | placeholder: "You should add ..."
24 |
25 | - type: textarea
26 | id: solution
27 | validations:
28 | required: true
29 | attributes:
30 | label: "✔️ Solution"
31 | description: "A clear and concise description of what you want to happen, and why."
32 | placeholder: "In my use-case, ..."
33 |
34 | - type: textarea
35 | id: alternatives
36 | validations:
37 | required: false
38 | attributes:
39 | label: "❓ Alternatives"
40 | description: "A clear and concise description of any alternative solutions or features you've considered."
41 | placeholder: "I have considered ..."
42 |
43 | - type: textarea
44 | id: additional-context
45 | validations:
46 | required: false
47 | attributes:
48 | label: "📝 Additional Context"
49 | description: "Add any other context or screenshots about the feature request here."
50 | placeholder: "..."
51 |
52 | - type: checkboxes
53 | id: acknowledgements
54 | attributes:
55 | label: 'Acknowledgements'
56 | description: 'Please confirm the following:'
57 | options:
58 | - label: 'My issue title is concise, descriptive, and in title casing.'
59 | required: true
60 | - label: 'I have searched the existing issues to make sure this feature has not been requested yet.'
61 | required: true
62 | - label: 'I have provided enough information for the maintainers to understand and evaluate this request.'
63 | required: true
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | An open source implementation of OpenAI's ChatGPT [Code interpreter](https://openai.com/blog/chatgpt-plugins#code-interpreter).
4 |
5 | Simply ask the OpenAI model to do something and it will generate & execute the code for you.
6 |
7 | Read the [blog post](https://ricklamers.io/posts/gpt-code) to find out more.
8 |
9 | ## Community
10 | Judah Cooper offered to start & curate a Discord community. Join [here](https://discord.gg/ZmTQwpkYu6).
11 |
12 | ## Installation
13 |
14 | Open a terminal and run:
15 |
16 | ```
17 | pip install gpt-code-ui
18 | gptcode
19 | ```
20 |
21 | In order to make basic dependencies available it's recommended to run the following `pip` install
22 | in the Python environment that is used in the shell where you run `gptcode`:
23 |
24 | ```sh
25 | pip install "numpy>=1.24,<1.25" "dateparser>=1.1,<1.2" "pandas>=1.5,<1.6" "geopandas>=0.13,<0.14" "tabulate>=0.9.0,<1.0" "PyPDF2>=3.0,<3.1" "pdfminer>=20191125,<20191200" "pdfplumber>=0.9,<0.10" "matplotlib>=3.7,<3.8"
26 | ```
27 |
28 | ## User interface
29 |
30 |
31 | ## Features
32 | - File upload
33 | - File download
34 | - Context awareness (it can refer to your previous messages)
35 | - Generate code
36 | - Run code (Python kernel)
37 | - Model switching (GPT-3.5 and GPT-4)
38 |
39 | ## Misc.
40 | ### Using .env for OpenAI key
41 | You can put a .env in the working directory to load the `OPENAI_API_KEY` environment variable.
42 |
43 | ### Configurables
44 | Set the `API_PORT`, `WEB_PORT`, `SNAKEMQ_PORT` variables to override the defaults.
45 |
46 | Set `OPENAI_API_BASE` to change the OpenAI API endpoint that's being used (note this environment variable includes the protocol `https://...`).
47 |
48 | You can use the `.env.example` in the repository (make sure you `git clone` the repo to get the file first).
49 |
50 | For Azure OpenAI Services, there are also other configurable variables like deployment name. See `.env.azure-example` for more information.
51 | Note that model selection on the UI is currently not supported for Azure OpenAI Services.
52 |
53 | ```
54 | cp .env.example .env
55 | vim .env
56 | gptcode
57 | ```
58 |
59 | ### Docker
60 | [localagi](https://github.com/localagi) took the effort of bundling the Python package in a Docker container. Check it out here: [gpt-code-ui-docker](https://github.com/localagi/gpt-code-ui-docker).
61 |
62 | ## Contributing
63 | Please do and have a look at the [contributions guide](.github/CONTRIBUTING.md)! This should be a community initiative. I'll try my best to be responsive.
64 |
65 |
66 | Thank you for your interest in this project!
--------------------------------------------------------------------------------
/frontend/src/components/Input.tsx:
--------------------------------------------------------------------------------
1 | import { useRef, useState } from "react";
2 |
3 | import FileUploadIcon from "@mui/icons-material/FileUpload";
4 | import SendIcon from "@mui/icons-material/Send";
5 | import TextareaAutosize from "react-textarea-autosize";
6 | import Config from "../config";
7 | import "./Input.css";
8 |
// Message composer: a textarea that sends on Enter (Shift+Enter for a newline)
// plus a hidden file input used to upload files to the backend /upload endpoint.
// NOTE(review): the JSX markup in the return block appears to have been
// stripped by text extraction (source line numbers jump 79 -> 90 -> 99);
// restore the markup from the original file before editing it.
export default function Input(props: { onSendMessage: any, onStartUpload: any, onCompletedUpload: any }) {

  // Ref to the hidden <input type="file"> element — presumably rendered in the
  // stripped JSX below; verify against the original file.
  let fileInputRef = useRef(null);
  let [inputIsFocused, setInputIsFocused] = useState(false);
  let [userInput, setUserInput] = useState("");

  const handleInputFocus = () => {
    setInputIsFocused(true);
  };

  const handleInputBlur = () => {
    setInputIsFocused(false);
  };

  // Forward a click on the visible upload button to the hidden file input.
  const handleUpload = (e: any) => {
    e.preventDefault();
    fileInputRef.current?.click();
  };

  // Upload the selected file to the backend as multipart/form-data,
  // reporting start and completion to the parent via callbacks.
  const handleFileChange = async (e: any) => {
    if (e.target.files.length > 0) {
      const file = e.target.files[0];

      // Create a new FormData instance
      const formData = new FormData();

      // Append the file to the form data
      formData.append("file", file);

      props.onStartUpload(file.name);

      try {
        const response = await fetch(Config.WEB_ADDRESS + "/upload", {
          method: "POST",
          body: formData,
        });

        if (!response.ok) {
          throw new Error("Network response was not ok");
        }

        // Backend responds with {"message": ...}; forwarded to the parent.
        const json = await response.json();
        props.onCompletedUpload(json["message"]);

      } catch (error) {
        console.error("Error:", error);
      }
    }
  };


  const handleSendMessage = async () => {
    props.onSendMessage(userInput);
    setUserInput("");
  }

  const handleInputChange = (e: any) => {
    setUserInput(e.target.value);
  };

  // Enter sends the message; Shift+Enter falls through to insert a newline.
  const handleKeyDown = (e: any) => {
    if (e.key === "Enter" && e.shiftKey === false) {
      e.preventDefault();
      handleSendMessage();
    }
  };

  return (
    // NOTE(review): JSX content stripped by extraction — see note above.




  );
}
106 |
--------------------------------------------------------------------------------
/gpt_code_ui/main.py:
--------------------------------------------------------------------------------
1 | # Run the webapp and kernel_program in separate processes
2 |
3 | # webapp is a Flask app (in webapp/main.py relative to this main.py)
4 | # kernel_program is a Python script (in kernel_program/main.py relative to this main.py)
5 |
6 | import sys
7 | import logging
8 | import asyncio
9 | import time
10 | import webbrowser
11 |
12 | from multiprocessing import Process
13 |
14 | from gpt_code_ui.webapp.main import app, APP_PORT
15 | from gpt_code_ui.kernel_program.main import main as kernel_program_main, cleanup_kernel_program
16 |
17 | APP_URL = "http://localhost:%s" % APP_PORT
18 |
def run_webapp():
    """Run the Flask webapp (blocking); exits the process with status 1 on failure."""
    try:
        # use_reloader=False: the reloader would fork a second process, which
        # breaks the multiprocessing-based orchestration in main().
        app.run(host="0.0.0.0", port=APP_PORT, use_reloader=False)
    except Exception:
        # Fix: drop the unused `as e` binding; logging.exception already
        # records the active exception and traceback.
        logging.exception("Error running the webapp:")
        sys.exit(1)
25 |
def run_kernel_program():
    """Run the kernel program's asyncio entry point (blocking); exits with 1 on failure."""
    try:
        asyncio.run(kernel_program_main())
    except Exception:
        # Fix: drop the unused `as e` binding; logging.exception already
        # records the active exception and traceback.
        logging.exception("Error running the kernel_program:")
        sys.exit(1)
32 |
def setup_logging():
    """Configure root logging: INFO-level console output plus a persistent app.log file."""
    fmt = "%(asctime)s [%(levelname)s]: %(message)s"
    logging.basicConfig(level=logging.INFO, format=fmt)

    # Mirror all log records into app.log so users can inspect them later.
    handler = logging.FileHandler("app.log")
    handler.setFormatter(logging.Formatter(fmt))
    logging.getLogger().addHandler(handler)
40 |
def print_color(text, color="gray"):
    """Print *text* wrapped in ANSI 256-color escape codes.

    Args:
        text: The string to print.
        color: "gray" (default) or "green"; any unknown name falls back to gray.
    """
    # 256-color palette indices: 242 is a mid gray, 35 a green tone.
    # Table-driven lookup replaces the old if-chain whose variable was
    # misleadingly named `gray_code` even when green was selected.
    codes = {"gray": "242", "green": "35"}
    code = codes.get(color, "242")
    print(f"\033[38;5;{code}m{text}\033[0m")
51 |
52 |
def print_banner():
    """Print the startup banner plus pointers to the UI, the log file, and API keys."""
    print("""
█▀▀ █▀█ ▀█▀ ▄▄ █▀▀ █▀█ █▀▄ █▀▀
█▄█ █▀▀ ░█░ ░░ █▄▄ █▄█ █▄▀ ██▄
    """)

    # Plain informational lines, interleaved with blank separators.
    for line in (
        "> Open GPT-Code UI in your browser %s" % APP_URL,
        "",
        "You can inspect detailed logs in app.log.",
        "",
        "Find your OpenAI API key at https://platform.openai.com/account/api-keys",
        "",
    ):
        print(line)

    print_color("Contribute to GPT-Code UI at https://github.com/ricklamers/gpt-code-ui")
67 |
def main():
    """Start the webapp and kernel program as child processes, open the browser, and wait.

    Blocks until both children exit; Ctrl-C terminates them cleanly.
    """
    setup_logging()

    webapp_process = Process(target=run_webapp)
    kernel_program_process = Process(target=run_kernel_program)

    try:
        webapp_process.start()
        kernel_program_process.start()

        # Poll until the webapp answers before printing the banner and opening
        # the browser.
        # NOTE(review): test_client() exercises the in-process Flask app, not
        # the child process's server, so this is a best-effort readiness check
        # — confirm whether an HTTP probe of APP_URL was intended.
        while True:
            try:
                app.test_client().get("/")
                break
            except Exception:
                # Fix: was a bare `except:`, which would also swallow
                # KeyboardInterrupt during startup.
                time.sleep(0.1)

        print_banner()

        webbrowser.open(APP_URL)

        webapp_process.join()
        kernel_program_process.join()

    except KeyboardInterrupt:
        print("Terminating processes...")

        # Kill spawned kernels first so no orphan kernel outlives the manager.
        cleanup_kernel_program()
        kernel_program_process.terminate()

        webapp_process.terminate()

        webapp_process.join()
        kernel_program_process.join()

        print("Processes terminated.")
106 |
if __name__ == '__main__':
    # Direct-execution entry point (the packaged `gptcode` command runs main() too).
    main()
109 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.yaml:
--------------------------------------------------------------------------------
1 | name: Bug Report
2 | description: File a bug report
3 | labels: ["bug", "needs triage"]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | ## Before you start
9 | Please **make sure you are on the latest version.**
10 | If you encountered the issue after you installed, updated, or reloaded, **please try restarting before reporting the bug**.
11 |
12 | - type: checkboxes
13 | id: no-duplicate-issues
14 | attributes:
15 | label: "Please check that this issue hasn't been reported before."
      description: "The **Label filters** may help make your search more focused."
17 | options:
      - label: "I searched previous [Bug Reports](https://github.com/ricklamers/gpt-code-ui/labels/bug) and didn't find any similar reports."
19 | required: true
20 |
21 | - type: textarea
22 | id: expected
23 | attributes:
24 | label: Expected Behavior
25 | description: Tell us what **should** happen.
26 | validations:
27 | required: true
28 |
29 | - type: textarea
30 | id: what-happened
31 | attributes:
32 | label: Current behaviour
33 | description: |
34 | Tell us what happens instead of the expected behavior.
35 | Adding of screenshots really helps.
36 | validations:
37 | required: true
38 |
39 | - type: textarea
40 | id: reproduce
41 | attributes:
42 | label: Steps to reproduce
43 | description: |
44 | Which exact steps can a developer take to reproduce the issue?
45 | The more detail you provide, the easier it will be to narrow down and fix the bug.
46 | Please paste in tasks and/or queries **as text, not screenshots**.
47 | placeholder: |
48 | Example of the level of detail needed to reproduce any bugs efficiently and reliably.
49 | 1. Go to the '...' page.
50 | 2. Click on the '...' button.
51 | 3. Scroll down to '...'.
52 | 4. Observe the error.
53 | validations:
54 | required: true
55 |
56 | - type: textarea
57 | id: possible-solution
58 | attributes:
59 | label: Possible solution
60 | description: |
61 | Not obligatory, but please suggest a fix or reason for the bug, if you have an idea.
62 |
63 |
64 | - type: checkboxes
65 | id: operating-systems
66 | attributes:
67 | label: Which Operating Systems are you using?
68 | description: You may select more than one.
69 | options:
70 | - label: Android
71 | - label: iPhone/iPad
72 | - label: Linux
73 | - label: macOS
74 | - label: Windows
75 |
76 | - type: input
77 | id: Programming-version
78 | attributes:
79 | label: Programming Version
80 | description: For which programming language, what version are you using?
81 | placeholder: Typescript vX.Y / Python v3.xx
82 | validations:
83 | required: true
84 |
85 | - type: input
86 | id: gpt-code-ui-version
87 | attributes:
88 | label: gpt-code-ui Version
89 | description: Which gpt-code-ui version are you using?
90 | placeholder: Release v0.0.xx
91 | validations:
92 | required: true
93 |
94 | - type: checkboxes
95 | id: acknowledgements
96 | attributes:
97 | label: 'Acknowledgements'
98 | description: 'Please confirm the following:'
99 | options:
100 | - label: 'My issue title is concise, descriptive, and in title casing.'
101 | required: true
102 | - label: 'I have searched the existing issues to make sure this bug has not been reported yet.'
103 | required: true
104 | - label: 'I am using the latest version of gpt-code-ui.'
105 | required: true
106 | - label: 'I have provided enough information for the maintainers to reproduce and diagnose the issue.'
107 | required: true
108 |
109 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to gpt-code-ui
2 |
3 | First of all, thank you for your interest in contributing to gpt-code-ui! We appreciate the time and effort you're willing to invest in making our project better. This document provides guidelines and information to make the contribution process as smooth as possible.
4 |
5 | ## Table of Contents
6 |
7 | - [Code of Conduct](#code-of-conduct)
8 | - [Getting Started](#getting-started)
9 | - [How to Contribute](#how-to-contribute)
10 | - [Reporting Bugs](#reporting-bugs)
11 | - [Suggesting Enhancements](#suggesting-enhancements)
12 | - [Submitting Pull Requests](#submitting-pull-requests)
13 | - [Style Guidelines](#style-guidelines)
14 | - [Code Style](#code-style)
15 | - [Commit Messages](#commit-messages)
16 | - [Additional Resources](#additional-resources)
17 |
18 | ## Code of Conduct
19 |
20 | All contributors are expected to adhere to our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it before participating in the gpt-code-ui community.
21 |
22 | ## Getting Started
23 |
24 | 1. Fork the repository and clone it to your local machine.
25 | 2. Set up the development environment as follows:
26 |
27 | To install the package as an editable Python package run:
28 | ```sh
29 | pip install -e .
30 | ```
31 |
32 | To run the backend:
33 | ```sh
34 | gptcode
35 | ```
36 |
37 | To run the frontend in dev mode such that frontend code changes automatically update, set
38 | the following environment variable such that the frontend knows where to
39 | find the backend API server.
40 |
41 | `export VITE_WEB_ADDRESS=http://localhost:8080`
42 |
43 | Run the frontend in development mode (served by Vite):
44 | ```sh
45 | cd frontend
46 | npm run dev
47 | ```
48 |
49 | The HMR/auto reloading version of the frontend can now be found at http://localhost:5173 (default Vite port)
50 |
Note: the frontend served at `http://localhost:8080` is stale — it comes from the static bundled files that can
be generated by running `make compile_frontend`.
53 |
54 | With this setup, code changes to the Flask backend still require restarting `gptcode` (backend).
55 |
56 | 3. Explore the codebase, run tests, and verify that everything works as expected.
57 |
58 | ## How to Contribute
59 |
60 | ### Reporting Bugs
61 |
62 | If you encounter a bug or issue while using gpt-code-ui, please open a new issue on the [GitHub Issues](https://github.com/ricklamers/gpt-code-ui/issues) page. Provide a clear and concise description of the problem, steps to reproduce it, and any relevant error messages or logs.
63 |
64 | ### Suggesting Enhancements
65 |
66 | We welcome ideas for improvements and new features. To suggest an enhancement, open a new issue on the [GitHub Issues](https://github.com/ricklamers/gpt-code-ui/issues) page. Describe the enhancement in detail, explain the use case, and outline the benefits it would bring to the project.
67 |
68 | ### Submitting Pull Requests
69 |
70 | 1. Create a new branch for your feature or bugfix. Use a descriptive name like `feature/your-feature-name` or `fix/your-bugfix-name`.
71 | 2. Make your changes, following the [Style Guidelines](#style-guidelines) below.
72 | 3. Test your changes and ensure that they don't introduce new issues or break existing functionality.
73 | 4. Commit your changes, following the [commit message guidelines](#commit-messages).
74 | 5. Push your branch to your fork on GitHub.
75 | 6. Open a new pull request against the `main` branch of the gpt-code-ui repository. Include a clear and concise description of your changes, referencing any related issues.
76 |
77 | ## Style Guidelines
78 |
79 | ### Code Style
80 |
81 | gpt-code-ui uses [Black](https://black.readthedocs.io/en/stable/the_black_code_style/index.html) as its code style guide. Please ensure that your code follows these guidelines.
82 |
83 | ### Commit Messages
84 |
85 | Write clear and concise commit messages that briefly describe the changes made in each commit. Use the imperative mood and start with a capitalized verb, e.g., "Add new feature" or "Fix bug in function".
86 |
87 | ## Additional Resources
88 |
89 | - [GitHub Help](https://help.github.com/)
90 | - [GitHub Pull Request Documentation](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests)
91 | - [Black](https://black.readthedocs.io/en/stable/the_black_code_style/index.html)
92 |
93 | Thank you once again for your interest in contributing to gpt-code-ui. We look forward to collaborating with you and creating an even better project together!
94 |
95 |
--------------------------------------------------------------------------------
/frontend/src/components/Chat.tsx:
--------------------------------------------------------------------------------
1 | import "./Chat.css";
2 |
3 | import VoiceChatIcon from "@mui/icons-material/VoiceChat";
4 | import PersonIcon from "@mui/icons-material/Person";
5 | import TerminalIcon from '@mui/icons-material/Terminal';
6 | import FileUploadIcon from '@mui/icons-material/FileUpload';
7 | import { MessageDict } from "../App";
8 |
9 | import remarkGfm from 'remark-gfm';
10 | import SyntaxHighlighter from "react-syntax-highlighter";
11 | import { RefObject } from "react";
12 | import ReactMarkdown from 'react-markdown';
13 |
// Renders a single chat message: a role icon column plus the message body,
// shown as markdown, plain text, raw preformatted output, or an inline
// base64-encoded image depending on props.type.
// NOTE(review): most JSX element tags in this component appear to have been
// stripped by text extraction (source line numbers jump 49 -> 63 below);
// restore the markup from the original file before editing this component.
function Message(props: {
  text: string;
  role: string;
  type: string;
  showLoader?: boolean;
}) {
  let { text, role } = props;

  // Heuristic: treat text as markdown if it contains common markdown tokens
  // (links, bold, headings, images, inline code, list items, tables).
  const isMarkdown = (input: string) => {
    const mdRegex = /\[.*\]\(.*\)|\*\*.*\*\*|__.*__|\#.*|\!\[.*\]\(.*\)|`.*`|\- .*|\|.*\|/g;
    return mdRegex.test(input);
  };

  // Maps message role -> icon element (icon JSX stripped by extraction).
  let ICONS = {
    "upload": ,
    "generator": ,
    "system": ,
    "user": ,
  };

  return (
    
      
        
          { ICONS[role as keyof typeof ICONS] }
        
      
      
        {props.type == "message" &&
          (props.showLoader ? (
            
              {text} {props.showLoader ? 
 : null}
            
          ) : (
            isMarkdown(text) ? 
              
                  ) : (
                    
                      {children}
                    
                  )
                }
              }}
            />
             : 
            
          ))}

        {(props.type == "message_raw") &&
          (props.showLoader ? (
            
              {text} {props.showLoader ? 
 : null}
            
          ) : (
            
          ))}

        {props.type == "image/png" &&
          
` }}>
          }
        {props.type == "image/jpeg" &&
          
` }}>
          }
      
    
  );
}
94 |
95 |
// Status labels shown to the user while the backend is busy;
// Idle means no status indicator is rendered.
export enum WaitingStates {
  GeneratingCode = "Generating code",
  RunningCode = "Running code",
  UploadingFile = "Uploading file",
  Idle = "Idle",
}
102 |
// Scrollable message list: renders every chat message and, while the backend
// is busy (waitingForSystem != Idle), an extra synthetic system message with
// the current status text.
// NOTE(review): JSX element tags appear stripped by text extraction (line
// numbers jump 113 -> 119 and 122 -> 128); restore from the original file.
// The generic parameters of RefObject and Array were also lost.
export default function Chat(props: {
  waitingForSystem: WaitingStates;
  chatScrollRef: RefObject;
  messages: Array;
}) {
  return (
    <>
      
        {props.messages.map((message, index) => {
          return (
            
          );
        })}
        {props.waitingForSystem != WaitingStates.Idle ? (
          
        ) : null}
      
    >
  );
}
133 |
--------------------------------------------------------------------------------
/gpt_code_ui/kernel_program/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import sys
4 | import pathlib
5 | import json
6 | import logging
7 | import time
8 |
9 | import asyncio
10 | import json
11 | import threading
12 |
13 | from queue import Queue
14 |
15 | from flask import Flask, request, jsonify
16 | from flask_cors import CORS # Import the CORS library
17 |
18 | from dotenv import load_dotenv
19 | load_dotenv('.env')
20 |
21 | import gpt_code_ui.kernel_program.kernel_manager as kernel_manager
22 | import gpt_code_ui.kernel_program.config as config
23 | import gpt_code_ui.kernel_program.utils as utils
24 |
25 |
# Port of this kernel-program API server; overridable via the API_PORT env var.
APP_PORT = int(os.environ.get("API_PORT", 5010))

# Get global logger
logger = config.get_logger()

# Note, only one kernel_manager_process can be active
kernel_manager_process = None

# Use efficient Python queues to store messages
result_queue = Queue()  # kernel output waiting to be fetched via GET /api
send_queue = Queue()    # commands from POST /api awaiting forwarding to the kernel

# snakemq messaging handle; assigned in start_snakemq()
messaging = None

# We know this Flask app is for local use. So we can disable the verbose Werkzeug logger
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

# Also suppress Flask's startup banner on the console.
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the dev frontend
49 |
def start_kernel_manager():
    """Spawn kernel_manager.py as a subprocess and record its PID file.

    The PID file (named "<pid>.pid", containing "kernel_manager") lets
    cleanup code locate and kill the process later.
    """
    global kernel_manager_process

    script = os.path.join(pathlib.Path(__file__).parent.resolve(), "kernel_manager.py")
    kernel_manager_process = subprocess.Popen([sys.executable, script])

    os.makedirs(config.KERNEL_PID_DIR, exist_ok=True)
    pid_file = os.path.join(config.KERNEL_PID_DIR, "%d.pid" % kernel_manager_process.pid)
    with open(pid_file, "w") as fh:
        fh.write("kernel_manager")
64 |
def cleanup_kernel_program():
    # Delegate to kernel_manager, which kills every process recorded in the PID dir.
    kernel_manager.cleanup_spawned_processes()
67 |
async def start_snakemq():
    """Connect to the kernel manager over snakemq and pump both queues.

    Registers a receive callback that feeds kernel output into result_queue,
    then runs two executor tasks concurrently: one forwarding send_queue
    commands to the kernel manager, one driving the snakemq link loop.
    Does not return under normal operation.
    """
    global messaging

    messaging, link = utils.init_snakemq(config.IDENT_MAIN)

    def on_recv(conn, ident, message):
        # snakemq delivers raw bytes; payloads are JSON dicts with type/value.
        message = json.loads(message.data.decode("utf-8"))

        if message["type"] == "status":
            if message["value"] == "ready":
                logger.debug("Kernel is ready.")
                result_queue.put({
                    "value":"Kernel is ready.",
                    "type": "message"
                })

        elif message["type"] in ["message", "message_raw", "image/png", "image/jpeg"]:
            # TODO: 1:1 kernel <> channel mapping
            logger.debug("%s of type %s" % (message["value"], message["type"]))

            result_queue.put({
                "value": message["value"],
                "type": message["type"]
            })

    messaging.on_message_recv.add(on_recv)
    logger.info("Starting snakemq loop")

    def send_queued_messages():
        # Runs in a worker thread: poll send_queue every 100 ms and forward
        # each queued command to the kernel manager for execution.
        while True:
            if send_queue.qsize() > 0:
                message = send_queue.get()
                utils.send_json(messaging,
                    {"type": "execute", "value": message["command"]},
                    config.IDENT_KERNEL_MANAGER
                )
            time.sleep(0.1)

    async def async_send_queued_messages():
        # Off-load the blocking polling loop to the default thread executor.
        loop = asyncio.get_event_loop()
        await loop.run_in_executor(None, send_queued_messages)

    async def async_link_loop():
        # snakemq's link.loop() blocks, so it also runs in an executor thread.
        loop = asyncio.get_event_loop()
        await loop.run_in_executor(None, link.loop)

    # Wrap the snakemq_link.Link loop in an asyncio task
    await asyncio.gather(async_send_queued_messages(), async_link_loop())
116 |
117 |
@app.route("/api", methods=["POST", "GET"])
def handle_request():
    """GET: drain and return all pending kernel results. POST: queue a command."""
    if request.method == "GET":
        # Handle GET requests by sending everything that's in the result_queue.
        # qsize-then-get is safe here only because this route is the sole consumer.
        results = [result_queue.get() for _ in range(result_queue.qsize())]
        return jsonify({"results": results})
    elif request.method == "POST":
        data = request.json

        # Forwarded verbatim; send_queued_messages() reads data["command"],
        # so the expected shape is {"command": "<python source>"}.
        send_queue.put(data)

        return jsonify({"result": "success"})
131 |
@app.route("/restart", methods=["POST"])
def handle_restart():
    """Kill all spawned kernel processes and start a fresh kernel manager."""
    cleanup_kernel_program()
    start_kernel_manager()

    return jsonify({"result": "success"})
139 |
140 |
async def main():
    """Entry point: launch the kernel manager, the Flask API thread, and the snakemq loop."""
    start_kernel_manager()

    # Run Flask app in a separate thread
    flask_thread = threading.Thread(target=run_flask_app)
    flask_thread.start()

    # Blocks forever, pumping messages between the API queues and the kernel.
    await start_snakemq()
150 |
151 |
def run_flask_app():
    # Bound to 0.0.0.0 so the webapp (and the dev frontend) can reach this local API.
    app.run(host="0.0.0.0", port=APP_PORT)
154 |
if __name__ == "__main__":
    # Run directly (gpt_code_ui.main imports main() and runs it the same way).
    asyncio.run(main())
157 |
158 |
159 |
160 |
--------------------------------------------------------------------------------
/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 |
2 | # Contributor Covenant Code of Conduct
3 |
4 | ## Our Pledge
5 |
6 | We as members, contributors, and leaders pledge to make participation in our
7 | community a harassment-free experience for everyone, regardless of age, body
8 | size, visible or invisible disability, ethnicity, sex characteristics, gender
9 | identity and expression, level of experience, education, socio-economic status,
10 | nationality, personal appearance, race, caste, color, religion, or sexual
11 | identity and orientation.
12 |
13 | We pledge to act and interact in ways that contribute to an open, welcoming,
14 | diverse, inclusive, and healthy community.
15 |
16 | ## Our Standards
17 |
18 | Examples of behavior that contributes to a positive environment for our
19 | community include:
20 |
21 | * Demonstrating empathy and kindness toward other people
22 | * Being respectful of differing opinions, viewpoints, and experiences
23 | * Giving and gracefully accepting constructive feedback
24 | * Accepting responsibility and apologizing to those affected by our mistakes,
25 | and learning from the experience
26 | * Focusing on what is best not just for us as individuals, but for the overall
27 | community
28 |
29 | Examples of unacceptable behavior include:
30 |
31 | * The use of sexualized language or imagery, and sexual attention or advances of
32 | any kind
33 | * Trolling, insulting or derogatory comments, and personal or political attacks
34 | * Public or private harassment
35 | * Publishing others' private information, such as a physical or email address,
36 | without their explicit permission
37 | * Other conduct which could reasonably be considered inappropriate in a
38 | professional setting
39 |
40 | ## Enforcement Responsibilities
41 |
42 | Community leaders are responsible for clarifying and enforcing our standards of
43 | acceptable behavior and will take appropriate and fair corrective action in
44 | response to any behavior that they deem inappropriate, threatening, offensive,
45 | or harmful.
46 |
47 | Community leaders have the right and responsibility to remove, edit, or reject
48 | comments, commits, code, wiki edits, issues, and other contributions that are
49 | not aligned to this Code of Conduct, and will communicate reasons for moderation
50 | decisions when appropriate.
51 |
52 | ## Scope
53 |
54 | This Code of Conduct applies within all community spaces, and also applies when
55 | an individual is officially representing the community in public spaces.
56 | Examples of representing our community include using an official e-mail address,
57 | posting via an official social media account, or acting as an appointed
58 | representative at an online or offline event.
59 |
60 | ## Enforcement
61 |
62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
63 | reported to the community leaders responsible for enforcement at
64 | ricklamers at gmail dot com.
65 | All complaints will be reviewed and investigated promptly and fairly.
66 |
67 | All community leaders are obligated to respect the privacy and security of the
68 | reporter of any incident.
69 |
70 | ## Enforcement Guidelines
71 |
72 | Community leaders will follow these Community Impact Guidelines in determining
73 | the consequences for any action they deem in violation of this Code of Conduct:
74 |
75 | ### 1. Correction
76 |
77 | **Community Impact**: Use of inappropriate language or other behavior deemed
78 | unprofessional or unwelcome in the community.
79 |
80 | **Consequence**: A private, written warning from community leaders, providing
81 | clarity around the nature of the violation and an explanation of why the
82 | behavior was inappropriate. A public apology may be requested.
83 |
84 | ### 2. Warning
85 |
86 | **Community Impact**: A violation through a single incident or series of
87 | actions.
88 |
89 | **Consequence**: A warning with consequences for continued behavior. No
90 | interaction with the people involved, including unsolicited interaction with
91 | those enforcing the Code of Conduct, for a specified period of time. This
92 | includes avoiding interactions in community spaces as well as external channels
93 | like social media. Violating these terms may lead to a temporary or permanent
94 | ban.
95 |
96 | ### 3. Temporary Ban
97 |
98 | **Community Impact**: A serious violation of community standards, including
99 | sustained inappropriate behavior.
100 |
101 | **Consequence**: A temporary ban from any sort of interaction or public
102 | communication with the community for a specified period of time. No public or
103 | private interaction with the people involved, including unsolicited interaction
104 | with those enforcing the Code of Conduct, is allowed during this period.
105 | Violating these terms may lead to a permanent ban.
106 |
107 | ### 4. Permanent Ban
108 |
109 | **Community Impact**: Demonstrating a pattern of violation of community
110 | standards, including sustained inappropriate behavior, harassment of an
111 | individual, or aggression toward or disparagement of classes of individuals.
112 |
113 | **Consequence**: A permanent ban from any sort of public interaction within the
114 | community.
115 |
116 | ## Attribution
117 |
118 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
119 | version 2.1, available at
120 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
121 |
122 | Community Impact Guidelines were inspired by
123 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
124 |
125 | For answers to common questions about this code of conduct, see the FAQ at
126 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
127 | [https://www.contributor-covenant.org/translations][translations].
128 |
129 | [homepage]: https://www.contributor-covenant.org
130 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
131 | [Mozilla CoC]: https://github.com/mozilla/diversity
132 | [FAQ]: https://www.contributor-covenant.org/faq
133 | [translations]: https://www.contributor-covenant.org/translations
134 |
--------------------------------------------------------------------------------
/gpt_code_ui/kernel_program/kernel_manager.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import subprocess
3 | import os
4 | import queue
5 | import json
6 | import signal
7 | import pathlib
8 | import threading
9 | import time
10 | import atexit
11 | import traceback
12 |
13 | from time import sleep
14 | from jupyter_client import BlockingKernelClient
15 |
16 | from dotenv import load_dotenv
17 | load_dotenv('.env')
18 |
19 | import gpt_code_ui.kernel_program.utils as utils
20 | import gpt_code_ui.kernel_program.config as config
21 |
22 | # Set up globals
23 | messaging = None
24 | logger = config.get_logger()
25 |
26 |
class FlushingThread(threading.Thread):
    """Background thread that flushes kernel IOPub messages once per second.

    Releasing *kill_sema* (initially held by the owner) makes the thread exit.
    """

    def __init__(self, kc, kill_sema):
        super().__init__()
        self.kill_sema = kill_sema
        self.kc = kc

    def run(self):
        logger.info("Running message flusher...")
        # Keep flushing until the kill semaphore becomes acquirable.
        while not self.kill_sema.acquire(blocking=False):
            flush_kernel_msgs(self.kc)
            time.sleep(1)
        logger.info("Sema was released to kill thread")
        sys.exit()
43 |
44 |
def cleanup_spawned_processes():
    """Kill every process recorded in config.KERNEL_PID_DIR and delete its PID file.

    Best-effort: kill failures (already-dead processes, flaky Windows
    signalling) are ignored so cleanup always makes progress.
    """
    print("Cleaning up kernels...")
    for filename in os.listdir(config.KERNEL_PID_DIR):
        fp = os.path.join(config.KERNEL_PID_DIR, filename)
        if os.path.isfile(fp):
            try:
                pid = int(filename.split(".pid")[0])
                logger.debug("Killing process with pid %s" % pid)
                # Remove the PID file up-front so a failed kill cannot leave a
                # stale entry behind. (Fix: the file was previously removed a
                # second time after the kill, which always raised and was
                # silently swallowed.)
                os.remove(fp)
                try:
                    if os.name == "nt":
                        os.kill(pid, signal.CTRL_BREAK_EVENT)
                    else:
                        os.kill(pid, signal.SIGKILL)
                except Exception:
                    # Process may already be gone; Windows process killing is flaky
                    pass
            except Exception as e:
                # e.g. a file that does not follow the "<pid>.pid" naming scheme
                logger.debug(e)
68 |
69 |
def start_snakemq(kc):
    """Connect to the main process over snakemq and execute incoming commands.

    Args:
        kc: BlockingKernelClient connected to the spawned IPython kernel.

    Blocks in the snakemq link loop; exits the process on error or Ctrl-C.
    """
    global messaging

    messaging, link = utils.init_snakemq(config.IDENT_KERNEL_MANAGER, "connect")

    def on_recv(conn, ident, message):
        # Only accept commands coming from the main kernel-program process.
        if ident == config.IDENT_MAIN:
            message = json.loads(message.data.decode("utf-8"))

            if message["type"] == "execute":
                logger.debug("Executing command: %s" % message["value"])
                kc.execute(message["value"])
                # Try direct flush with default wait (0.2)
                flush_kernel_msgs(kc)

    messaging.on_message_recv.add(on_recv)

    # Background thread that flushes kernel output once per second.
    start_flusher(kc)

    # Send alive
    utils.send_json(messaging, {"type": "status", "value": "ready"}, config.IDENT_MAIN)
    logger.info("Python kernel ready to receive messages!")

    logger.info("Starting snakemq loop")

    try:
        link.loop()
    except KeyboardInterrupt:
        logger.info("Keyboard interrupt received, exiting...")
        sys.exit(0)
    except Exception as e:
        logger.error("Error in snakemq loop: %s" % e)
        sys.exit(1)
103 |
104 |
def start_flusher(kc):
    """Start the FlushingThread for *kc* and arrange for it to stop at interpreter exit."""
    # Semaphore starts at 1; acquiring drops it to 0 so the thread's
    # non-blocking acquire fails (i.e. the thread keeps running).
    kill_sema = threading.Semaphore()
    kill_sema.acquire()
    t = FlushingThread(kc, kill_sema)
    t.start()

    def end_thread():
        # Releasing the semaphore signals FlushingThread.run() to exit.
        kill_sema.release()

    atexit.register(end_thread)
116 |
117 |
def send_message(message, message_type="message"):
    """Forward *message* to the main process over snakemq, tagged with *message_type*."""
    payload = {"type": message_type, "value": message}
    utils.send_json(messaging, payload, config.IDENT_MAIN)
122 |
123 |
def flush_kernel_msgs(kc, tries=1, timeout=0.2):
    """Drain pending IOPub messages from the kernel client and forward them.

    Args:
        kc: Jupyter client exposing get_iopub_msg() (BlockingKernelClient).
        tries: Number of consecutive empty polls before giving back control.
        timeout: Seconds to wait for each get_iopub_msg() call.
    """
    try:
        hit_empty = 0

        while True:
            try:
                msg = kc.get_iopub_msg(timeout=timeout)
                if msg["msg_type"] == "execute_result":
                    if "text/plain" in msg["content"]["data"]:
                        send_message(
                            msg["content"]["data"]["text/plain"], "message_raw"
                        )
                if msg["msg_type"] == "display_data":
                    if "image/png" in msg["content"]["data"]:
                        # Forward base64 image data for the frontend to render inline.
                        send_message(
                            msg["content"]["data"]["image/png"],
                            message_type="image/png",
                        )
                    elif "text/plain" in msg["content"]["data"]:
                        send_message(msg["content"]["data"]["text/plain"])

                elif msg["msg_type"] == "stream":
                    logger.debug("Received stream output %s" % msg["content"]["text"])
                    send_message(msg["content"]["text"])
                elif msg["msg_type"] == "error":
                    # Strip ANSI color codes from the traceback before forwarding.
                    send_message(
                        utils.escape_ansi("\n".join(msg["content"]["traceback"])),
                        "message_raw",
                    )
            except queue.Empty:
                hit_empty += 1
                if hit_empty == tries:
                    # Queue stayed empty for `tries` polls; give back control.
                    break
            except (ValueError, IndexError):
                # get_iopub_msg suffers from message fetch errors
                break
            except Exception as e:
                # Fix: the log f-string was missing its closing bracket.
                logger.debug(f"{e} [{type(e)}]")
                logger.debug(traceback.format_exc())
                break
    except Exception as e:
        # Fix: same unbalanced bracket in this log message.
        logger.debug(f"{e} [{type(e)}]")
168 |
169 |
def start_kernel():
    """Spawn an IPython kernel subprocess and return a ready client for it.

    Returns:
        BlockingKernelClient connected to the new kernel, with channels
        started and the kernel confirmed ready.
    """
    kernel_connection_file = os.path.join(os.getcwd(), "kernel_connection_file.json")

    # Remove any stale connection file (or directory) left by a previous run.
    if os.path.isfile(kernel_connection_file):
        os.remove(kernel_connection_file)
    if os.path.isdir(kernel_connection_file):
        os.rmdir(kernel_connection_file)

    launch_kernel_script_path = os.path.join(
        pathlib.Path(__file__).parent.resolve(), "launch_kernel.py"
    )

    # The kernel runs with workspace/ as its cwd, so user code and uploaded
    # files share one directory.
    os.makedirs('workspace/', exist_ok=True)

    kernel_process = subprocess.Popen(
        [
            sys.executable,
            launch_kernel_script_path,
            "--IPKernelApp.connection_file",
            kernel_connection_file,
            "--matplotlib=inline",
            "--quiet",
        ],
        cwd='workspace/'
    )
    # Write PID for caller to kill
    str_kernel_pid = str(kernel_process.pid)
    os.makedirs(config.KERNEL_PID_DIR, exist_ok=True)
    with open(os.path.join(config.KERNEL_PID_DIR, str_kernel_pid + ".pid"), "w") as p:
        p.write("kernel")

    # Wait for kernel connection file to be written
    while True:
        if not os.path.isfile(kernel_connection_file):
            sleep(0.1)
        else:
            # Keep looping if JSON parsing fails, file may be partially written
            try:
                with open(kernel_connection_file, 'r') as fp:
                    json.load(fp)
                break
            except json.JSONDecodeError:
                pass

    # Client
    kc = BlockingKernelClient(connection_file=kernel_connection_file)
    kc.load_connection_file()
    kc.start_channels()
    kc.wait_for_ready()
    return kc
220 |
221 |
# Entry point: start the kernel subprocess, then bridge its IOPub output
# over snakemq until this process is terminated.
if __name__ == "__main__":
    kc = start_kernel()
    start_snakemq(kc)
--------------------------------------------------------------------------------
/frontend/src/App.tsx:
--------------------------------------------------------------------------------
1 | import "./App.css";
2 | import Input from "./components/Input";
3 | import Sidebar from "./components/Sidebar";
4 | import Chat, { WaitingStates } from "./components/Chat";
5 | import React, { useState, useEffect } from "react";
6 | import Config from "./config";
7 | import { useLocalStorage } from "usehooks-ts";
8 |
// One chat entry: the payload text, who produced it (role: user / generator /
// system / upload), and how to render it (type, e.g. "message" or a MIME type).
export type MessageDict = {
  text: string;
  role: string;
  type: string;
};
14 |
function App() {
  // Inputs treated as commands instead of prompts.
  const COMMANDS = ["reset"];

  // Model list; seeded with a default and replaced by the backend /models response.
  let [MODELS, setModels] = useState([{displayName: "GPT-3.5", name: "gpt-3.5-turbo"}]);

  useEffect(() => {
    const getModels = async () => {
      try {
        const response = await fetch(`${Config.WEB_ADDRESS}/models`);
        const json = await response.json();
        setModels(json);
      } catch (e) {
        console.error(e);
      };
    };

    getModels();
  }, []);

  // Model choice and OpenAI key persist across sessions in localStorage.
  let [selectedModel, setSelectedModel] = useLocalStorage(
    "model",
    MODELS[0].name
  );

  let [openAIKey, setOpenAIKey] = useLocalStorage("OpenAIKey", "");

  // NOTE(review): generic type parameters appear stripped below
  // (e.g. `useState>(` / `useRef(null)` without <...>); restore from VCS.
  let [messages, setMessages] = useState>(
    Array.from([
      {
        text: "Hello! I'm a GPT Code assistant. Ask me to do something for you! Pro tip: you can upload a file and I'll be able to use it.",
        role: "generator",
        type: "message",
      },
      {
        text: "If I get stuck just type 'reset' and I'll restart the kernel.",
        role: "generator",
        type: "message",
      },
    ])
  );
  let [waitingForSystem, setWaitingForSystem] = useState(
    WaitingStates.Idle
  );
  // Ref to the scrollable chat container, used for auto-scroll on new messages.
  const chatScrollRef = React.useRef(null);
59 |
60 | const submitCode = async (code: string) => {
61 | fetch(`${Config.API_ADDRESS}/api`, {
62 | method: "POST",
63 | headers: {
64 | "Content-Type": "application/json",
65 | },
66 | body: JSON.stringify({ command: code }),
67 | })
68 | .then(() => {})
69 | .catch((error) => console.error("Error:", error));
70 | };
71 |
72 | const addMessage = (message: MessageDict) => {
73 | setMessages((state: any) => {
74 | return [...state, message];
75 | });
76 | };
77 |
78 | const handleCommand = (command: string) => {
79 | if (command == "reset") {
80 | addMessage({ text: "Restarting the kernel.", type: "message", role: "system" });
81 |
82 | fetch(`${Config.API_ADDRESS}/restart`, {
83 | method: "POST",
84 | headers: {
85 | "Content-Type": "application/json",
86 | },
87 | body: JSON.stringify({}),
88 | })
89 | .then(() => {})
90 | .catch((error) => console.error("Error:", error));
91 | }
92 | };
93 |
  // Main submit path: route commands, echo the user message, ask the backend
  // to generate code, then forward any returned code to the kernel.
  const sendMessage = async (userInput: string) => {
    try {
      if (COMMANDS.includes(userInput)) {
        handleCommand(userInput);
        return;
      }

      if (userInput.length == 0) {
        return;
      }

      addMessage({ text: userInput, type: "message", role: "user" });
      setWaitingForSystem(WaitingStates.GeneratingCode);

      const response = await fetch(`${Config.WEB_ADDRESS}/generate`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          prompt: userInput,
          model: selectedModel,
          openAIKey: openAIKey,
        }),
      });

      const data = await response.json();
      const code = data.code;

      // The backend returns explanatory text even on failure, so show it first
      addMessage({ text: data.text, type: "message", role: "generator" });

      if (response.status != 200) {
        setWaitingForSystem(WaitingStates.Idle);
        return;
      }

      // Only flip into RunningCode when there is actually code to execute
      if (!!code) {
        submitCode(code);
        setWaitingForSystem(WaitingStates.RunningCode);
      } else {
        setWaitingForSystem(WaitingStates.Idle);
      }
    } catch (error) {
      console.error(
        "There has been a problem with your fetch operation:",
        error
      );
    }
  };
143 |
  // Poll the kernel proxy for new execution results. Skipped while the tab is
  // hidden to avoid background traffic.
  async function getApiData() {
    if(document.hidden){
      return;
    }

    let response = await fetch(`${Config.API_ADDRESS}/api`);
    let data = await response.json();
    data.results.forEach(function (result: {value: string, type: string}) {
      // Ignore whitespace-only outputs
      if (result.value.trim().length == 0) {
        return;
      }

      addMessage({ text: result.value, type: result.type, role: "system" });
      setWaitingForSystem(WaitingStates.Idle);
    });
  }
160 |
161 | function completeUpload(message: string) {
162 | addMessage({ text: message, type: "message", role: "upload" });
163 | setWaitingForSystem(WaitingStates.Idle);
164 |
165 | // Inform prompt server
166 | fetch(`${Config.WEB_ADDRESS}/inject-context`, {
167 | method: "POST",
168 | headers: {
169 | "Content-Type": "application/json",
170 | },
171 | body: JSON.stringify({
172 | prompt: message,
173 | }),
174 | })
175 | .then(() => {})
176 | .catch((error) => console.error("Error:", error));
177 | }
178 |
  // The filename argument is unused; we only flip the UI into its uploading state.
  function startUpload(_: string) {
    setWaitingForSystem(WaitingStates.UploadingFile);
  }
182 |
  // Poll the kernel every second for new results.
  // NOTE(review): getApiData is re-created each render, so this interval is
  // torn down and re-registered on every render — confirm that is intended.
  React.useEffect(() => {
    const interval = setInterval(getApiData, 1000);
    return () => clearInterval(interval);
  }, [getApiData]);

  React.useEffect(() => {
    // Scroll down container by setting scrollTop to the height of the container
    chatScrollRef.current!.scrollTop = chatScrollRef.current!.scrollHeight;
  }, [chatScrollRef, messages]);
192 |
193 |
  // Capture clicks for download links
  React.useEffect(() => {
    const clickHandler = (event: any) => {
      let element = event.target;

      // If an element was found, prevent default action and do something else
      if (element != null && element.tagName === 'A') {
        // Check if href starts with /download

        if (element.getAttribute("href").startsWith(`/download`)) {
          event.preventDefault();

          // Make request to ${Config.WEB_ADDRESS}/download instead
          // make it by opening a new tab
          window.open(`${Config.WEB_ADDRESS}${element.getAttribute("href")}`);
        }
      }
    };

    // Add the click event listener to the document
    document.addEventListener('click', clickHandler);

    // Cleanup function to remove the event listener when the component unmounts
    return () => {
      document.removeEventListener('click', clickHandler);
    };
  }, []);
221 |
222 | return (
223 | <>
224 |
249 | >
250 | );
251 | }
252 |
253 | export default App;
254 |
--------------------------------------------------------------------------------
/gpt_code_ui/webapp/main.py:
--------------------------------------------------------------------------------
1 | # The GPT web UI as a template based Flask app
2 | import os
3 | import requests
4 | import asyncio
5 | import json
6 | import re
7 | import logging
8 | import sys
9 | import openai
10 | import pandas as pd
11 |
12 | from collections import deque
13 |
14 | from flask_cors import CORS
15 | from flask import Flask, request, jsonify, send_from_directory, Response
16 | from dotenv import load_dotenv
17 |
18 | from gpt_code_ui.kernel_program.main import APP_PORT as KERNEL_APP_PORT
19 |
load_dotenv('.env')

# OpenAI client configuration is taken entirely from environment variables.
openai.api_version = os.environ.get("OPENAI_API_VERSION")
openai.log = os.getenv("OPENAI_API_LOGLEVEL")
OPENAI_EXTRA_HEADERS = json.loads(os.environ.get("OPENAI_EXTRA_HEADERS", "{}"))

# Model list offered to the UI: plain OpenAI reads OPENAI_MODELS (with a
# default), Azure requires AZURE_OPENAI_DEPLOYMENTS to be set explicitly.
if openai.api_type == "open_ai":
    AVAILABLE_MODELS = json.loads(os.environ.get("OPENAI_MODELS", '''[{"displayName": "GPT-3.5", "name": "gpt-3.5-turbo"}, {"displayName": "GPT-4", "name": "gpt-4"}]'''))
elif openai.api_type == "azure":
    try:
        AVAILABLE_MODELS = json.loads(os.environ["AZURE_OPENAI_DEPLOYMENTS"])
    except KeyError as e:
        raise RuntimeError('AZURE_OPENAI_DEPLOYMENTS environment variable not set') from e
else:
    raise ValueError(f'Invalid OPENAI_API_TYPE: {openai.api_type}')

# Uploads go into the kernel's working directory so generated code can read them
UPLOAD_FOLDER = 'workspace/'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)


APP_PORT = int(os.environ.get("WEB_PORT", 8080))
41 |
42 |
class LimitedLengthString:
    """Rolling text buffer that exposes only the trailing ``maxlen`` characters.

    Used as the prompt-history buffer: old chunks are evicted from the front
    as new ones are appended.
    """

    def __init__(self, maxlen=2000):
        self.data = deque()   # appended chunks, oldest first
        self.len = 0          # total characters currently held
        self.maxlen = maxlen  # maximum characters get_string() returns

    def append(self, string):
        """Add a chunk, evicting oldest chunks once the budget is exceeded.

        Bugfix: always retain at least the newest chunk. Previously a single
        chunk longer than ``maxlen`` evicted itself, silently emptying the
        buffer instead of keeping its tail.
        """
        self.data.append(string)
        self.len += len(string)
        while self.len > self.maxlen and len(self.data) > 1:
            popped = self.data.popleft()
            self.len -= len(popped)

    def get_string(self):
        """Return at most the last ``maxlen`` characters of the stored text."""
        result = ''.join(self.data)
        return result[-self.maxlen:]
59 |
60 |
61 | message_buffer = LimitedLengthString()
62 |
63 |
def allowed_file(filename):
    # Placeholder filter: every filename is currently accepted.
    # NOTE(review): consider restricting extensions if untrusted uploads matter.
    return True
66 |
67 |
def inspect_file(filename: str) -> str:
    """Return a one-line description of a tabular file's columns.

    Returns '' for unsupported extensions or any read failure — this is a
    best-effort hint for the prompt, never an error path.
    """
    READER_MAP = {
        '.csv': pd.read_csv,
        # bugfix: TSV needs an explicit tab separator; plain read_csv would
        # lump every row into a single comma-less column
        '.tsv': lambda fn: pd.read_csv(fn, sep='\t'),
        '.xlsx': pd.read_excel,
        '.xls': pd.read_excel,
        '.xml': pd.read_xml,
        '.json': pd.read_json,
        '.hdf': pd.read_hdf,
        '.hdf5': pd.read_hdf,
        '.feather': pd.read_feather,
        '.parquet': pd.read_parquet,
        '.pkl': pd.read_pickle,
        '.sql': pd.read_sql,
    }

    _, ext = os.path.splitext(filename)

    try:
        df = READER_MAP[ext.lower()](filename)
        return f'The file contains the following columns: {", ".join(df.columns)}'
    except KeyError:
        return ''  # unsupported file type
    except Exception:
        return ''  # file reading failed. - Don't want to know why.
93 |
94 |
async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
    """Ask the configured OpenAI model to generate code for *user_prompt*.

    Returns a tuple (code_or_None, text, http_status). The prompt embeds the
    rolling history from message_buffer.
    """
    # NOTE(review): the "Teacher mode" sentence below appears to have lost an
    # HTML <a> example during extraction ("... HTML string: Download file .");
    # restore the anchor markup from VCS before relying on it.
    prompt = f"""First, here is a history of what I asked you to do earlier.
The actual prompt follows after ENDOFHISTORY.
History:
{message_buffer.get_string()}
ENDOFHISTORY.
Write Python code, in a triple backtick Markdown code block, that does the following:
{user_prompt}

Notes:
First, think step by step what you want to do and write it down in English.
Then generate valid Python code in a code block
Make sure all code is valid - it will be run in a Jupyter Python 3 kernel environment.
Define every variable before you use it.
For data munging, you can use
'numpy', # numpy==1.24.3
'dateparser' #dateparser==1.1.8
'pandas', # matplotlib==1.5.3
'geopandas' # geopandas==0.13.2
For pdf extraction, you can use
'PyPDF2', # PyPDF2==3.0.1
'pdfminer', # pdfminer==20191125
'pdfplumber', # pdfplumber==0.9.0
For data visualization, you can use
'matplotlib', # matplotlib==3.7.1
Be sure to generate charts with matplotlib. If you need geographical charts, use geopandas with the geopandas.datasets module.
If the user has just uploaded a file, focus on the file that was most recently uploaded (and optionally all previously uploaded files)

Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: Download file . Replace INSERT_FILENAME_HERE with the actual filename."""

    # A per-request key (from the UI) overrides the environment-configured one
    if user_openai_key:
        openai.api_key = user_openai_key

    arguments = dict(
        temperature=0.7,
        headers=OPENAI_EXTRA_HEADERS,
        messages=[
            # {"role": "system", "content": system},
            {"role": "user", "content": prompt},
        ]
    )

    # Plain OpenAI selects by model name; Azure selects by deployment id
    if openai.api_type == 'open_ai':
        arguments["model"] = model
    elif openai.api_type == 'azure':
        arguments["deployment_id"] = model
    else:
        return None, f"Error: Invalid OPENAI_PROVIDER: {openai.api_type}", 500

    try:
        result_GPT = openai.ChatCompletion.create(**arguments)

        if 'error' in result_GPT:
            raise openai.APIError(code=result_GPT.error.code, message=result_GPT.error.message)

        if result_GPT.choices[0].finish_reason == 'content_filter':
            raise openai.APIError('Content Filter')

    except openai.OpenAIError as e:
        return None, f"Error from API: {e}", 500

    try:
        content = result_GPT.choices[0].message.content

    except AttributeError:
        # bugfix: the previous message referenced `content`, which is unbound
        # exactly when this branch runs (the assignment above just failed),
        # raising UnboundLocalError and masking the real API problem
        return None, f"Malformed answer from API: {result_GPT}", 500

    def extract_code(text):
        # Match triple backtick blocks first
        triple_match = re.search(r'```(?:\w+\n)?(.+?)```', text, re.DOTALL)
        if triple_match:
            return triple_match.group(1).strip()
        else:
            # If no triple backtick blocks, match single backtick blocks
            single_match = re.search(r'`(.+?)`', text, re.DOTALL)
            if single_match:
                return single_match.group(1).strip()

    return extract_code(content), content.strip(), 200
175 |
# We know this Flask app is for local use. So we can disable the verbose Werkzeug logger
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

# Also suppress Flask's "Running on http://..." startup banner
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# The frontend dev server runs on a different port, so allow cross-origin requests
CORS(app)
187 |
188 |
@app.route('/')
def index():
    """Serve the compiled single-page frontend."""
    bundled_index = os.path.join(app.root_path, 'static/index.html')
    # Warn (but still attempt to serve) when the frontend build is missing
    if not os.path.exists(bundled_index):
        print("index.html not found in static folder. Exiting. Did you forget to run `make compile_frontend` before installing the local package?")

    return send_from_directory('static', 'index.html')
197 |
198 |
199 | @app.route("/models")
200 | def models():
201 | return jsonify(AVAILABLE_MODELS)
202 |
203 |
@app.route('/api/', methods=["GET", "POST"])
def proxy_kernel_manager(path):
    """Forward API calls to the kernel-manager process and relay its response.

    NOTE(review): the route rule declares no parameter yet the view expects
    `path` — a `<path:path>` converter was likely lost from the decorator
    (angle brackets stripped during extraction); restore from VCS.
    """
    if request.method == "POST":
        resp = requests.post(
            f'http://localhost:{KERNEL_APP_PORT}/{path}', json=request.get_json())
    else:
        resp = requests.get(f'http://localhost:{KERNEL_APP_PORT}/{path}')

    # Hop-by-hop headers must not be copied through a proxy
    excluded_headers = ['content-encoding',
                        'content-length', 'transfer-encoding', 'connection']
    headers = [(name, value) for (name, value) in resp.raw.headers.items()
               if name.lower() not in excluded_headers]

    response = Response(resp.content, resp.status_code, headers)
    return response
219 |
220 |
@app.route('/assets/')
def serve_static(path):
    """Serve bundled frontend assets.

    NOTE(review): the rule declares no parameter yet the view expects `path` —
    a `<path:path>` converter was likely stripped during extraction; restore
    from VCS.
    """
    return send_from_directory('static/assets/', path)
224 |
225 |
@app.route('/download')
def download_file():
    """Stream a workspace file as an attachment; filename comes from ?file=."""

    # Get query argument file
    file = request.args.get('file')
    # from `workspace/` send the file
    # make sure to set required headers to make it download the file
    # (send_from_directory also refuses paths escaping the workspace directory)
    return send_from_directory(os.path.join(os.getcwd(), 'workspace'), file, as_attachment=True)
234 |
235 |
@app.route('/inject-context', methods=['POST'])
def inject_context():
    """Record out-of-band context (e.g. upload notices) in the prompt history."""
    user_prompt = request.json.get('prompt', '')

    # Append all messages to the message buffer for later use
    message_buffer.append(user_prompt + "\n\n")

    return jsonify({"result": "success"})
244 |
245 |
@app.route('/generate', methods=['POST'])
def generate_code():
    """Generate code for a prompt and return {'code', 'text'} plus a status."""
    user_prompt = request.json.get('prompt', '')
    user_openai_key = request.json.get('openAIKey', None)
    model = request.json.get('model', None)

    # get_code is a coroutine; run it to completion on a fresh per-request loop
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    code, text, status = loop.run_until_complete(
        get_code(user_prompt, user_openai_key, model))
    loop.close()

    # Append all messages to the message buffer for later use
    message_buffer.append(user_prompt + "\n\n")

    return jsonify({'code': code, 'text': text}), status
263 |
264 |
@app.route('/upload', methods=['POST'])
def upload_file():
    """Accept a multipart file upload and store it in the workspace folder."""
    # The multipart form must contain a 'file' part
    if 'file' not in request.files:
        return jsonify({'error': 'No file part in the request'}), 400

    file = request.files['file']
    # Browsers submit an empty filename when no file was selected
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    if not (file and allowed_file(file.filename)):
        return jsonify({'error': 'File type not allowed'}), 400

    file_target = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
    file.save(file_target)
    file_info = inspect_file(file_target)
    return jsonify({'message': f'File {file.filename} uploaded successfully.\n{file_info}'}), 200
282 |
283 |
# Bind on all interfaces; reloader disabled — presumably to avoid spawning the
# module's side-effectful setup twice (NOTE(review): confirm intent).
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=APP_PORT, debug=True, use_reloader=False)
286 |
--------------------------------------------------------------------------------