├── bottybot.bat
├── bottybot.sh
├── images
├── ui.png
├── bottalk.jpg
├── bot_config.png
├── bot_library.png
└── bottybotmodels.png
├── static
└── favicon.ico
├── wsgi.py
├── Dockerfile
├── sample.env
├── templates
├── login.html
├── augment.html
├── library.html
├── index.html
├── history.html
├── config.html
└── bot_base.html
├── requirements.txt
├── LICENSE
├── README.md
├── models_config.json
├── .gitignore
├── bots.json
└── app.py
/bottybot.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | flask run -p 7860 --host 0.0.0.0
--------------------------------------------------------------------------------
/bottybot.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python3 -m flask run -p 7860 --host 0.0.0.0
3 |
--------------------------------------------------------------------------------
/images/ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/patw/Bottybot/HEAD/images/ui.png
--------------------------------------------------------------------------------
/images/bottalk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/patw/Bottybot/HEAD/images/bottalk.jpg
--------------------------------------------------------------------------------
/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/patw/Bottybot/HEAD/static/favicon.ico
--------------------------------------------------------------------------------
/images/bot_config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/patw/Bottybot/HEAD/images/bot_config.png
--------------------------------------------------------------------------------
/images/bot_library.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/patw/Bottybot/HEAD/images/bot_library.png
--------------------------------------------------------------------------------
/images/bottybotmodels.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/patw/Bottybot/HEAD/images/bottybotmodels.png
--------------------------------------------------------------------------------
/wsgi.py:
--------------------------------------------------------------------------------
1 | # This is used for hosting on a WSGI compliant server
2 | # Gunicorn seems to work fine
3 |
4 | from app import app
5 |
6 | if __name__ == '__main__':
7 | app.run(debug=False)
8 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.10
2 | WORKDIR /app
3 | COPY requirements.txt .
4 | RUN pip install --no-cache-dir -r requirements.txt
5 | COPY . .
6 | CMD [ "python3", "-m" , "flask", "run", "--host=0.0.0.0"]
7 |
--------------------------------------------------------------------------------
/sample.env:
--------------------------------------------------------------------------------
1 | SECRET_KEY=whateverbutmakeitlongandcomplex
2 | BOTTY_KEY=forAPICallingBottyBot
3 | USERS={"user1":"pass1", "user2":"pass2"}
4 | LOCAL_MODELS={"local-llama3-8b": "http://127.0.0.1:8080/v1"}
5 | OPENAI_API_KEY=sk-8Kxxxxxxxxxx
6 | MISTRAL_API_KEY=xxxxxxxxx
7 | ANTHROPIC_API_KEY=sk-ant-xxxxxxxx
8 | CEREBRAS_API_KEY=csk-xxxxxxxxxx
9 | GEMINI_API_KEY=xxxxxxxxx
10 | DEEPSEEK_API_KEY=xxxxxx
--------------------------------------------------------------------------------
/templates/login.html:
--------------------------------------------------------------------------------
1 | {% extends 'bootstrap/base.html' %}
2 | {% import "bootstrap/wtf.html" as wtf %}
3 |
4 | {% block content %}
5 |
6 |
7 |
8 |
9 |
10 |
BottyBot Login
11 | {{ wtf.quick_form(form) }}
12 |
13 |
14 |
15 |
16 |
17 | {% endblock %}
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # This file is used by pip to install required python packages
2 | # Usage: pip install -r requirements.txt
3 |
4 | # Flask Framework
5 | Flask
6 | Flask-WTF
7 | Flask-Bootstrap4
8 |
9 | # Environment file handling
10 | python-dotenv
11 |
12 | # REST Stuff
13 | requests
14 |
15 | # Rendering stuff
16 | misaka
17 |
18 | # OpenAI
19 | openai
20 |
21 | # Mistral AI
22 | mistralai
23 |
24 | # Anthropic
25 | anthropic
26 |
27 | # Cerebras
28 | cerebras_cloud_sdk
29 |
--------------------------------------------------------------------------------
/templates/augment.html:
--------------------------------------------------------------------------------
1 | {% extends 'bot_base.html' %}
2 | {% import "bootstrap/wtf.html" as wtf %}
3 |
4 | {% block content %}
5 |
6 |
7 |
8 | {{ wtf.quick_form(form) }}
9 |
10 |
Augmentation allows you to add this text to the prompt before the LLM is called.
11 | It's useful for pasting in documents to ask questions about it.
12 |
13 |
14 |
15 | {% endblock %}
--------------------------------------------------------------------------------
/templates/library.html:
--------------------------------------------------------------------------------
1 | {% extends 'bot_base.html' %}
2 | {% import "bootstrap/wtf.html" as wtf %}
3 |
4 | {% block content %}
5 |
6 |
7 |
8 | {{ wtf.quick_form(form) }}
9 |
10 |
Bot Library
11 |
12 |
13 | The Bot Library is used to select a pre-made bot which can be found in the bots.json file located
14 | in your BottyBot installation. Feel free to edit this file to include whatever personalities you enjoy talking
15 | to.
16 |
17 |
18 |
19 |
20 | {% endblock %}
--------------------------------------------------------------------------------
/templates/index.html:
--------------------------------------------------------------------------------
1 | {% extends 'bot_base.html' %}
2 | {% import "bootstrap/wtf.html" as wtf %}
3 |
4 | {% block content %}
5 |
6 |
7 |
8 |
9 |
10 | {% for response in history %}
11 |
12 | {{ response.user }}
13 | {{ response.text | safe }}
14 |
15 | {% endfor %}
16 |
17 |
18 | {{ wtf.quick_form(form) }}
19 |
20 |
21 |
22 |
23 |
24 |
33 |
34 | {% endblock %}
--------------------------------------------------------------------------------
/templates/history.html:
--------------------------------------------------------------------------------
1 | {% extends 'bootstrap/base.html' %}
2 |
3 | {% block title %}
4 | Botty Bot
5 | {% endblock %}
6 |
7 | {% block head %}
8 | {{super()}}
9 |
10 | {% endblock %}
11 |
12 | {% block styles %}
13 | {{ super() }}
14 |
19 | {% endblock %}
20 |
21 | {% block content %}
22 |
23 |
24 |
25 | Conversation between {{ user }} and {{ bot }} on {{ date }}
26 |
27 |
28 |
29 |
30 |
31 |
32 | {% for response in history %}
33 |
34 | {{ response.user }}
35 | {{ response.text | safe }}
36 |
37 | {% endfor %}
38 |
39 |
40 |
41 |
42 |
43 | {% endblock %}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 2-Clause License
2 |
3 | Copyright (c) 2023, Pat Wendorf
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 |
--------------------------------------------------------------------------------
/templates/config.html:
--------------------------------------------------------------------------------
1 | {% extends 'bot_base.html' %}
2 | {% import "bootstrap/wtf.html" as wtf %}
3 |
4 | {% block content %}
5 |
6 |
7 |
8 | {{ wtf.quick_form(form) }}
9 |
10 |
11 |
12 | Feature
13 | Purpose
14 |
15 | Name
16 | This is the name you've given the bot identity. Bots work better when the name reflects the identity.
17 |
18 |
19 | Identity
20 | This describes the bot's personality and how it responds to prompts. You can make it as helpful or spicy as you'd like!
21 |
22 |
23 | Temperature
24 | This is a tuning value for the creativity of the bot output. Put the value to 0.1 to 0.3 for more informational personalities and 0.6-0.8 for more creative discussions.
25 |
26 |
27 |
28 |
Reset Bot to Wizard
29 |
30 |
31 |
32 | {% endblock %}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # BottyBot
2 |
3 | 
4 |
5 | A simple UI for debugging prompts across multiple models. Supports local models (llama.cpp server or ollama server), as well as OpenAI, Anthropic, Deepseek, Google Gemini, Mistral and Cerebras. Supports loadable system messages for bot identity.
6 |
7 | 
8 | 
9 | 
10 | 
11 |
12 | ## Local Installation
13 |
14 | ```
15 | pip install -r requirements.txt
16 | ```
17 |
18 | ## Docker Installation
19 |
20 | ```
21 | git clone https://github.com/patw/Bottybot.git
22 | cd Bottybot
23 | ```
24 |
25 | Follow the instructions below on configuring the .env and models_config.json
26 |
27 | ```
28 | docker build -t bottybot .
29 | docker run -d -p 7860:5000 bottybot
30 | ```
31 |
32 | ## Configuration
33 |
34 | Copy the sample.env file to .env and create a proper user/password. Add API keys for various providers, but be sure to remove the sample keys for ones you are not currently using! LOCAL_MODELS takes dict values of "local-&lt;model name&gt;": "&lt;base url&gt;"
35 |
36 | ## Downloading an LLM model
37 |
38 | The easiest way to do this is with ollama, consult their documentation on downloading a model.
39 |
40 | ## Running BottyBot
41 |
42 | ```
43 | flask run
44 | ```
45 |
46 | ### Optionally you can run this so your entire network can access it
47 |
48 | ```
49 | flask run -p 5000 --host 0.0.0.0
50 | ```
51 |
52 | This starts the process on port 5000 and accessible on any network interface
53 |
54 | ## Accessing BottyBot
55 |
56 | http://localhost:5000
--------------------------------------------------------------------------------
/templates/bot_base.html:
--------------------------------------------------------------------------------
1 | {% extends 'bootstrap/base.html' %}
2 |
3 | {% block title %}
4 | Botty Bot
5 | {% endblock %}
6 |
7 | {% block head %}
8 | {{super()}}
9 |
10 | {% endblock %}
11 |
12 | {% block styles %}
13 | {{ super() }}
14 |
19 | {% endblock %}
20 |
21 | {% block navbar %}
22 |
23 |
24 | 🤖 BottyBot
25 |
26 |
27 |
28 |
29 |
48 |
51 |
52 | {% endblock %}
53 |
--------------------------------------------------------------------------------
/models_config.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "gpt-4.1",
4 | "provider": "openai",
5 | "api_key_env": "OPENAI_API_KEY"
6 | },
7 | {
8 | "name": "gpt-4.1-mini",
9 | "provider": "openai",
10 | "api_key_env": "OPENAI_API_KEY"
11 | },
12 | {
13 | "name": "gpt-4.1-nano",
14 | "provider": "openai",
15 | "api_key_env": "OPENAI_API_KEY"
16 | },
17 | {
18 | "name": "o4-mini",
19 | "provider": "openai",
20 | "api_key_env": "OPENAI_API_KEY"
21 | },
22 | {
23 | "name": "mistral-small-latest",
24 | "provider": "mistral",
25 | "api_key_env": "MISTRAL_API_KEY"
26 | },
27 | {
28 | "name": "mistral-medium-latest",
29 | "provider": "mistral",
30 | "api_key_env": "MISTRAL_API_KEY"
31 | },
32 | {
33 | "name": "mistral-large-latest",
34 | "provider": "mistral",
35 | "api_key_env": "MISTRAL_API_KEY"
36 | },
37 | {
38 | "name": "claude-3-5-haiku-latest",
39 | "provider": "anthropic",
40 | "api_key_env": "ANTHROPIC_API_KEY"
41 | },
42 | {
43 | "name": "claude-sonnet-4-0",
44 | "provider": "anthropic",
45 | "api_key_env": "ANTHROPIC_API_KEY"
46 | },
47 | {
48 | "name": "claude-opus-4-0",
49 | "provider": "anthropic",
50 | "api_key_env": "ANTHROPIC_API_KEY"
51 | },
52 |
53 | {
54 | "name": "cerebras-llama3.1-8b",
55 | "provider": "cerebras",
56 | "api_key_env": "CEREBRAS_API_KEY"
57 | },
58 | {
59 | "name": "cerebras-llama-3.3-70b",
60 | "provider": "cerebras",
61 | "api_key_env": "CEREBRAS_API_KEY"
62 | },
63 | {
64 | "name": "cerebras-qwen-3-32b",
65 | "provider": "cerebras",
66 | "api_key_env": "CEREBRAS_API_KEY"
67 | },
68 | {
69 | "name": "gemini-2.5-flash-preview-05-20",
70 | "provider": "gemini",
71 | "api_key_env": "GEMINI_API_KEY",
72 | "base_url": "https://generativelanguage.googleapis.com/v1beta/"
73 | },
74 | {
75 | "name": "gemini-2.5-pro-preview-05-06",
76 | "provider": "gemini",
77 | "api_key_env": "GEMINI_API_KEY",
78 | "base_url": "https://generativelanguage.googleapis.com/v1beta/"
79 | },
80 | {
81 | "name": "deepseek-chat",
82 | "provider": "deepseek",
83 | "api_key_env": "DEEPSEEK_API_KEY",
84 | "base_url": "https://api.deepseek.com"
85 | },
86 | {
87 | "name": "deepseek-reasoner",
88 | "provider": "deepseek",
89 | "api_key_env": "DEEPSEEK_API_KEY",
90 | "base_url": "https://api.deepseek.com"
91 | }
92 | ]
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Config and history files
10 | *-bot.json
11 | *-history.json
12 |
13 | # Distribution / packaging
14 | .Python
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib/
22 | lib64/
23 | parts/
24 | sdist/
25 | var/
26 | wheels/
27 | pip-wheel-metadata/
28 | share/python-wheels/
29 | *.egg-info/
30 | .installed.cfg
31 | *.egg
32 | MANIFEST
33 |
34 | # PyInstaller
35 | # Usually these files are written by a python script from a template
36 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
37 | *.manifest
38 | *.spec
39 |
40 | # Installer logs
41 | pip-log.txt
42 | pip-delete-this-directory.txt
43 |
44 | # Unit test / coverage reports
45 | htmlcov/
46 | .tox/
47 | .nox/
48 | .coverage
49 | .coverage.*
50 | .cache
51 | nosetests.xml
52 | coverage.xml
53 | *.cover
54 | *.py,cover
55 | .hypothesis/
56 | .pytest_cache/
57 |
58 | # Translations
59 | *.mo
60 | *.pot
61 |
62 | # Django stuff:
63 | *.log
64 | local_settings.py
65 | db.sqlite3
66 | db.sqlite3-journal
67 |
68 | # Flask stuff:
69 | instance/
70 | .webassets-cache
71 |
72 | # Scrapy stuff:
73 | .scrapy
74 |
75 | # Sphinx documentation
76 | docs/_build/
77 |
78 | # PyBuilder
79 | target/
80 |
81 | # Jupyter Notebook
82 | .ipynb_checkpoints
83 |
84 | # IPython
85 | profile_default/
86 | ipython_config.py
87 |
88 | # pyenv
89 | .python-version
90 |
91 | # pipenv
92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
95 | # install all needed dependencies.
96 | #Pipfile.lock
97 |
98 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
99 | __pypackages__/
100 |
101 | # Celery stuff
102 | celerybeat-schedule
103 | celerybeat.pid
104 |
105 | # SageMath parsed files
106 | *.sage.py
107 |
108 | # Environments
109 | .env
110 | .venv
111 | env/
112 | venv/
113 | ENV/
114 | env.bak/
115 | venv.bak/
116 |
117 | # Spyder project settings
118 | .spyderproject
119 | .spyproject
120 |
121 | # Rope project settings
122 | .ropeproject
123 |
124 | # mkdocs documentation
125 | /site
126 |
127 | # mypy
128 | .mypy_cache/
129 | .dmypy.json
130 | dmypy.json
131 |
132 | # Pyre type checker
133 | .pyre/
134 |
--------------------------------------------------------------------------------
/bots.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "Wizard 🧙",
4 | "identity": "You are Wizard, a friendly chatbot. You help the user answer questions, solve problems and make plans. You think deeply about the question and provide a detailed, accurate response.",
5 | "temperature": "0.7"
6 | },
7 | {
8 | "name": "Life Coach Pro 🎯",
9 | "identity": "You are Life Coach Pro, a results-driven mentor who combines practical psychology, habit formation, and strategic planning to help users transform their lives. You provide actionable steps, accountability frameworks, and evidence-based strategies for personal development.",
10 | "temperature": "0.7"
11 | },
12 | {
13 | "name": "Tech Guru 💻",
14 | "identity": "You are Tech Guru, a knowledgeable technology expert who explains complex technical concepts in simple terms. You stay current with the latest tech trends and provide practical advice on everything from coding to cybersecurity to consumer electronics.",
15 | "temperature": "0.7"
16 | },
17 | {
18 | "name": "Financial Sage 💰",
19 | "identity": "You are Financial Sage, an experienced financial advisor who helps users understand personal finance, investing, and wealth building. You break down complex financial concepts and provide practical, actionable advice for all experience levels.",
20 | "temperature": "0.7"
21 | },
22 | {
23 | "name": "Relationship Expert 💕",
24 | "identity": "You are Relationship Expert, a compassionate counselor who helps users navigate relationships, dating, and interpersonal dynamics. You provide evidence-based advice while maintaining sensitivity to complex emotional situations.",
25 | "temperature": "0.7"
26 | },
27 | {
28 | "name": "Productivity Master ⚡",
29 | "identity": "You are Productivity Master, an efficiency expert who helps users optimize their work and life processes. You combine time management techniques, technology tools, and psychological insights to boost personal and professional productivity.",
30 | "temperature": "0.7"
31 | },
32 | {
33 | "name": "Language Mentor 🗣️",
34 | "identity": "You are Language Mentor, a polyglot teacher who helps users learn new languages effectively. You provide learning strategies, cultural insights, and practical conversation tips while making language acquisition fun and engaging.",
35 | "temperature": "0.7"
36 | },
37 | {
38 | "name": "Mental Wellness Guide 🧘",
39 | "identity": "You are Mental Wellness Guide, a supportive counselor who helps users maintain psychological well-being. You provide practical coping strategies, mindfulness techniques, and evidence-based approaches to managing stress and anxiety.",
40 | "temperature": "0.7"
41 | },
42 | {
43 | "name": "Career Strategist 📈",
44 | "identity": "You are Career Strategist, an experienced professional who helps users navigate their career paths. You provide guidance on job searching, skill development, networking, and professional growth while considering current market trends.",
45 | "temperature": "0.7"
46 | },
47 | {
48 | "name": "Creative Muse 🎨",
49 | "identity": "You are Creative Muse, an inspiring mentor who helps users unlock their creative potential. You provide unique prompts, brainstorming techniques, and artistic guidance while encouraging experimental thinking and original expression.",
50 | "temperature": "0.7"
51 | },
52 | {
53 | "name": "Debate Master 🎭",
54 | "identity": "You are Debate Master, a skilled rhetorician who helps users understand multiple perspectives on complex issues. You challenge assumptions, identify logical fallacies, and teach the art of constructive argumentation while maintaining objectivity.",
55 | "temperature": "0.7"
56 | }
57 | ]
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | # Basic flask stuff for building http APIs and rendering html templates
2 | from flask import Flask, render_template, redirect, url_for, request, session, jsonify
3 |
4 | # Bootstrap integration with flask so we can make pretty pages
5 | from flask_bootstrap import Bootstrap
6 |
7 | # Flask forms integrations which save insane amounts of time
8 | from flask_wtf import FlaskForm
9 | from wtforms import StringField, SubmitField, PasswordField, TextAreaField, IntegerField, FloatField, SelectField, BooleanField
10 | from wtforms.validators import DataRequired
11 |
12 | # Basic python stuff
13 | import os
14 | import json
15 | import functools
16 | import datetime
17 |
18 | # Some nice formatting for code
19 | import misaka
20 |
21 | # Need OpenAI for a few providers including local with llama.cpp server or ollama server
22 | from openai import OpenAI
23 |
24 | # Nice way to load environment variables for deployments
25 | from dotenv import load_dotenv
26 | load_dotenv()
27 |
# The total amount of message history in the chat
HISTORY_LENGTH = 8

# Create the Flask app object
app = Flask(__name__)

# Session key
# SECRET_KEY signs the Flask session cookie; reading os.environ directly makes a
# missing variable fail fast at startup rather than at request time.
app.config['SECRET_KEY'] = os.environ["SECRET_KEY"]
app.config['SESSION_COOKIE_NAME'] = 'bottybot2'

# BottyBot API Key for /api/chat endpoint
BOTTY_KEY = os.environ["BOTTY_KEY"]

# --- Model Configuration ---
# Global dictionary to store initialized API clients
# (populated by initialize_models_and_clients, keyed by provider name)
clients = {}
# Global list of available model names for the form
available_models_for_form = []
# Global dictionary for local model names and their base URLs
local_model_urls = {}
48 |
def initialize_models_and_clients():
    """Populate the global model registry from the environment and models_config.json.

    Side effects (module globals):
      * available_models_for_form -- sorted, de-duplicated list of selectable model names
      * clients                   -- one initialized SDK client per provider
      * local_model_urls          -- base URL per local model name
    """
    global available_models_for_form, clients, local_model_urls

    # 1. Handle Local Models from LOCAL_MODELS env var
    # LOCAL_MODELS is a JSON dict of {"model-name": "base_url"} (see sample.env).
    if "LOCAL_MODELS" in os.environ:
        try:
            local_models_config = json.loads(os.environ["LOCAL_MODELS"])
            for model_name, base_url in local_models_config.items():
                # Ensure local model names are added to the list and their URLs stored
                if model_name not in available_models_for_form:
                    available_models_for_form.append(model_name)
                local_model_urls[model_name] = base_url
        except json.JSONDecodeError:
            print("Error decoding LOCAL_MODELS environment variable. Local models may not be available.")

    # 2. Handle models from models_config.json
    try:
        with open("models_config.json", 'r', encoding='utf-8') as f:
            external_model_configs = json.load(f)
    except FileNotFoundError:
        print("models_config.json not found. No additional external models will be configured.")
        external_model_configs = []
    except json.JSONDecodeError:
        print("Error decoding models_config.json. External models may not be configured correctly.")
        external_model_configs = []

    for config in external_model_configs:
        provider = config.get("provider")
        api_key_env = config.get("api_key_env")
        model_name = config.get("name")
        base_url_from_config = config.get("base_url")

        # A usable entry needs at least a provider, an API-key env var name, and a model name.
        if not provider or not api_key_env or not model_name:
            print(f"Skipping invalid model configuration in models_config.json: {config}")
            continue

        if api_key_env in os.environ:
            api_key = os.environ[api_key_env]

            if model_name not in available_models_for_form:
                available_models_for_form.append(model_name)

            # Initialize client if not already done for this provider.
            # Provider SDKs are imported lazily so a missing package only
            # matters when its provider is actually configured.
            if provider == "openai" and "openai" not in clients:
                clients["openai"] = OpenAI(api_key=api_key)
            elif provider == "mistral" and "mistral" not in clients:
                from mistralai import Mistral
                clients["mistral"] = Mistral(api_key=api_key)
            elif provider == "anthropic" and "anthropic" not in clients:
                import anthropic
                clients["anthropic"] = anthropic.Anthropic(api_key=api_key)
            elif provider == "cerebras" and "cerebras" not in clients:
                from cerebras.cloud.sdk import Cerebras
                clients["cerebras"] = Cerebras(api_key=api_key)
            elif provider == "gemini" and "gemini" not in clients:
                # Gemini is reached through an OpenAI-compatible endpoint, so
                # the entry's base_url is mandatory.
                if base_url_from_config:
                    clients["gemini"] = OpenAI(api_key=api_key, base_url=base_url_from_config)
                else:
                    print(f"Warning: Gemini model {model_name} missing 'base_url' in config. It may not be available.")
                    if model_name in available_models_for_form: available_models_for_form.remove(model_name)
            elif provider == "deepseek" and "deepseek" not in clients:
                # DeepSeek also uses an OpenAI-compatible endpoint; base_url required.
                if base_url_from_config:
                    clients["deepseek"] = OpenAI(api_key=api_key, base_url=base_url_from_config)
                else:
                    print(f"Warning: DeepSeek model {model_name} missing 'base_url' in config. It may not be available.")
                    if model_name in available_models_for_form: available_models_for_form.remove(model_name)
        else:
            # API key not found, so this model isn't available
            # print(f"API key {api_key_env} for {model_name} not found. Model will not be available.")
            if model_name in available_models_for_form:
                available_models_for_form.remove(model_name)  # Remove if its API key is missing

    # De-duplicate and sort for a stable ordering in the model dropdown.
    available_models_for_form = sorted(list(set(available_models_for_form)))
122 |
# NOTE: load_dotenv() already ran near the top of this module (right after the
# dotenv import), so the redundant second call that used to live here has been
# removed -- the environment is guaranteed to be loaded at this point.

# Initialize models and clients after loading .env and before defining forms or routes
initialize_models_and_clients()
# --- End Model Configuration ---

# User Auth: USERS is a JSON dict of {"username": "password"} (see sample.env)
users_string = os.environ["USERS"]
users = json.loads(users_string)

# Make it pretty because I can't :(
Bootstrap(app)
136 |
137 | # Load the chat history array
138 | # Chat history looks like an array of events like {"user": "blah", "text": "How do I thing?"}
def load_chat_history(file_path):
    """Return the saved chat history list for *file_path*, or [] when absent/corrupt."""
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            return json.load(file)
    except FileNotFoundError:
        # No history saved yet -- start with an empty conversation.
        return []
    except json.JSONDecodeError:
        # The file exists but holds invalid JSON; report it and start fresh.
        print(f"Error decoding JSON in file: {file_path}")
        return []
151 |
152 | # Load the current bot config
def load_bot_config(file_path):
    """Return the user's saved bot config dict, or the default Wizard persona.

    The returned dict has keys "name", "identity" and "temperature"
    (temperature is stored as a string, matching bots.json).
    """
    # Our default Wizard persona. Use this if there's no user defined config.
    data = {
        "name": "Wizard 🧙",
        "identity": "You are Wizard, a friendly chatbot. You help the user answer questions, solve problems and make plans. You think deeply about the question and provide a detailed, accurate response.",
        "temperature": "0.7"
    }

    # Load our user configured bot config if there is one.
    # Catch only the expected failures (missing/unreadable file, invalid JSON)
    # instead of a bare except that would also hide programming errors.
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            data = json.load(file)
    except (OSError, json.JSONDecodeError):
        pass

    return data
170 |
171 | # Load the bot library - A library of useful bots to talk to about different subjects
def load_bot_library():
    """Read and return the bundled bot personality library from bots.json."""
    # A missing or malformed bots.json propagates -- the library ships with the app.
    with open("bots.json", 'r', encoding='utf-8') as library_file:
        return json.load(library_file)
177 |
178 | # Load the augmentation file if there is one. This is used to augment the prompt with additional data
def load_augmentation(file_path):
    """Return the stored augmentation text for *file_path*, or "" when none exists.

    The augmentation is extra text appended to prompts; it is stored as a
    JSON-encoded value in the file.
    """
    data = ""
    # Catch only the expected failures (missing/unreadable file, invalid JSON)
    # instead of a bare except that would also hide programming errors.
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            data = json.load(file)
    except (OSError, json.JSONDecodeError):
        pass
    return data
187 |
# Output the whole history as a text blob
def text_history(history):
    """Render chat history as plain text, one "user: text" line per entry."""
    # str.join avoids the quadratic string concatenation of the += loop and
    # no longer shadows the function's own name with a local variable.
    return "".join(item["user"] + ": " + item["text"] + "\n" for item in history)
194 |
def llm_proxy(prompt, bot_config, model_type):
    """Dispatch *prompt* to the provider implied by the model name prefix.

    Returns a {"user": ..., "text": ...} response dict. Previously an
    unrecognized model name fell off the end of the prefix chain and
    returned None, which breaks callers that index the response; it now
    returns an explicit error dict instead.
    """
    if model_type.startswith("local-"):
        return llm_local(prompt, model_type, bot_config)
    if model_type.startswith(("mistral-", "ministral-")):
        return llm_mistral(prompt, model_type, bot_config)
    if model_type.startswith(("gpt-", "chatgpt-")):
        return llm_oai(prompt, model_type, bot_config)
    if model_type.startswith("o4"):
        return llm_o1(prompt, model_type, bot_config)
    if model_type.startswith("claude-"):
        return llm_anthropic(prompt, model_type, bot_config)
    if model_type.startswith("cerebras-"):
        return llm_cerebras(prompt, model_type, bot_config)
    if model_type.startswith("gemini-"):
        return llm_gemini(prompt, model_type, bot_config)
    if model_type.startswith("deepseek-"):
        return llm_deepseek(prompt, model_type, bot_config)
    # No prefix matched: report the problem instead of silently returning None.
    return {"user": "error", "text": f"Unknown model type: {model_type}"}
212 |
# Query local models
def llm_local(prompt, model_name, bot_config):
    """Chat with a local OpenAI-compatible server (llama.cpp / ollama)."""
    if model_name not in local_model_urls:
        return {"user": "error", "text": f"Local model '{model_name}' configuration not found or base URL is missing."}
    base_url = local_model_urls[model_name]
    try:
        # Local servers ignore the API key, but the client requires one.
        local_client = OpenAI(api_key="doesntmatter", base_url=base_url)
        chat_messages = [
            {"role": "system", "content": bot_config["identity"]},
            {"role": "user", "content": prompt},
        ]
        reply = local_client.chat.completions.create(
            max_tokens=4096,
            model=model_name,
            temperature=float(bot_config["temperature"]),
            messages=chat_messages,
        )
        display_name = bot_config["name"] + " " + model_name
        return {"user": display_name, "text": reply.choices[0].message.content}
    except Exception as e:
        print(f"Error in llm_local for {model_name}: {e}")
        return {"user": "error", "text": f"Error with local model {model_name}: {e}"}
227 |
# Query mistral models
def llm_mistral(prompt, model_name, bot_config):
    """Chat with a Mistral hosted model; returns a response dict or an error dict."""
    mistral_client_instance = clients.get("mistral")
    if not mistral_client_instance:
        return {"user": "error", "text": "Mistral client not configured. Check MISTRAL_API_KEY."}
    messages=[{"role": "system", "content": bot_config["identity"]},{"role": "user", "content": prompt}]
    # Wrap the API call so provider failures surface as chat error messages
    # instead of an unhandled exception -- consistent with llm_local.
    try:
        response = mistral_client_instance.chat.complete(model=model_name, temperature=float(bot_config["temperature"]), messages=messages)
        user = bot_config["name"] + " " + model_name
        return {"user": user, "text": response.choices[0].message.content}
    except Exception as e:
        print(f"Error in llm_mistral for {model_name}: {e}")
        return {"user": "error", "text": f"Error with Mistral model {model_name}: {e}"}
237 |
# Query OpenAI models
def llm_oai(prompt, model_name, bot_config):
    """Chat with an OpenAI GPT-family model using the bot's identity and temperature."""
    openai_client = clients.get("openai")
    if not openai_client:
        return {"user": "error", "text": "OpenAI client not configured. Check OPENAI_API_KEY."}
    chat_messages = [
        {"role": "system", "content": bot_config["identity"]},
        {"role": "user", "content": prompt},
    ]
    completion = openai_client.chat.completions.create(
        model=model_name,
        temperature=float(bot_config["temperature"]),
        messages=chat_messages,
    )
    return {"user": bot_config["name"] + " " + model_name,
            "text": completion.choices[0].message.content}
247 |
# Query OpenAI o1 models, without a system message. O1 class models don't support identities or temperature.
def llm_o1(prompt, model_name, bot_config):
    """Chat with an OpenAI o-series reasoning model (no system message, no temperature)."""
    reasoning_client = clients.get("openai")  # o1 models use the OpenAI client
    if not reasoning_client:
        return {"user": "error", "text": "OpenAI client for o1 models not configured. Check OPENAI_API_KEY."}
    completion = reasoning_client.chat.completions.create(
        model=model_name,
        messages=[{"role": "user", "content": prompt}],
    )
    return {"user": bot_config["name"] + " " + model_name,
            "text": completion.choices[0].message.content}
257 |
# Query Anthropic models
def llm_anthropic(prompt, model_name, bot_config):
    """Send a prompt to an Anthropic model.

    Anthropic's API takes the system identity as a top-level `system` argument
    rather than a message, and the reply text lives in response.content[0].text.
    Returns a history entry dict: {"user": display name, "text": reply}.
    """
    anthropic_client_instance = clients.get("anthropic")
    if not anthropic_client_instance:
        return {"user": "error", "text": "Anthropic client not configured. Check ANTHROPIC_API_KEY."}
    messages=[{"role": "user", "content": prompt}]
    try:
        response = anthropic_client_instance.messages.create(system=bot_config["identity"], max_tokens=8192, model=model_name, temperature=float(bot_config["temperature"]), messages=messages)
    except Exception as e:
        # Consistent with llm_local: report API failures in-chat, not as a 500.
        print(f"Error in llm_anthropic for {model_name}: {e}")
        return {"user": "error", "text": f"Error with Anthropic model {model_name}: {e}"}
    user = bot_config["name"] + " " + model_name
    return {"user": user, "text": response.content[0].text}
267 |
# Query Cerebras models
def llm_cerebras(prompt, model_name, bot_config):
    """Send a prompt to a Cerebras-hosted model.

    model_name is the display name (e.g. "cerebras-llama3.1-8b"); the
    "cerebras-" prefix is stripped before the API call. The returned history
    entry keeps the original model_name for display consistency.
    """
    cerebras_client_instance = clients.get("cerebras")
    if not cerebras_client_instance:
        return {"user": "error", "text": "Cerebras client not configured. Check CEREBRAS_API_KEY."}

    # Strip only a leading "cerebras-" prefix. The old .replace() removed
    # every occurrence, which would mangle a model id containing the
    # substring elsewhere.
    if model_name.startswith("cerebras-"):
        model_name_for_api = model_name[len("cerebras-"):]
    else:
        model_name_for_api = model_name

    messages=[{"role": "system", "content": bot_config["identity"]},{"role": "user", "content": prompt}]
    try:
        response = cerebras_client_instance.chat.completions.create(model=model_name_for_api, temperature=float(bot_config["temperature"]), messages=messages)
    except Exception as e:
        # Consistent with llm_local: report API failures in-chat, not as a 500.
        print(f"Error in llm_cerebras for {model_name}: {e}")
        return {"user": "error", "text": f"Error with Cerebras model {model_name}: {e}"}
    return {"user": bot_config["name"] + " " + model_name, "text": response.choices[0].message.content}
282 |
# Google Gemini
def llm_gemini(prompt, model_name, bot_config):
    """Send a prompt to a Google Gemini model via its OpenAI-compatible endpoint.

    bot_config supplies the system identity and sampling temperature.
    Returns a history entry dict: {"user": display name, "text": reply}.
    """
    gemini_client_instance = clients.get("gemini")
    if not gemini_client_instance:
        return {"user": "error", "text": "Gemini client not configured. Check GEMINI_API_KEY and models_config.json for base_url."}
    messages=[{"role": "system", "content": bot_config["identity"]},{"role": "user", "content": prompt}]
    try:
        response = gemini_client_instance.chat.completions.create(model=model_name, temperature=float(bot_config["temperature"]), messages=messages)
    except Exception as e:
        # Consistent with llm_local: report API failures in-chat, not as a 500.
        print(f"Error in llm_gemini for {model_name}: {e}")
        return {"user": "error", "text": f"Error with Gemini model {model_name}: {e}"}
    user = bot_config["name"] + " " + model_name
    return {"user": user, "text": response.choices[0].message.content}
292 |
# Deepseek Chat (coding)
def llm_deepseek(prompt, model_name, bot_config):
    """Send a prompt to a DeepSeek model.

    "-reasoner" models get no system message or temperature (same restriction
    as the o1 class); other DeepSeek models get the full identity/temperature
    treatment. Returns a history entry dict: {"user": display name, "text": reply}.
    """
    deepseek_client_instance = clients.get("deepseek")
    if not deepseek_client_instance:
        return {"user": "error", "text": "DeepSeek client not configured. Check DEEPSEEK_API_KEY and models_config.json for base_url."}

    try:
        if model_name.endswith("-reasoner"):
            messages=[{"role": "user", "content": prompt}]
            response = deepseek_client_instance.chat.completions.create(model=model_name, messages=messages)
        else:
            messages=[{"role": "system", "content": bot_config["identity"]},{"role": "user", "content": prompt}]
            response = deepseek_client_instance.chat.completions.create(model=model_name, temperature=float(bot_config["temperature"]), messages=messages)
    except Exception as e:
        # Consistent with llm_local: report API failures in-chat, not as a 500.
        print(f"Error in llm_deepseek for {model_name}: {e}")
        return {"user": "error", "text": f"Error with DeepSeek model {model_name}: {e}"}
    user = bot_config["name"] + " " + model_name
    return {"user": user, "text": response.choices[0].message.content}
307 |
# Flask forms is magic
class PromptForm(FlaskForm):
    """Main chat input: prompt text, model picker, and raw-output toggle."""
    prompt = StringField('Prompt 💬', validators=[DataRequired()])
    # Choices are built at import time from models_config.json.
    model_type = SelectField('Model', choices=available_models_for_form, validators=[DataRequired()])
    # When ticked, history is shown unformatted (newlines flattened) instead of Markdown-rendered.
    raw_output = BooleanField('Raw Output')
    submit = SubmitField('Submit')
314 |
# Config form for bot
class BotConfigForm(FlaskForm):
    """Edit the bot persona: display name, system identity, and temperature."""
    name = StringField('Name', validators=[DataRequired()])
    # The identity becomes the system message sent to the model.
    identity = TextAreaField('Identity', validators=[DataRequired()])
    temperature = FloatField('LLM Temperature', validators=[DataRequired()])
    submit = SubmitField('Save')
321 |
# Bot library drop down and selection form
class BotLibraryForm(FlaskForm):
    """Pick a premade bot; choices are filled in per-request by the library view."""
    bot = SelectField('Select Premade Bot', choices=[], validators=[DataRequired()])
    load_bot = SubmitField('Load')
326 |
# Augmentation edit/clear form
class AugmentationForm(FlaskForm):
    """Edit or clear the free-form text prepended to every prompt."""
    augmentation = TextAreaField('Augmentation')
    save = SubmitField('Save')
    # Clearing deletes the per-user augmentation file entirely.
    clear = SubmitField('Clear')
332 |
# Amazing, I hate writing this stuff
class LoginForm(FlaskForm):
    """Simple username/password login form."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Login')
338 |
# Decorator that gates a view behind the session-based login.
def login_required(view):
    """Wrap *view* so unauthenticated requests are redirected to /login.

    When `users` is None (no user list configured), authentication is
    disabled and every request passes straight through.
    """
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if users is not None:
            if session.get("user") is None:
                return redirect(url_for('login'))
        return view(**kwargs)
    return wrapped_view
349 |
# The default chat view
@app.route('/', methods=['GET', 'POST'])
@login_required
def index():
    """Chat page: shows the rolling history and handles prompt submissions.

    GET renders the page; POST sends the prompt (with history and any
    augmentation prepended) to the selected model, appends both sides to the
    per-user history file, and redirects back here (POST/redirect/GET).
    """

    # The single input box and submit button
    form = PromptForm()

    # Model selection is sticky across requests
    if "model_type" in session:
        form.model_type.data = session["model_type"]

    # Raw output option is sticky too; default it off on first visit
    if "raw_output" in session:
        form.raw_output.data = session["raw_output"]
    else:
        session["raw_output"] = False

    # Load the per-user history and enforce the cap. Trim everything over
    # HISTORY_LENGTH in one go (the old code popped a single entry per
    # request, so an over-long file shrank by only one item at a time).
    history_file = session["user"] + "-history.json"
    history = load_chat_history(history_file)
    if len(history) > HISTORY_LENGTH:
        history = history[-HISTORY_LENGTH:]

    # Load the bot config
    bot_file = session["user"] + "-bot.json"
    bot_config = load_bot_config(bot_file)

    # Load the augmentation (prepended to every prompt)
    augment_file = session["user"] + "-augment.json"
    augmentation = load_augmentation(augment_file)

    # If user is prompting send it
    if form.validate_on_submit():
        # Get the form variables
        form_result = request.form.to_dict(flat=True)

        # Fold the chat history into the prompt so the model has context
        prompt = "Chat history:\n" + text_history(history) + "\n" + form_result["prompt"]

        # This new prompt is now history
        history.append({"user": session["user"], "text": form_result["prompt"]})

        # Checkboxes only appear in the POST data when ticked
        session["raw_output"] = "raw_output" in form_result

        # Prompt the LLM (with the augmentation), add that to history too!
        session["model_type"] = form_result["model_type"]
        new_history = llm_proxy(augmentation + prompt, bot_config, form_result["model_type"])
        if new_history is None:
            new_history = {"user": "error", "text": "Model Error 😭"}
        history.append(new_history)

        # Dump the history to the user file - multitenant!
        with open(history_file, 'w', encoding='utf-8') as file:
            json.dump(history, file)
        return redirect(url_for('index'))

    # Render either raw output (newlines flattened) or each entry formatted
    # with Misaka Markdown (the default). Mutating the loaded copy is fine:
    # the file was not rewritten on this GET path.
    if session["raw_output"]:
        for dictionary in history:
            dictionary["text"] = dictionary["text"].replace('\n', ' ')
    else:
        for dictionary in history:
            dictionary["text"] = misaka.html(dictionary["text"], extensions=misaka.EXT_FENCED_CODE)
    return render_template('index.html', history=history, form=form)
421 |
422 |
423 |
# Configure the bot
@app.route('/config', methods=['GET', 'POST'])
@login_required
def config():
    """Edit the current user's bot persona (name, identity, temperature).

    The old code populated the form fields BEFORE validate_on_submit(), so
    validation always ran against the stored (always-valid) config and an
    empty submission could save blank fields via request.form. Now the form
    is only pre-filled when we are not saving a submission.
    """
    bot_file = session["user"] + "-bot.json"
    bot_config = load_bot_config(bot_file)
    form = BotConfigForm()

    if form.validate_on_submit():
        bot_config["name"] = form.name.data
        bot_config["identity"] = form.identity.data
        # FloatField has already coerced this to a float; the llm_* helpers
        # call float() on it either way.
        bot_config["temperature"] = form.temperature.data
        with open(bot_file, 'w', encoding='utf-8') as file:
            json.dump(bot_config, file)
        return redirect(url_for('index'))

    # GET (or failed validation): show the stored config
    form.name.data = bot_config["name"]
    form.identity.data = bot_config["identity"]
    form.temperature.data = bot_config["temperature"]
    return render_template('config.html', form=form)
448 |
# Configure the prompt augmentation
@app.route('/augment', methods=['GET', 'POST'])
@login_required
def augment():
    """Edit or clear the per-user prompt augmentation text.

    "Save" writes the textarea contents to the user's augment file;
    "Clear" deletes the file so the default (empty) augmentation applies.
    """
    augment_file = session["user"] + "-augment.json"
    augmentation = load_augmentation(augment_file)
    form = AugmentationForm()

    # Populate the form with the current augmentation
    form.augmentation.data = augmentation

    # Save the augmentation on a per user basis
    if form.validate_on_submit():
        form_result = request.form.to_dict(flat=True)
        # Only the clicked submit button appears in the POST data
        if "clear" in form_result:
            # Best-effort delete: swallow only filesystem errors (e.g. the
            # file never existed), not every exception like the old bare except.
            try:
                os.remove(augment_file)
            except OSError:
                pass
        else:
            with open(augment_file, 'w', encoding='utf-8') as file:
                json.dump(form_result["augmentation"], file)
        return redirect(url_for('index'))

    return render_template('augment.html', form=form)
475 |
# Bot Library
@app.route('/library', methods=['GET', 'POST'])
@login_required
def library():
    """Pick a premade bot from the library and copy it to the user's config.

    On success, redirects to /config so the user can review the loaded bot.
    """
    form = BotLibraryForm()

    # Populate the bot library drop down (choices start empty on the class)
    bot_library = load_bot_library()
    for bot in bot_library:
        form.bot.choices.append(bot["name"])

    # What config do we write to?
    bot_file = session["user"] + "-bot.json"

    if form.validate_on_submit():
        bot_selected = form.bot.data
        # Guard against a stale/unknown selection: the old loop left
        # bot_config unbound (NameError) when nothing matched.
        bot_config = next((item for item in bot_library if item["name"] == bot_selected), None)
        if bot_config is not None:
            with open(bot_file, 'w', encoding='utf-8') as file:
                json.dump(bot_config, file)
            return redirect(url_for('config'))

    return render_template('library.html', form=form)
502 |
# Delete chat history, new chat
@app.route('/new')
@login_required
def new():
    """Start a fresh chat by deleting the user's history file."""
    history_file = session["user"] + "-history.json"
    # Best-effort delete: a missing file just means there is nothing to
    # clear. Catch only OSError, not everything like the old bare except.
    try:
        os.remove(history_file)
    except OSError:
        pass
    return redirect(url_for('index'))
513 |
# Delete bot identity, return to Wizard
@app.route('/reset')
@login_required
def reset():
    """Delete the user's bot config so the default wizard persona loads next."""
    # This is the bot config file, not the history file (old local name was
    # misleadingly called history_file).
    bot_file = session["user"] + "-bot.json"
    # Best-effort delete: catch only OSError, not everything.
    try:
        os.remove(bot_file)
    except OSError:
        pass
    return redirect(url_for('index'))
524 |
# Download the chat history
@app.route('/backup')
@login_required
def backup():
    """Render the user's chat transcript as a standalone HTML export page."""
    username = session["user"]

    # The current bot name is stamped on the export. History could span
    # several bots, but tagging the current one is fine.
    bot_config = load_bot_config(username + "-bot.json")

    # Full conversation for this user
    history = load_chat_history(username + "-history.json")

    # Tag the export with today's date
    formatted_date = datetime.datetime.now().strftime('%Y-%m-%d')

    # Markdown-render every entry before handing off to the template
    for entry in history:
        entry["text"] = misaka.html(entry["text"], extensions=misaka.EXT_FENCED_CODE)

    return render_template('history.html', history=history, user=username,
                           bot=bot_config["name"], date=formatted_date)
546 |
# Login/logout routes that rely on the user being stored in session
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Validate credentials against the configured user dict and start a session.

    Uses a constant-time comparison so the check does not leak password
    length/prefix information via timing. Also tolerates `users` being None
    (the old `in users` raised TypeError in that case).
    """
    import hmac  # local import: only this view needs it

    form = LoginForm()
    if form.validate_on_submit():
        username = form.username.data
        stored_password = users.get(username) if users else None
        if stored_password is not None and hmac.compare_digest(
                form.password.data.encode("utf-8"), stored_password.encode("utf-8")):
            session["user"] = username
            return redirect(url_for('index'))
    return render_template('login.html', form=form)
557 |
# We finally have a link for this now!
@app.route('/logout')
def logout():
    """Clear the authenticated user and bounce back to the login page."""
    # pop() removes the key instead of storing a null in the session cookie;
    # login_required's session.get("user") is None check treats both the same.
    session.pop("user", None)
    return redirect(url_for('login'))
563 |
# Basic Chat API for some scripts to consume, right now only supports wizard persona
@app.route('/api/chat', methods=['POST'])
def api_chat():
    """POST endpoint: form fields api_key, prompt, model_type -> JSON reply.

    Returns 401 on a bad key, 400 on missing parameters, 502 when the model
    call fails, otherwise the {"user": ..., "text": ...} reply as JSON.
    """
    # Validate the API key (a form field, since this is a POST)
    api_key = request.form.get('api_key')
    if api_key != BOTTY_KEY:
        # 401 lets scripted callers distinguish auth failure from success
        # (the old code returned this error with a 200).
        return jsonify({"error": "Invalid API Key"}), 401

    # Get the API parameters and bail out if they're wrong
    prompt = request.form.get('prompt')
    model_type = request.form.get('model_type')
    if not prompt or not model_type:
        return jsonify({"error": "You need to send a prompt and the model name you want to use (e.g., local-mymodel or gpt-4.1)."}), 400

    # Deliberately pass a bogus path so load_bot_config falls back to the
    # default wizard persona.
    bot_config = load_bot_config("null")

    result = llm_proxy(prompt, bot_config, model_type)
    if result is None:
        # llm_proxy returns None on model errors (see index()); report it
        # instead of serializing a bare null.
        return jsonify({"error": "Model Error"}), 502
    return jsonify(result)
--------------------------------------------------------------------------------