├── .gitattributes
├── .github
│   └── workflows
│       ├── lint-format-test-backend.yml
│       ├── lint-format-test-frontend.yml
│       └── test-end-to-end.yml
├── .gitignore
├── .vscode
│   ├── launch.json
│   └── settings.json
├── DOCUMENTATION.md
├── LICENSE
├── README.md
├── backend
│   ├── .flake8
│   ├── Dockerfile
│   ├── Dockerfile.dev
│   ├── pyproject.toml
│   ├── requirements-dev.txt
│   ├── requirements.txt
│   ├── tacheles_backend
│   │   ├── api
│   │   │   ├── __init__.py
│   │   │   └── routes.py
│   │   ├── models
│   │   │   ├── __init__.py
│   │   │   ├── database.py
│   │   │   └── models.py
│   │   ├── tacheles_backend.py
│   │   └── utils
│   │       ├── __init__.py
│   │       └── logging.py
│   └── tests
│       └── test_backend.py
├── docker-compose.dev.mock.yaml
├── docker-compose.dev.openai.yaml
├── docker-compose.dev.yaml
├── docker-compose.openai.yaml
├── docker-compose.test.yaml
├── docker-compose.yaml
├── docs
│   ├── architecture_deploy_mode.svg
│   ├── architecture_dev_mode.svg
│   └── screenshot.png
├── frontend
│   ├── .eslintrc.json
│   ├── .gitignore
│   ├── .prettierrc
│   ├── Dockerfile
│   ├── README.md
│   ├── package-lock.json
│   ├── package.json
│   ├── public
│   │   ├── favicon.ico
│   │   ├── index.html
│   │   ├── logo192.png
│   │   ├── logo512.png
│   │   ├── manifest.json
│   │   └── robots.txt
│   └── src
│       ├── App.css
│       ├── AppWithConversationList.js
│       ├── AppWithoutConversationList.js
│       ├── ChatInterface.css
│       ├── ChatInterface.js
│       ├── ChatInterfaceWithConversationList.css
│       ├── ChatInterfaceWithConversationList.js
│       ├── MessageList.css
│       ├── MessageList.js
│       ├── UserInput.css
│       ├── UserInput.js
│       ├── __tests__
│       │   ├── App.integration.test.js
│       │   ├── AppWithoutConversationList.integration.test.js
│       │   ├── ChatInterface.test.js
│       │   ├── ChatInterfaceWithConversationList.test.js
│       │   ├── MessageList.test.js
│       │   └── UserInput.test.js
│       ├── __tests__e2e__
│       │   └── endtoend.test.js
│       ├── api.js
│       ├── index.css
│       ├── index.js
│       ├── logo.svg
│       └── setupTests.js
└── inference
    ├── Dockerfile.mock
    ├── Dockerfile.sglang
    ├── Dockerfile.vllm
    └── mock_contents.txt
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.github/workflows/lint-format-test-backend.yml:
--------------------------------------------------------------------------------
1 | name: Lint, Format, and Test Backend
2 |
3 | on:
4 | pull_request:
5 | branches: [main]
6 | workflow_dispatch:
7 |
8 | jobs:
9 | test:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/checkout@v4
14 |
15 | - name: Set up Python
16 | uses: actions/setup-python@v5
17 | with:
18 | python-version: "3.12"
19 |
20 | - name: Install dependencies
21 | run: |
22 | python -m pip install --upgrade pip
23 | pip install -r requirements.txt
24 | pip install -r requirements-dev.txt
25 | working-directory: backend/
26 |
27 | - name: Run Black formatter
28 | run: black --check .
29 | working-directory: backend/
30 |
31 | - name: Run Flake8 linter
32 | run: flake8 .
33 | working-directory: backend/
34 |
35 | - name: Run isort
36 | run: isort --check --diff .
37 | working-directory: backend/
38 |
39 | - name: Run tests
40 | run: pytest
41 | working-directory: backend/
42 |
--------------------------------------------------------------------------------
/.github/workflows/lint-format-test-frontend.yml:
--------------------------------------------------------------------------------
1 | name: Lint, Format, and Test Frontend
2 |
3 | on:
4 | pull_request:
5 | branches: [main]
6 | workflow_dispatch:
7 |
8 | jobs:
9 | lint-format-test:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/checkout@v4
14 | - name: Use Node.js
15 | uses: actions/setup-node@v4
16 | with:
17 | node-version: "20.x"
18 | - name: Install dependencies
19 | run: npm ci
20 | working-directory: frontend
21 | - name: Lint
22 | run: npm run lint
23 | working-directory: frontend
24 | - name: Format
25 | run: npm run format
26 | working-directory: frontend
27 | - name: Test
28 | run: npm test
29 | working-directory: frontend
30 |
--------------------------------------------------------------------------------
/.github/workflows/test-end-to-end.yml:
--------------------------------------------------------------------------------
1 | name: End-to-End Tests
2 |
3 | on:
4 | pull_request:
5 | branches: [main]
6 | workflow_dispatch:
7 |
8 | jobs:
9 | e2e-tests:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 |       - uses: actions/checkout@v4
14 |
15 | - name: Build and run containers
16 |         run: docker compose -f docker-compose.test.yaml up --build --abort-on-container-exit
17 |
18 | - name: Check frontend exit code
19 | run: |
20 |           FRONTEND_EXIT_CODE=$(docker compose -f docker-compose.test.yaml ps -q frontend | xargs docker inspect -f '{{ .State.ExitCode }}')
21 |           if [ "$FRONTEND_EXIT_CODE" -ne 0 ]; then
22 | echo "Frontend container exited with code $FRONTEND_EXIT_CODE"
23 | exit 1
24 | fi
25 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 | .DS_Store
162 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | "configurations": [
3 | {
4 | "name": "Python: Debug tacheles backend",
5 | "type": "debugpy",
6 | "request": "attach",
7 | "connect": { "host": "localhost", "port": 5678 },
8 | "pathMappings": [
9 | {
10 | "localRoot": "${workspaceFolder}/backend",
11 | "remoteRoot": "/app"
12 | }
13 | ],
14 | "justMyCode": true
15 | },
16 | {
17 | "name": "Python: Debug tacheles inference engine",
18 | "type": "debugpy",
19 | "request": "attach",
20 | "connect": { "host": "localhost", "port": 5679 },
21 | "justMyCode": false
22 | }
23 | ]
24 | }
25 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "editor.formatOnSave": true,
3 | "editor.defaultFormatter": "esbenp.prettier-vscode",
4 | "editor.codeActionsOnSave": {
5 | "source.fixAll.eslint": "explicit"
6 | },
7 | "[python]": {
8 | "editor.defaultFormatter": "ms-python.black-formatter"
9 | },
10 | "[javascript]": {
11 | "editor.formatOnSave": true
12 | },
13 | "flake8.cwd": "${workspaceFolder}/backend",
14 | "python.testing.pytestArgs": ["."],
15 | "python.testing.unittestEnabled": false,
16 | "python.testing.pytestEnabled": true,
17 | "python.testing.cwd": "${workspaceFolder}/backend"
18 | }
19 |
--------------------------------------------------------------------------------
/DOCUMENTATION.md:
--------------------------------------------------------------------------------
1 | # tacheles Documentation
2 |
3 | ## Table of Contents
4 |
5 | 1. [Introduction](#introduction)
6 | 2. [Architecture Overview](#architecture-overview)
7 | - [Frontend](#frontend)
8 | - [Backend](#backend)
9 | - [Inference Engines](#inference-engines)
10 | - [Database](#database)
11 | 3. [Extending and Customizing tacheles](#extending-and-customizing-tacheles)
12 | - [Development Environment Setup](#development-environment-setup)
13 | - [Adding New Features](#adding-new-features)
14 | 4. [Deploying tacheles Applications](#deploying-tacheles-applications)
15 | - [Small Deployments](#small-deployments)
16 | - [Scaling Up](#scaling-up-to-big-deployments)
17 | 5. [Conclusion](#conclusion)
18 |
19 | ## Introduction
20 |
21 | tacheles is a comprehensive blueprint for building LLM chat applications. It provides a solid foundation for developers to create sophisticated LLM-powered chat interfaces or integrate chat capabilities into existing systems. The project consists of a React frontend, a FastAPI backend, and various inference engine options, all orchestrated using Docker and Docker Compose.
22 |
23 | ## Architecture Overview
24 |
25 | The tacheles architecture is designed to be modular, scalable, and easily extensible. It comprises three main components: the frontend, the backend, and the inference engine. Additionally, the backend uses a database, either within its own container, in another container, or as an external service. How these components work together can vary:
26 |
27 | ![tacheles architecture in development mode](docs/architecture_dev_mode.svg)
28 |
29 | In development mode, the frontend container hosts the compiled React frontend on a dev web server running on `http://localhost:3000/`. Once the frontend is loaded from there, the user's browser communicates directly with the backend by sending API requests to `http://localhost:8001`, which is the exposed port of the backend container. The backend processes these requests, interacts with the inference engine and the database, and sends responses back to the frontend.
30 |
31 | To start tacheles in development mode, use `MODEL=username/model docker compose -f docker-compose.dev.yaml up`. Alternatively, you can use `docker-compose.dev.mock.yaml` - this does all of the above, but starts tacheles with a _mocked_ inference API server, allowing development of the frontend and backend without having to run an actual LLM.
32 |
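With either development stack running, you can quickly verify that the backend is reachable on its exposed port by calling the health check endpoint. A minimal example using `httpx` (already one of the backend's dependencies):

```python
# Quick smoke test against the dev backend's health check endpoint.
# Assumes a dev docker compose stack is running, with the backend on port 8001.
import httpx

response = httpx.get("http://localhost:8001/api/healthcheck")
print(response.json())  # prints "OK" if the backend is up
```
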
33 | ![tacheles architecture in deployment mode](docs/architecture_deploy_mode.svg)
34 |
35 | In deployment mode, the frontend is typically served as a static file bundle instead. In the configuration that comes with tacheles, this static file bundle is served by the backend FastAPI server on port 80, eliminating the need for a separate static file host container. The user's browser communicates with the backend server both to load the static frontend files, as well as for subsequent API calls. However, many other deployment architectures are possible, which we discuss toward the end of this document.
36 |
37 | To start tacheles in deployment mode, use `MODEL=username/model docker compose -f docker-compose.yaml up`.
38 |
39 | ### Main Components Overview
40 |
41 | #### Frontend
42 |
43 | The frontend of tacheles is built using React and is located in the `frontend` directory. Two main components are worth noting. First, `ChatInterface` provides a basic interface that shows prior messages in a conversation (using the `MessageList` component) and lets the user send new messages (using the `UserInput` component); it also implements streaming responses on the client side. Second, `ChatInterfaceWithConversationList` wraps around this basic interface and adds a sidebar listing all previous conversations, allowing the user to switch between them. (Note that the wrapper is optional - `ChatInterface` also functions standalone, and we show an example of this in `AppWithoutConversationList`.)
44 |
45 | For those unfamiliar with React: these components are not directly executable themselves, but are compiled by React into JavaScript and HTML that run in the client's web browser. During development, React runs its own web server that serves these compiled files, so that code changes can be hot-reloaded - this is why we run a frontend docker container during development. For deployment, this compilation is done once ahead of time, and the compiled files are served by a production web server. One option is to serve them as static files from the backend FastAPI server (see also the section on deployment).
46 |
47 | #### Backend
48 |
49 | The backend of tacheles is located in the `backend` directory and is built using [FastAPI](https://fastapi.tiangolo.com/) and [SQLModel](https://sqlmodel.tiangolo.com/). It acts as the intermediary between the frontend and the inference engine, handling API requests, managing the database, and processing chat messages. For instance, the inference API (be it vllm, sglang, or OpenAI) doesn't remember prior messages in a conversation - when the browser sends a new user message, the backend retrieves the conversation history from its database and ensures the correct sequence of messages is fed into the LLM. Together, FastAPI and SQLModel make it very easy to define API endpoints and interact with a database: a single class definition can define a database table and its relationships, as well as API endpoint arguments and return types.
50 |
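As a minimal sketch of this pattern - using a hypothetical `Note` model, not one of the actual classes in `models/models.py` - a single SQLModel class can serve as both a database table and the validated argument and return type of an endpoint:

```python
from typing import Optional

from fastapi import FastAPI
from sqlmodel import Field, Session, SQLModel, create_engine


# One class definition doubles as a database table (table=True) and, via
# Pydantic, as the argument and return type of the route below.
class Note(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    content: str


app = FastAPI()
# Illustrative database URL; check_same_thread=False is needed for sqlite here.
engine = create_engine(
    "sqlite:///notes.db", connect_args={"check_same_thread": False}
)
SQLModel.metadata.create_all(engine)


@app.post("/api/new_note", response_model=Note)
async def new_note(note: Note) -> Note:
    with Session(engine) as db:
        db.add(note)
        db.commit()
        db.refresh(note)
        return note
```
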
51 | For extensibility, the backend is split into multiple files. The two main files of note are `api/routes.py`, which defines all API endpoints, and `models/models.py`, which defines the database tables (and thus also the API argument and return types). Most API routes are very simple thanks to the FastAPI & SQLModel magic, except that `chat()` in `api/routes.py` uses some slightly non-trivial code to enable streaming responses. `tacheles_backend.py` pulls these together, and `models/database.py` and `utils/logging.py` contain utility functions for database and logging setup (mostly stubs for now, but separated out for future development).
52 |
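The essence of that streaming code is the pattern below - a stripped-down sketch, omitting the database access, authentication, and error handling that the real `chat()` performs: wrap the token-by-token work in a generator and hand it to FastAPI's `StreamingResponse`.

```python
import json

from fastapi.responses import StreamingResponse


def fake_completion():
    # Stand-in for the chunk stream received from the inference API.
    yield from ["Hello", ", ", "world!"]


def chat_sketch() -> StreamingResponse:
    def generate():
        # Forward each chunk to the client as newline-delimited JSON,
        # then signal that the response is complete.
        for token in fake_completion():
            yield json.dumps({"type": "content", "data": token}) + " \n"
        yield json.dumps({"type": "end", "data": ""}) + " \n"

    return StreamingResponse(generate(), media_type="application/x-ndjson")
```
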
53 | #### Inference Engines
54 |
55 | tacheles supports multiple inference engines for running the language model and generating responses. The inference engines are hosted in separate Docker containers and expose an OpenAI-compatible API for seamless integration with the backend.
56 |
57 | The supported inference engines include:
58 |
59 | - [vllm](https://github.com/vllm-project/vllm): A high-performance inference engine for running large language models. It provides an optimized runtime environment for efficient inference.
60 |
61 | - [sglang](https://github.com/sgl-project/sglang): A flexible inference engine that supports multi-modal models, fast JSON decoding, and radix attention for even faster inference.
62 |
63 | - [Mock API](https://github.com/polly3d/mockai): For development purposes, tacheles includes a "mock" inference API option that allows local development without having to run an actual LLM.
64 |
65 | - Commercial APIs: tacheles can also integrate with commercial APIs such as the official OpenAI API, instead of locally hosted models.
66 |
67 | The "default" `docker-compose.yaml` and `docker-compose.dev.yaml` configurations use sglang, or optionally vllm (see inside the files). If you'd like to use a commercial API, use the `...openai.yaml` files. You can change the `INFERENCE_API_URI` variable in those files to use an API different from the standard OpenAI endpoints. The mock API is used only for development, through the `docker-compose.dev.mock.yaml` configuration.
68 |
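For reference, this is how the backend picks these variables up (verbatim from `tacheles_backend/api/routes.py`):

```python
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY", "123"),
    base_url=os.environ.get("INFERENCE_API_URI", None),
)
model = os.environ.get("MODEL", "model")
```
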
69 | #### Database
70 |
71 | tacheles uses an SQL database to store conversation history and user information. In development docker-compose configurations this is set to a local sqlite database for simplicity. Deployment docker-compose files include a mysql database container, but this could be replaced by any other supported SQL database container, or by an externally hosted database.
72 |
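A minimal sketch of what such a setup typically looks like with SQLModel is shown below; the repository's actual logic lives in `models/database.py`, and the sqlite fallback path here is an illustrative assumption:

```python
import os

from sqlmodel import Session, create_engine

# DATABASE_URL can point to sqlite (development), the mysql container
# (deployment), or any other SQL database supported by SQLAlchemy.
engine = create_engine(
    os.environ.get("DATABASE_URL", "sqlite:///database/tacheles.db")
)


def get_db():
    # FastAPI dependency: yield one session per request, closed afterwards.
    with Session(engine) as session:
        yield session
```
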
73 | ## Extending and Customizing tacheles
74 |
75 | tacheles is designed to be easily extensible and customizable to fit various use cases and requirements. Let's first explore how to set up a development environment that makes working with tacheles easy, and then how to add new features, modify the frontend and backend, and write tests.
76 |
77 | ### Development Environment Setup
78 |
79 | #### Dev Mode Containers & Debugging
80 |
81 | tacheles is built to make developing new features as easy as possible. In development mode, both the frontend and backend are hot-reloaded from the host OS, so you don't need to restart docker containers when you make changes. The `docker-compose.dev.mock.yaml` setup additionally allows development without loading a real LLM, and without a GPU or other hardware capable of doing so!
82 |
83 | Both development setups also allow debugging the backend from the host OS on port `5678`. In order to connect to this from VS Code, add the following configuration to your `launch.json` file:
84 |
85 | ```json
86 | {
87 | "configurations": [
88 | {
89 | "name": "Python: Debug tacheles backend",
90 | "type": "debugpy",
91 | "request": "attach",
92 | "connect": { "host": "localhost", "port": 5678 },
93 | "pathMappings": [
94 | {
95 | "localRoot": "${workspaceFolder}/backend",
96 | "remoteRoot": "/app"
97 | }
98 | ],
99 | "justMyCode": true
100 | }
101 | ]
102 | }
103 | ```
104 |
105 | Then spin up the containers, select the new configuration in the debugging tab, and click `Start Debugging`. If you run into any issues that crash the backend before you're able to connect, add `--wait-for-client` to the entrypoint in `backend/Dockerfile.dev` between `debugpy` and `--listen`, as shown below. The backend debugger will then wait for a connection before actually starting the backend server. (Note, however, that this means you _have to_ connect a debugger, or the backend will never start - this is why the option is not enabled by default.)
106 |
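With that flag added, the `CMD` in `backend/Dockerfile.dev` would read:

```dockerfile
CMD ["python", "-m", "debugpy", "--wait-for-client", "--listen", "0.0.0.0:5678", "-m", "uvicorn", "tacheles_backend.tacheles_backend:app", "--host", "0.0.0.0", "--port", "8001", "--reload", "--reload-dir", "/app/tacheles_backend", "--log-level", "debug"]
```
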
107 | You could similarly debug the inference engine (i.e., vllm or sglang), but this should rarely be necessary. If you do find yourself in need of this, check the commented-out entrypoint in `docker-compose.dev.yaml` to see how.
108 |
109 | #### Formatting, Testing, and GitHub Actions
110 |
111 | tacheles is also set up to enable automatic formatting, linting and testing, both locally and through GitHub Actions, to help maintain code quality over time.
112 |
113 | To run these locally, use `black .`, `isort .`, `flake8 .` and `pytest .` inside the backend directory to format, sort imports, lint and test the backend, respectively. Similarly, run `npm run format`, `npm run lint` and `npm run test` inside the frontend directory to format, lint and test the frontend. If you use VS Code, the provided configuration file sets these up to run automatically.
114 |
115 | Additionally, there are end-to-end tests that test the frontend against a real running backend and the mock inference engine. To run these locally, run `REACT_APP_BACKEND_URL=http://localhost:8001 npm run test:e2e`. You must have the `docker-compose.dev.mock.yaml` configuration up and running for this to work.
116 |
117 | Finally, tacheles ships with GitHub Actions configurations that can run all of these on GitHub. By default, these actions run manually (via workflow_dispatch) as well as on pull requests against the main branch. You can adjust this in `.github/workflows`.
118 |
119 | ### Adding New Features
120 |
121 | tacheles is meant as a foundation for building specialized LLM apps, and was built to make it easy to extend and add new features. Generally, doing so means adding or modifying frontend components and backend API routes (and possibly database models).
122 |
123 | #### Modifying the Frontend
124 |
125 | The frontend currently provides four main components that work together to implement a basic chat interface: `MessageList`, `UserInput`, `ChatInterface` and `ChatInterfaceWithConversationList`. These are modular and could each be used standalone - for instance, `MessageList` by itself could display a read-only conversation that the user cannot respond to, and `ChatInterface` by itself could implement a single-conversation interface without the ability to go back to earlier conversations. We provide an example of the latter in `AppWithoutConversationList.js`, which can be enabled by commenting out a line in `index.js`.
126 |
127 | When modifying the frontend, you may wish to use these components as starting points and examples. For instance, `ChatInterfaceWithConversationList` is a good example of how to add an additional feature on top of the more basic `ChatInterface`.
128 |
129 | #### Modifying the Backend
130 |
131 | Extending the backend will typically involve two types of additions: API routes and database models. Database models in SQLModel are essentially Python dataclasses with a few extra annotations, which SQLModel converts into both database tables and Pydantic models for automatic data validation. Using these, writing API routes is often trivially easy. For instance, check out `get_conversations()` in `backend/api/routes.py`, which implements an API endpoint listing all conversations for a user in just two lines (abridged below).
132 |
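Abridged from `routes.py` (omitting the session check and error handling), the core of that route is just the query and the return:

```python
@router.get("/api/conversations/{user_id}", response_model=List[Conversation])
async def get_conversations(user_id: int, db: Session = Depends(get_db)):
    conversations = db.exec(
        select(Conversation).where(Conversation.user_id == user_id)
    ).all()
    return conversations
```
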
133 | #### Writing Tests
134 |
135 | tacheles includes tests for the frontend and backend, and you might wish to add tests for your own features as well.
136 |
137 | Frontend tests are located in the `frontend/src/__tests__` directory. They use the `@testing-library/react` library to render components, simulate user interactions, and make assertions. Most of these take the form of "render a component, find a button in the DOM and click it, then check that certain components or text fragments are present in the DOM afterwards", and should be easy to adapt to new components. Calls to the backend API are mocked.
138 |
139 | Backend tests are located in the `backend/tests` directory. They use the `pytest` testing framework and `fastapi.testclient.TestClient` to test the API endpoints and database operations. Here in turn, calls to the inference API are mocked.
140 |
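As a minimal, hypothetical example in the same style (not one of the actual tests in `tests/test_backend.py`), testing the health check endpoint looks like this:

```python
from fastapi.testclient import TestClient

from tacheles_backend.tacheles_backend import app

client = TestClient(app)


def test_healthcheck():
    # The simplest round trip: the health check endpoint should return "OK".
    response = client.get("/api/healthcheck")
    assert response.status_code == 200
    assert response.json() == "OK"
```
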
141 | When adding new components or features, use these as templates for adding your own tests. Additionally, there are end-to-end tests in `frontend/src/__tests__e2e__` which test the entire application end-to-end using the mock inference container. These follow similar patterns as the frontend integration tests, and should be easy to extend to cover new functionality.
142 |
143 | ## Deploying tacheles Applications
144 |
145 | The included `docker-compose.yaml` and `docker-compose.openai.yaml` configurations are one relatively straightforward way to deploy tacheles. They serve the compiled frontend from the backend FastAPI server, and use a mysql container as a database. This is only one of many possible configurations, and you may wish to consider other options.
146 |
147 | ### Small Deployments
148 |
149 | If you use an external inference API (e.g., OpenAI) as well as an externally hosted database (e.g., Amazon RDS) you could host tacheles from a single web server / docker container, and even on the free tier of many hosting services. You have two options for doing so:
150 |
151 | - In a **dockerized** setup, you could simply build and run only the (deployment) backend container, and point it to the inference API and database using the environment variables `DATABASE_URL` and `INFERENCE_API_URI` (or leave the latter unset for the OpenAI API). Building this container automatically compiles the frontend and copies it into the backend container.
152 | - If you prefer or require a **non-dockerized** setup, you could host the backend on a host-OS Python runtime. In this case, you would need to manually compile the frontend using `npm run build`, and then copy the contents of `frontend/build` to your web server. You could then set the `HOST_FRONTEND_PATH` environment variable to tell the backend to serve the compiled frontend, or you could host the frontend through other means.
153 |
154 | ### Scaling Up to Big Deployments
155 |
156 | In the opposite direction, you may wish to use Kubernetes or a similar orchestration system to scale tacheles up to large deployments. A full discussion of this is beyond the scope of this document. However, we note several considerations:
157 |
158 | - The default configuration of hosting the frontend through the backend container is simple and convenient, but may not be as fast as a dedicated static web server such as `nginx`.
159 | - A common pattern is then to also use the frontend web server as a proxy for backend API requests. If you choose to do this, make sure your proxy supports and is configured for server-sent events (SSE), or you will lose the ability to stream responses.
160 | - If you instead choose to host the frontend separately from the backend (i.e., under a different URL), you must set the `REACT_APP_BACKEND_URL` when compiling the frontend to point it to the correct backend URL.
161 | - The backend (in its current state) is stateless, and could be scaled horizontally, even without sticky sessions. (All state is stored in the database.)
162 | - Both vllm and sglang, however, are only partly stateless: subsequent requests in one conversation should ideally be directed to the same inference replica for best performance.
163 |
164 | ## Conclusion
165 |
166 | I hope this document, and tacheles, is useful to you. If there is anything you found missing, let me know via email or through a GitHub issue.
167 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # tacheles
2 |
3 | > "Tacheles: German, borrowed from Yiddish: A conversation about an essential or difficult topic, straight talk"
4 |
5 | tacheles is a lightweight, open-source blueprint for building powerful and scalable language model (LLM) chat applications. It provides an end-to-end implementation of LLM chat, together with comprehensive scaffolding for development and deployment.
6 |
7 | Read on for more info, or jump straight to [Getting Started](#getting-started).
8 |
9 | ![tacheles screenshot](docs/screenshot.png)
10 |
11 | ## Features
12 |
13 | tacheles includes a complete end-to-end LLM chat interface. It implements all the basic functionality, including a few features that are slightly non-trivial to implement:
14 |
15 | - **Complete UI & Backend**: including tracking conversations, switching between conversations, and a modern, responsive UI.
16 | - **State-of-the-Art Inference Engines**: tacheles includes support for inference through both sglang and vllm, as well as commercial APIs such as OpenAI.
17 | - **Streaming Output**: LLM responses are streamed token by token, and aborted requests are handled gracefully.
18 | - **Markdown Rendering**: Messages support markdown rendering and copy-pasting rendered code. Partial markdown is rendered while responses are streaming.
19 | - **Detailed Documentation**: All of tacheles is documented and commented in detail, to allow even novice developers to get started quickly.
20 |
21 | Additionally, tacheles includes all the scaffolding around the core functionality:
22 |
23 | - **Dockerized Setup**: The entire application is dockerized, and includes example configurations for a variety of deployment scenarios.
24 | - **Complete Development Environment**: tacheles provides a completely set up development environment through docker, including hot-reloading code changes from the host OS, debugging through docker, and a mock inference API so no GPU is needed for development.
25 | - **Tests**: The project includes unit, integration and end-to-end tests of the entire application.
26 | - **GitHub Actions**: Additionally, GitHub actions are set up for formatting, linting and testing.
27 |
28 | ## Who is tacheles for?
29 |
30 | Anyone who's wanted to build an LLM application, but didn't know where to start! tacheles is meant to provide a solid foundation on which you can build custom applications, and is designed with extensibility, scalability, and easy development in mind. tacheles could be for you if
31 |
32 | - you're an ML practitioner and know how LLMs work, but aren't as familiar with web and frontend technologies
33 | - all code in tacheles is simple, and copiously commented, to help get you started
34 | - you've been building prototypes using ML-focused tools such as gradio but would like to scale them up or integrate them with other projects
35 | - tacheles components are built to be modular and easily integrated with existing React or FastAPI apps
36 | - you know individual technologies such as React or FastAPI, but not how to put them together into a complete project
37 | - tacheles ships with example docker files and documentation on architecture
38 | - you know how to build a project like this, but not how to deploy it or scale it up to large numbers of users
39 | - tacheles is built with scalability in mind and the documentation has some pointers to get you started
40 | - you know how to build this, but you hate starting from scratch or setting up things like tests and CI/CD
41 | - tacheles does all the tedious bits for you
42 | - you want to build something highly specialized and find existing LLM chat UI projects too complex for your needs
43 | - tacheles is intentionally lean with just a few hundred lines of code at its core
44 | - or you want to learn how to build a modern and scalable web app from scratch
45 | - tacheles provides ample documentation and commentary throughout its code and is meant in part also as an educational project
46 |
47 | ## What is tacheles **not**?
48 |
49 | tacheles is _not_ a full-featured app by itself. It is meant to be a starting point for you to build your own specialized applications with, rather than an off-the-shelf replacement of existing general-purpose LLM chat apps.
50 |
51 | ## Getting Started
52 |
53 | To get started with tacheles, follow these steps:
54 |
55 | 1. Clone the repository: `git clone https://github.com/mgerstgrasser/tacheles.git`
56 | 2. Navigate to the project directory: `cd tacheles`
57 |
58 | - If you have a GPU and want to run local inference, run
59 |
60 | ```bash
61 |   MODEL=username/model HF_TOKEN=your_token docker compose -f docker-compose.dev.yaml up --build
62 | ```
63 |
64 | - If you have money and want to run inference through OpenAI, run
65 |
66 | ```bash
67 |   MODEL=gpt-3.5-turbo OPENAI_API_KEY=sk-... docker compose -f docker-compose.dev.openai.yaml up --build
68 | ```
69 |
70 | - If you have neither and want to just develop without a real LLM, run
71 |
72 | ```bash
73 |   docker compose -f docker-compose.dev.mock.yaml up --build
74 | ```
75 |
76 | 3. Open your browser and visit `http://localhost:3000` to access the chat interface.
77 |
78 | Then, head over to the [documentation](DOCUMENTATION.md) for more detailed information on usage, architecture, development, debugging, and production deployment.
79 |
80 | ## License
81 |
82 | tacheles is open-source software licensed under the [Apache License](LICENSE).
83 |
84 | If you find this useful in academic work, consider citing this repository:
85 |
86 | ```bibtex
87 | @misc{gerstgrasser2024tacheles,
88 | author = {Matthias Gerstgrasser},
89 | title = {tacheles: a scalable foundation for LLM chat applications},
90 | year = {2024},
91 | publisher = {GitHub},
92 | journal = {GitHub repository},
93 | howpublished = {\url{https://github.com/mgerstgrasser/tacheles}}
94 | }
95 | ```
96 |
--------------------------------------------------------------------------------
/backend/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 88
--------------------------------------------------------------------------------
/backend/Dockerfile:
--------------------------------------------------------------------------------
1 | # We use the frontend container to build the frontend, and then copy the build into the backend container.
2 | FROM node:20-alpine AS frontendbuild
3 | WORKDIR /app
4 | COPY --from=frontend package.json .
5 | RUN npm install
6 | COPY --from=frontend . .
7 | ENV REACT_APP_BACKEND_URL=''
8 | RUN npm run build
9 | # Now we are done with the frontend.
10 |
11 | # Use a base image with Python installed
12 | FROM python:3.12-slim
13 |
14 | # Set the working directory
15 | WORKDIR /app
16 |
17 | # Copy the tacheles_backend directory into the container
18 | COPY . /app
19 |
20 | # Install the required dependencies
21 | COPY requirements.txt /app
22 | RUN pip install -r requirements.txt
23 |
24 | # Copy the frontend build into the container
25 | COPY --from=frontendbuild /app/build /app/frontend
26 |
27 | # Expose the port
28 | EXPOSE 8001
29 |
30 | # Tell the FastAPI app to host the frontend files
31 | ENV HOST_FRONTEND_PATH=/app/frontend
32 |
33 | # Set the command to run the FastAPI app
34 | CMD ["uvicorn", "tacheles_backend.tacheles_backend:app", "--host", "0.0.0.0", "--port", "8001", "--log-level", "info"]
35 |
--------------------------------------------------------------------------------
/backend/Dockerfile.dev:
--------------------------------------------------------------------------------
1 | # Use a base image with Python installed
2 | FROM python:3.12
3 |
4 | # Set the working directory
5 | WORKDIR /app
6 |
7 | # Copy the tacheles_backend directory into the container
8 | COPY . /app
9 |
10 | # Install the required dependencies
11 | COPY requirements.txt /app
12 | RUN pip install -r requirements.txt
13 |
14 | RUN pip install debugpy
15 |
16 | # Expose the port
17 | EXPOSE 8001
18 |
19 | # Create volume for /database
20 | VOLUME /database
21 |
22 | # Set the command to run the FastAPI app
23 | CMD ["python", "-m", "debugpy", "--listen", "0.0.0.0:5678", "-m", "uvicorn", "tacheles_backend.tacheles_backend:app", "--host", "0.0.0.0", "--port", "8001", "--reload", "--reload-dir", "/app/tacheles_backend", "--log-level", "debug"]
24 |
--------------------------------------------------------------------------------
/backend/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 88
3 | target-version = ['py39', 'py310', 'py311', 'py312']
4 | exclude = '''
5 | /(
6 | \.eggs
7 | | \.git
8 | | \.hg
9 | | \.mypy_cache
10 | | \.tox
11 | | \.venv
12 | | _build
13 | | buck-out
14 | | build
15 | | dist
16 | )/
17 | '''
18 |
19 | [tool.isort]
20 | profile = "black"
21 | multi_line_output = 3
22 | include_trailing_comma = true
23 | force_grid_wrap = 0
24 | use_parentheses = true
25 | line_length = 88
26 |
27 | [tool.pytest.ini_options]
28 | minversion = "6.0"
29 | addopts = "-ra --cov=tacheles_backend --cov-report=term-missing"
30 | testpaths = ["tests"]
31 |
32 | [tool.coverage.run]
33 | branch = true
34 | source = ["tacheles_backend"]
35 |
36 | [tool.coverage.report]
37 | exclude_lines = [
38 | "pragma: no cover",
39 | "def __repr__",
40 | "if self.debug:",
41 | "if settings.DEBUG",
42 | "raise AssertionError",
43 | "raise NotImplementedError",
44 | "if 0:",
45 | "if __name__ == .__main__.:",
46 | ]
47 |
48 | [tool.mypy]
49 | python_version = "3.9"
50 | check_untyped_defs = true
51 | ignore_missing_imports = true
52 | strict_optional = true
53 | warn_unused_ignores = true
54 | warn_redundant_casts = true
55 | warn_unused_configs = true
--------------------------------------------------------------------------------
/backend/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | pytest
2 | pytest-mock
3 | pytest-asyncio
4 | httpx
5 | black
6 | flake8
7 | isort
8 | pytest-cov
--------------------------------------------------------------------------------
/backend/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi
2 | httpx
3 | uvicorn
4 | openai
5 | PyYAML
6 | sqlmodel
7 | pymysql
8 | cryptography
9 | itsdangerous
--------------------------------------------------------------------------------
/backend/tacheles_backend/api/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgerstgrasser/tacheles/9331abc87bcadc47c8f3b8c7f9e7ff5eafd8f01b/backend/tacheles_backend/api/__init__.py
--------------------------------------------------------------------------------
/backend/tacheles_backend/api/routes.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from typing import List
4 |
5 | from fastapi import APIRouter, Depends, HTTPException, Request
6 | from fastapi.responses import StreamingResponse
7 | from openai import OpenAI, OpenAIError
8 | from sqlmodel import Session, select
9 |
10 | from ..models.database import get_db
11 | from ..models.models import Conversation, ConversationWithMessagesList, Message, User
12 | from ..utils.logging import get_logger
13 |
14 | # This file defines all the API endpoints for the backend.
15 |
16 | # --------------------
17 | # Setup
18 | # --------------------
19 |
20 | # We first set up a few things:
21 | # First, an APIRouter. This is needed because we keep our API routes here,
22 | # instead of in the main FastAPI app file, to keep things organized.
23 | router = APIRouter()
24 |
25 | # Then, we set up a logger.
26 | # Have a look at utils/logging.py for details.
27 | # For now this is just a stub that re-uses the uvicorn/FastAPI logger.
28 | # But you could define more sophisticated logging here.
29 | logger = get_logger(__name__)
30 |
31 | # Finally, we set up an OpenAI library client.
32 | # We use this even for vllm and sglang as well as the mock inference backend, as all
33 | # of them support the same OpenAI API. However, it would be fairly simple to switch
34 | # this out for a different library if needed, it's really only used once in chat().
35 | # For now, we keep this here for simplicity and to not scare you away with a backend
36 | # spread across dozens of files. However, if you were to do more sophisticated things
37 | # here, e.g. multiple models or even different inference APIs, prompt re-writing, etc.,
38 | # you might want to move this to a separate file similarly to the logger.
39 | client = OpenAI(
40 | api_key=os.environ.get("OPENAI_API_KEY", "123"),
41 | base_url=os.environ.get("INFERENCE_API_URI", None),
42 | )
43 | model = os.environ.get("MODEL", "model")
44 | system_prompt = "You are a helpful assistant."
45 |
46 |
47 | # --------------------
48 | # API Endpoints
49 | # --------------------
50 |
51 |
52 | # Now we define the actual API endpoints. The first is just a simple health check,
53 | # which can be used (e.g., by docker compose) to see if the backend is up and running.
54 | @router.get("/api/healthcheck", tags=["Healthcheck"])
55 | async def healthcheck():
56 | """
57 | Health check endpoint for the backend.
58 |
59 | Returns:
60 | str: "OK" if the backend is running.
61 | """
62 | return "OK"
63 |
64 |
65 | # This is our first "real" endpoint.
66 | # Notice how thanks to FastAPI and SQLModel adding a new user is as simple as
67 | # saying `db.add(user)`. SQLModel takes care of the rest. Notice also how the same
68 | # User class is used to return the output to the client, without any additional effort
69 | # on our part.
70 | @router.post("/api/new_user", response_model=User, tags=["Users"])
71 | async def new_user(request: Request, db: Session = Depends(get_db)) -> User:
72 | """
73 | Create a new user and return the user ID.
74 |
75 | Returns:
76 | User: The created user object.
77 | """
78 | try:
79 | user = User()
80 | db.add(user)
81 | db.commit()
82 | db.refresh(user)
83 | logger.debug(f"New user created: {user}")
84 | # We use a session middleware to provide some very basic security.
85 | # Here we store the user ID in the session, so we can check it in later
86 | # requests to other API endpoints.
87 | # If you wanted to do something more sophisticated, like letting users
88 | # sign up via email and password, check out FastAPI's OAuth2 support.
89 | request.session["user_id"] = user.id
90 | return user
91 | except Exception as e:
92 | # The following is a generic error handling: If we raise a HTTPException
93 | # elsewhere in the try block, we pass it on. For anything else, we log
94 | # the error and raise a 500 error.
95 | if isinstance(e, HTTPException):
96 | raise e
97 | logger.error(f"Error creating new user: {str(e)}")
98 | raise HTTPException(status_code=500, detail="Internal Server Error")
99 |
100 |
101 | # Note how we use the ConversationWithMessagesList response model here. This would
102 | # allow us to return an initial list of messages when the user starts a new
103 | # conversation. We could use this e.g., to show a welcome message, or in research
104 | # projects where the user is given a pre-defined initial prompt, etc.
105 | @router.post(
106 | "/api/new_conversation",
107 | response_model=ConversationWithMessagesList,
108 | tags=["Conversations"],
109 | )
110 | async def new_conversation(
111 | user: User, request: Request, db: Session = Depends(get_db)
112 | ) -> Conversation:
113 | """
114 | Create a new conversation for a user and return the conversation ID.
115 |
116 | Args:
117 | user (User): The user object for whom the conversation is created.
118 |
119 | Returns:
120 | Conversation: The created conversation object.
121 | """
122 | if user.id != request.session.get("user_id"):
123 | raise HTTPException(status_code=403, detail="Unauthorized")
124 | try:
125 | conversation = Conversation(user_id=user.id)
126 | db.add(conversation)
127 | db.commit()
128 | db.refresh(conversation)
129 | logger.debug(f"New conversation created: {conversation}")
130 | logger.debug(f"Conversation messages: {conversation.messages}")
131 | return conversation
132 | except Exception as e:
133 | if isinstance(e, HTTPException):
134 | raise e
135 | logger.error(f"Error creating new conversation: {str(e)}")
136 | raise HTTPException(status_code=500, detail="Internal Server Error")
137 |
138 |
139 | @router.post("/api/chat", tags=["Chat"])
140 | def chat(
141 | usermessage: Message, request: Request, db: Session = Depends(get_db)
142 | ) -> StreamingResponse:
143 | """
144 | Receive a chat message from the user, process it using an LLM,
145 | and return the response as a streamed text.
146 |
147 | Args:
148 | usermessage (Message): The user's chat message.
149 |
150 | Returns:
151 | StreamingResponse: The LLM-generated response, streamed as plain text.
152 | """
153 |     logger.debug(
154 |         f"New chat request: ID {usermessage.conversation_id}, "
155 |         f"message: {usermessage.content}"
156 |     )
157 |
158 | # This is the chat endpoint that lets users send a message and receive a streaming
159 | # response from the LLM. Because of the token-by-token streaming, this endpoint
160 | # is slightly more complex than the others.
161 |
162 | # First, we must wrap the actual processing in a generator function that yields
163 | # the response in chunks. We can then later pass this function to FastAPI's
164 | # StreamingResponse, which will handle the streaming for us.
165 |
166 | def generate(db: Session):
167 | try:
168 | # First we get the conversation and do some standard checks and error
169 | # handling including basic authentication.
170 | conversation = db.get(Conversation, int(usermessage.conversation_id))
171 | if conversation is None:
172 | logger.warning(f"Conversation {usermessage.conversation_id} not found")
173 | raise HTTPException(status_code=404, detail="Conversation not found.")
174 |
175 | if conversation.user_id != request.session.get("user_id"):
176 | raise HTTPException(status_code=403, detail="Unauthorized")
177 |
178 | logger.debug(f"Conversation messages: {conversation.messages}")
179 |
180 | # Then, we format the user's conversation using a system prompt message,
181 | # the prior conversation history, and the user's current message, and
182 | # send it to the LLM for completion.
183 | try:
184 | completion = client.chat.completions.create(
185 | model=model,
186 | messages=[{"role": "system", "content": system_prompt}]
187 | + conversation.to_list()
188 | + [{"role": "user", "content": usermessage.content}],
189 | stream=True,
190 | max_tokens=2000,
191 | )
192 | except OpenAIError as e:
193 | logger.error(f"Error communicating with LLM backend: {str(e)}")
194 | raise HTTPException(
195 | status_code=500, detail="Error processing chat message"
196 | )
197 |
198 | # Then, we pass on each received chunk to the client as we receive it.
199 | llmmessage = ""
200 | for chunk in completion:
201 | cur_message = chunk.choices[0].delta
202 | if cur_message.content is not None:
203 | llmmessage += cur_message.content
204 | response = json.dumps(
205 | {"type": "content", "data": cur_message.content}
206 | )
207 | logger.debug(f"Sending chunk: {response}")
208 | yield f"{response} \n"
209 |
210 | logger.debug(f"Entire message: {llmmessage}")
211 |
212 | # Finally, we let the client know that the response is complete.
213 | response = json.dumps({"type": "end", "data": ""})
214 | logger.debug(f"Sending chunk: {response}")
215 | yield f"{response} \n"
216 |
217 | # Afterwards, we save the user's message and the LLM's response to the
218 | # database, so we can use them in future requests.
219 | conversation.messages.append(usermessage)
220 | conversation.messages.append(Message(role="assistant", content=llmmessage))
221 | db.commit()
222 |
223 | except Exception as e:
224 | if isinstance(e, HTTPException):
225 | raise e
226 | logger.error(f"Error processing chat request: {str(e)}")
227 | raise HTTPException(status_code=500, detail="Internal Server Error")
228 |
229 | return StreamingResponse(generate(db), media_type="application/x-ndjson")
230 |
231 |
232 | @router.get(
233 | "/api/conversations/{user_id}",
234 | response_model=List[Conversation],
235 | tags=["Conversations"],
236 | )
237 | async def get_conversations(
238 | user_id: int, request: Request, db: Session = Depends(get_db)
239 | ):
240 | try:
241 | if user_id != request.session.get("user_id"):
242 | raise HTTPException(status_code=403, detail="Unauthorized")
243 | conversations = db.exec(
244 | select(Conversation).where(Conversation.user_id == user_id)
245 | ).all()
246 |
247 | # We intentionally don't give "empty" conversations any special handling here,
248 | # as there are many possible design choices for dealing with them. E.g.,
249 | # you might want to only create a conversation when the user sends the first
250 | # message. On the other hand, you might also source the first user message from
251 | # elsewhere, in which case you'd want a very different design. Or you might want
252 | # to auto-save drafts of user messages, including the first one, on the backend
253 | # so the user can come back to them later. All of these would need different
254 | # design choices.
255 |
256 | # However, as one "quick" way of dealing with empty conversations, we could
257 | # filter them out here. In the following, we filter out all conversations that
258 | # have no messages, except the last one. (So that the last one still shows up
259 | # in the frontend conversation list.)
260 |
261 | # for conversation in conversations:
262 | # conversation.messages = db.exec(
263 | # select(Message).where(Message.conversation_id == conversation.id)
264 | # ).all()
265 |
266 | # return (
267 | # [
268 | # conversation
269 | # for conversation in conversations[:-1]
270 | # if len(conversation.messages) > 0
271 | # ]
272 | # + [conversations[-1]]
273 | # if len(conversations) > 0
274 | # else []
275 | # )
276 |
277 | return conversations
278 | except Exception as e:
279 | if isinstance(e, HTTPException):
280 | raise e
281 | logger.error(f"Error retrieving conversations: {str(e)}")
282 | raise HTTPException(status_code=500, detail="Internal Server Error")
283 |
284 |
285 | @router.get(
286 | "/api/conversations/{conversation_id}/messages",
287 | response_model=List[Message],
288 | tags=["Conversations"],
289 | )
290 | async def get_conversation_messages(
291 | conversation_id: int, request: Request, db: Session = Depends(get_db)
292 | ):
293 | try:
294 | conversation = db.get(Conversation, conversation_id)
295 | if conversation is None:
296 | raise HTTPException(status_code=404, detail="Conversation not found.")
297 | if conversation.user_id != request.session.get("user_id"):
298 | raise HTTPException(status_code=403, detail="Unauthorized")
299 | messages = db.exec(
300 | select(Message).where(Message.conversation_id == conversation_id)
301 | ).all()
302 | # We can't have two user messages in a row, otherwise the inference backend
303 | # will throw an error.
304 | # In theory, the last message should always be from the assistant anyway.
305 | # But just in case, we remove the last message if it's from the user.
306 | # Otherwise, the frontend might call /api/chat again with another user message.
307 | if len(messages) > 0 and messages[-1].role == "user":
308 | messages.pop()
309 | return messages
310 | except Exception as e:
311 | if isinstance(e, HTTPException):
312 | raise e
313 | logger.error(f"Error retrieving conversation messages: {str(e)}")
314 | raise HTTPException(status_code=500, detail="Internal Server Error")
315 |
--------------------------------------------------------------------------------
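Since /api/chat streams newline-delimited JSON, a client has to read the response line by line and reassemble the content chunks. The following is a minimal sketch of such a client using the requests library, assuming the backend is reachable at http://localhost:8001; it is an illustration, not part of the codebase:

import json

import requests

# Reusing one requests.Session keeps the cookie set by /api/new_user,
# which is what lets the session-based user_id checks in the routes pass.
with requests.Session() as s:
    user = s.post("http://localhost:8001/api/new_user").json()
    conv = s.post(
        "http://localhost:8001/api/new_conversation", json={"id": user["id"]}
    ).json()
    with s.post(
        "http://localhost:8001/api/chat",
        json={"conversation_id": conv["id"], "role": "user", "content": "Hello!"},
        stream=True,
    ) as response:
        # Each line is one JSON chunk of the form {"type": ..., "data": ...}.
        for line in response.iter_lines():
            if not line:
                continue
            chunk = json.loads(line)
            if chunk["type"] == "content":
                print(chunk["data"], end="", flush=True)
            elif chunk["type"] == "end":
                break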
/backend/tacheles_backend/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgerstgrasser/tacheles/9331abc87bcadc47c8f3b8c7f9e7ff5eafd8f01b/backend/tacheles_backend/models/__init__.py
--------------------------------------------------------------------------------
/backend/tacheles_backend/models/database.py:
--------------------------------------------------------------------------------
1 | from os import environ
2 |
3 | from sqlmodel import Session, SQLModel, create_engine
4 |
5 | from .models import * # noqa
6 |
7 | # Here, we set up the database connection, using the DATABASE_URL environment
8 | # variable. If DATABASE_URL is not set, we default to a local SQLite database.
9 | # This is already very flexible, as it allows us to use any database supported
10 | # by SQLAlchemy, just by setting the DATABASE_URL environment variable.
11 | # But you may still wish to make changes here, e.g. to set up a connection pool.
12 |
13 | DATABASE_URL = environ.get("DATABASE_URL", "sqlite:////database/database.db")
14 |
15 | engine = create_engine(DATABASE_URL)
16 |
17 |
18 | def get_db():
19 | with Session(engine) as session:
20 | yield session
21 |
22 |
23 | def create_db_and_tables():
24 | SQLModel.metadata.create_all(engine)
25 |
26 |
27 | if __name__ == "__main__":
28 | # We can also use this script to create all the database tables.
29 | # By default this is run on uvicorn startup for convenience,
30 | # but in production environments you may wish to disable that,
31 | # and use this script to set up the database manually.
32 | create_db_and_tables()
33 | print("Database tables created")
34 |
--------------------------------------------------------------------------------
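As the comment in database.py notes, you may want to configure a connection pool when running against a server database such as the MySQL instance in docker-compose.yaml. A minimal sketch using SQLAlchemy's standard pooling arguments; the numbers are placeholder values, not recommendations, and these arguments don't apply to the SQLite default:

from os import environ

from sqlmodel import create_engine

DATABASE_URL = environ.get("DATABASE_URL", "mysql+pymysql://myuser:mypassword@db/mydb")

engine = create_engine(
    DATABASE_URL,
    pool_size=5,  # connections kept open in the pool
    max_overflow=10,  # extra connections allowed under load
    pool_pre_ping=True,  # verify connections are alive before handing them out
)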
/backend/tacheles_backend/models/models.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from pydantic import BaseModel
4 | from sqlmodel import TEXT, Column, Field, Relationship, SQLModel
5 |
6 | # Here we define the database schema using SQLModel.
7 | # You can think of the following classes as a sort of "dataclass" that automagically
8 | # gets transformed into a database schema. They're also used for data validation and
9 | # even some data transforms (e.g., returning messages as part of a conversation) in
10 | # the API routes.
11 |
12 |
13 | # A conversation, identified by a unique ID, linking back to a user and holding a
14 | # list of messages. We also define a to_list() method which allows us to convert
15 | # a conversation to a list of messages that we can directly feed into the OpenAI API.
16 | class Conversation(SQLModel, table=True):
17 | """
18 | A conversation identified by a unique ID.
19 | List of messages is backpopulated.
20 | """
21 |
22 | id: Optional[int] = Field(default=None, primary_key=True)
23 | user_id: int = Field(foreign_key="user.id")
24 | messages: List["Message"] = Relationship(back_populates="conversation")
25 | user: Optional["User"] = Relationship(back_populates="conversations")
26 |
27 | def to_list(self):
28 | """Convert a conversation to a list of messages."""
29 | return [message.to_dict() for message in self.messages]
30 |
31 |
32 | class User(SQLModel, table=True):
33 | """A single user identified by a unique ID."""
34 |
35 | id: Optional[int] = Field(default=None, primary_key=True)
36 | conversations: List[Conversation] = Relationship(back_populates="user")
37 |
38 |
39 | class Message(SQLModel, table=True):
40 | """
41 | A single message.
42 | Linked back to the conversation through an ID.
43 | """
44 |
45 | id: Optional[int] = Field(default=None, primary_key=True)
46 | conversation_id: int = Field(foreign_key="conversation.id")
47 | role: str
48 | content: str = Field(sa_column=Column(TEXT))
49 | conversation: Optional[Conversation] = Relationship(back_populates="messages")
50 |
51 | def to_dict(self):
52 | """Convert a message to a dict."""
53 | return {"role": self.role, "content": self.content}
54 |
55 |
56 | # The following sets up a nice feature of FastAPI/SQLModel, where we can tell FastAPI
57 | # exactly what data to include or exclude in the response it sends to the client.
58 | # Here, we tell it that we'd like it to add the list of messages to the response,
59 | # instead of just returning the conversation ID.
60 | class ConversationWithMessagesList(BaseModel):
61 | """
62 | A conversation identified by a unique ID, with messages included as a list.
63 | This duplicates the Conversation type, and is needed so that FastAPI
64 | includes the list of messages in the return value.
65 | """
66 |
67 | id: int
68 | messages: List[Message] = []
69 |
--------------------------------------------------------------------------------
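To make the conversion helpers concrete, here is the shape they produce; the values are made up for illustration:

from tacheles_backend.models.models import Message

msg = Message(conversation_id=1, role="user", content="Hello!")
print(msg.to_dict())
# {'role': 'user', 'content': 'Hello!'}

# Conversation.to_list() applies to_dict() to every message, yielding exactly
# the `messages` list format the OpenAI chat completions API expects, e.g.:
# [{'role': 'user', 'content': 'Hello!'},
#  {'role': 'assistant', 'content': 'Hi! How can I help?'}]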
/backend/tacheles_backend/tacheles_backend.py:
--------------------------------------------------------------------------------
1 | import os
2 | from uuid import uuid4
3 |
4 | from fastapi import FastAPI
5 | from fastapi.middleware.cors import CORSMiddleware
6 | from fastapi.staticfiles import StaticFiles
7 | from starlette.middleware.sessions import SessionMiddleware
8 |
9 | from .api.routes import router
10 | from .models.database import create_db_and_tables
11 |
12 | # Here we create and set up the FastAPI, and pull together all the components
13 | # defined in api/routes.py and models/models.py
14 |
15 | # First, we create the FastAPI app
16 | app = FastAPI(
17 | title="Tacheles API",
18 | description="API backend for Tacheles, a blueprint for LLM chat applications.",
19 | version="1.0.0",
20 | )
21 |
22 | # Then we set up a session middleware. We use this for some basic session management
23 | # and authentication.
24 | app.add_middleware(SessionMiddleware, secret_key=uuid4().hex)
25 |
26 | # We set up CORS, by default allowing all origins. This is useful for development,
27 | # but you should restrict this to your frontend domain in production.
28 | origins = [
29 | # "http://localhost:3000", # This allows the frontend during development
30 | # "https://example.com", # Add your real production domain here
31 | "*", # Or allow all origins, effectively disabling CORS
32 | ]
33 |
34 | app.add_middleware(
35 | CORSMiddleware,
36 | allow_origins=origins,
37 | allow_credentials=True,
38 | allow_methods=["*"],
39 | allow_headers=["*"],
40 | )
41 |
42 | # We mount the API routes from `api/routes.py`
43 | app.include_router(router, tags=["api"])
44 |
45 | # Optionally, we mount the compiled frontend as a static directory
46 | if os.environ.get("HOST_FRONTEND_PATH", False):
47 | app.mount(
48 | "/",
49 | StaticFiles(
50 | directory=os.environ.get("HOST_FRONTEND_PATH"),
51 | html=True,
52 | check_dir=False,
53 | ),
54 | name="frontend",
55 | )
56 |
57 |
58 | # Create databases on startup
59 | # This is enabled here as a convenience for development.
60 | # In practice, you would set up databases manually, or
61 | # use `python -m tacheles_backend.models.database` to create them.
62 | @app.on_event("startup")
63 | def on_startup():
64 | create_db_and_tables()
65 |
66 |
67 | if __name__ == "__main__":
68 | import uvicorn
69 |
70 | uvicorn.run(app, host="0.0.0.0", port=8000)
71 |
--------------------------------------------------------------------------------
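One caveat worth knowing about secret_key=uuid4().hex above: a fresh key is generated on every startup, so all existing session cookies are invalidated whenever the backend restarts, and sessions can't be shared across multiple replicas. A sketch of reading a stable secret from the environment instead, as a drop-in replacement for the middleware line above; SESSION_SECRET is a hypothetical variable name, not one the project currently defines:

import os
from uuid import uuid4

from starlette.middleware.sessions import SessionMiddleware

# Use a stable secret when provided, falling back to a random key for development.
app.add_middleware(
    SessionMiddleware,
    secret_key=os.environ.get("SESSION_SECRET", uuid4().hex),
)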
/backend/tacheles_backend/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgerstgrasser/tacheles/9331abc87bcadc47c8f3b8c7f9e7ff5eafd8f01b/backend/tacheles_backend/utils/__init__.py
--------------------------------------------------------------------------------
/backend/tacheles_backend/utils/logging.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 |
4 | def get_logger(name: str) -> logging.Logger:
5 | # To keep things simple, we just use the uvicorn logger for now.
6 | # This respects the --log-level argument passed to uvicorn, so
7 | # we don't clutter logs with debug messages in production.
8 | # In a real application, you can use this function to set up
9 | # more sophisticated logging, e.g. to log to a file.
10 | return logging.getLogger("uvicorn.error")
11 |
--------------------------------------------------------------------------------
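If you outgrow the uvicorn logger, a minimal sketch of what a file-logging variant of get_logger() could look like; the file name and format string are placeholders:

import logging


def get_logger(name: str) -> logging.Logger:
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Guard against adding duplicate handlers when called repeatedly.
    if not logger.handlers:
        handler = logging.FileHandler("backend.log")
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
        )
        logger.addHandler(handler)
    return logger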
/backend/tests/test_backend.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import sys
4 | from dataclasses import dataclass
5 |
6 | import pytest
7 | from fastapi.testclient import TestClient
8 | from sqlmodel import Session, SQLModel, create_engine
9 | from sqlmodel.pool import StaticPool
10 |
11 | # Here we define tests for the backend.
12 |
13 | # First, we need to add the backend directory to the Python path.
14 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
15 | from tacheles_backend.models.database import get_db # noqa
16 | from tacheles_backend.tacheles_backend import app # noqa
17 |
18 | # Then, we define a few classes so we can mock responses from the inference API.
19 |
20 |
21 | @dataclass
22 | class MockDelta:
23 | content: str
24 |
25 |
26 | @dataclass
27 | class MockChoice:
28 | delta: MockDelta
29 | index: int
30 | finish_reason: str
31 |
32 |
33 | @dataclass
34 | class MockResponse:
35 | choices: list[MockChoice]
36 |
37 |
38 | # Then, we set up a test database. We use an in-memory SQLite database for testing.
39 | @pytest.fixture(name="session")
40 | def initialize_test_database():
41 | # Set up the test database before each test:
42 | # we create a fresh in-memory SQLite database for every test run.
43 | test_engine = create_engine(
44 | "sqlite://",
45 | connect_args={"check_same_thread": False},
46 | poolclass=StaticPool,
47 | )
48 | # Create tables in the test database
49 | SQLModel.metadata.create_all(test_engine)
50 | yield Session(test_engine)
51 | # Clean up the test database after each test
52 | SQLModel.metadata.drop_all(test_engine)
53 |
54 |
55 | # And we set up a test client for the FastAPI app.
56 | @pytest.fixture(name="client")
57 | def get_client(session: Session):
58 | # Create a TestClient
59 | client = TestClient(app)
60 |
61 | # Here we also override the get_db dependency to use the test database.
62 | def override():
63 | return session
64 |
65 | app.dependency_overrides[get_db] = override
66 | return client
67 |
68 |
69 | # Basic tests: Check we can create a new user and conversation.
70 | def test_new_user(client: TestClient):
71 | response = client.post("/api/new_user")
72 | assert response.status_code == 200
73 | assert "id" in response.json()
74 |
75 |
76 | def test_new_conversation(client: TestClient):
77 | # Create a new user
78 | user_response = client.post("/api/new_user")
79 | user_id = user_response.json()["id"]
80 |
81 | # Create a new conversation
82 | conversation_response = client.post("/api/new_conversation", json={"id": user_id})
83 | assert conversation_response.status_code == 200
84 | assert "id" in conversation_response.json()
85 | assert conversation_response.json()["messages"] == []
86 |
87 |
88 | # Slightly more complex: Test the chat endpoint.
89 | # We mock a streaming response, and check we receive the correct chunks.
90 | def test_chat(mocker, client: TestClient):
91 | # Mock the OpenAI client
92 | mock_openai = mocker.patch("tacheles_backend.api.routes.client")
93 |
94 | # Create a new user
95 | user_response = client.post("/api/new_user")
96 | user_id = user_response.json()["id"]
97 |
98 | # Create a new conversation
99 | conversation_response = client.post("/api/new_conversation", json={"id": user_id})
100 | conversation_id = conversation_response.json()["id"]
101 |
102 | # Send a chat message
103 | message = {"role": "user", "content": "Hello"}
104 | mock_openai.chat.completions.create.return_value = iter(
105 | [
106 | MockResponse(
107 | choices=[
108 | MockChoice(
109 | delta=MockDelta(content="Hello"), index=0, finish_reason=None
110 | )
111 | ]
112 | ),
113 | MockResponse(
114 | choices=[
115 | MockChoice(
116 | delta=MockDelta(content=" there!"),
117 | index=0,
118 | finish_reason="stop",
119 | )
120 | ]
121 | ),
122 | ]
123 | )
124 |
125 | chat_response = client.post(
126 | "/api/chat",
127 | json={
128 | "conversation_id": conversation_id,
129 | "role": message["role"],
130 | "content": message["content"],
131 | },
132 | )
133 |
134 | assert chat_response.status_code == 200
135 |
136 | # Parse the JSON-encoded chunks and concatenate the content
137 | content = ""
138 | for chunk in chat_response.iter_lines():
139 | chunk_data = json.loads(chunk)
140 | if chunk_data["type"] == "content":
141 | content += chunk_data["data"]
142 |
143 | assert "Hello there!" == content
144 |
145 |
146 | # Now we check that we can get the list of conversations and messages.
147 | # For these we first create conversations / messages, and then check we
148 | # can retrieve them.
149 | def test_get_conversations(client: TestClient):
150 | user_id = client.post("/api/new_user").json()["id"]
151 | conversation1 = client.post("/api/new_conversation", json={"id": user_id}).json()
152 | conversation2 = client.post("/api/new_conversation", json={"id": user_id}).json()
153 |
154 | response = client.get(f"/api/conversations/{user_id}")
155 | assert response.status_code == 200
156 | assert len(response.json()) == 2
157 | assert response.json()[0]["id"] == conversation1["id"]
158 | assert response.json()[1]["id"] == conversation2["id"]
159 |
160 |
161 | def test_get_conversation_messages(client: TestClient, mocker):
162 | # Mock the OpenAI client
163 | mock_openai = mocker.patch("tacheles_backend.api.routes.client")
164 |
165 | # Create a new user
166 | user_response = client.post("/api/new_user")
167 | user_id = user_response.json()["id"]
168 |
169 | # Create a new conversation
170 | conversation_response = client.post("/api/new_conversation", json={"id": user_id})
171 | conversation_id = conversation_response.json()["id"]
172 |
173 | # Send a chat message
174 | message = {"role": "user", "content": "Hello"}
175 | mock_openai.chat.completions.create.return_value = iter(
176 | [
177 | MockResponse(
178 | choices=[
179 | MockChoice(
180 | delta=MockDelta(content="Hello there!"),
181 | index=0,
182 | finish_reason=None,
183 | )
184 | ]
185 | ),
186 | ]
187 | )
188 |
189 | client.post(
190 | "/api/chat",
191 | json={
192 | "conversation_id": conversation_id,
193 | "role": message["role"],
194 | "content": message["content"],
195 | },
196 | )
197 |
198 | response = client.get(f"/api/conversations/{conversation_id}/messages")
199 | assert response.status_code == 200
200 | assert len(response.json()) == 2
201 | assert response.json()[0]["content"] == "Hello"
202 | assert response.json()[1]["content"] == "Hello there!"
203 |
--------------------------------------------------------------------------------
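The 403 authorization paths in the routes are not exercised by the tests above. A sketch of what such a test could look like, reusing the same fixtures; it is an illustration, not part of the suite:

def test_unauthorized_conversation_access(client: TestClient):
    # Create a user and a conversation under the first client's session.
    user_id = client.post("/api/new_user").json()["id"]
    conversation_id = client.post(
        "/api/new_conversation", json={"id": user_id}
    ).json()["id"]

    # A second client carries its own session cookie, i.e. acts as a different user.
    other_client = TestClient(app)
    other_client.post("/api/new_user")
    response = other_client.get(f"/api/conversations/{conversation_id}/messages")
    assert response.status_code == 403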
/docker-compose.dev.mock.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | inference:
4 | build:
5 | context: ./inference
6 | dockerfile: Dockerfile.mock
7 | ports:
8 | - "8000:8000"
9 |
10 | backend:
11 | build:
12 | context: ./backend
13 | dockerfile: Dockerfile.dev
14 | environment:
15 | INFERENCE_API_URI: http://inference:8000/v1
16 | MODEL: ${MODEL}
17 | ports:
18 | - "8001:8001"
19 | - "5678:5678"
20 | volumes:
21 | - ./backend:/app
22 |
23 | frontend:
24 | build:
25 | context: ./frontend
26 | ports:
27 | - "3000:3000"
28 | volumes:
29 | - ./frontend/src:/app/src
30 | - ./frontend/public:/app/public
31 | depends_on:
32 | - backend
33 | environment:
34 | REACT_APP_BACKEND_URL: http://localhost:8001
35 |
--------------------------------------------------------------------------------
/docker-compose.dev.openai.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | backend:
4 | build:
5 | context: ./backend
6 | dockerfile: Dockerfile.dev
7 | environment:
8 | MODEL: ${MODEL}
9 | OPENAI_API_KEY: ${OPENAI_API_KEY}
10 | ports:
11 | - "8001:8001"
12 | - "5678:5678"
13 | volumes:
14 | - ./backend:/app
15 |
16 | frontend:
17 | build:
18 | context: ./frontend
19 | ports:
20 | - "3000:3000"
21 | volumes:
22 | - ./frontend/src:/app/src
23 | - ./frontend/public:/app/public
24 | depends_on:
25 | - backend
26 | environment:
27 | REACT_APP_BACKEND_URL: http://localhost:8001
28 |
--------------------------------------------------------------------------------
/docker-compose.dev.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | # # Using vllm
4 | # inference:
5 | # image: vllm/vllm-openai:latest
6 | # # build:
7 | # # context: ./inference
8 | # # dockerfile: Dockerfile.vllm
9 | # ports:
10 | # - "8000:8000"
11 | # # - "5679:5679"
12 | # deploy:
13 | # resources:
14 | # reservations:
15 | # devices:
16 | # - driver: nvidia
17 | # device_ids: [ '0' ]
18 | # # count: 1
19 | # capabilities: [ gpu ]
20 | # volumes:
21 | # - ~/.cache/huggingface:/root/.cache/huggingface
22 | # # Uncomment to use local model by mounting it inside the container
23 | # # - ${MODELPATH}:/model
24 | # environment:
25 | # - HF_TOKEN=${HF_TOKEN}
26 | # - MODEL=${MODEL}
27 | # # entrypoint:
28 | # # [
29 | # # "sh",
30 | # # "-c",
31 | # # "pip install debugpy -t /tmp && python3 /tmp/debugpy --listen 0.0.0.0:5679 -m vllm.entrypoints.openai.api_server --model=${MODEL} --port=8000 --tensor-parallel-size=${NUM_GPUS:-1} ${EXTRA_INFERENCE_ARGS:-}"
32 | # # ]
33 | # command: --model=${MODEL} --port=8000 --tensor-parallel-size=${NUM_GPUS:-1} ${EXTRA_INFERENCE_ARGS:-}
34 | # shm_size: '2gb'
35 |
36 | # Using sglang:
37 | inference:
38 | build:
39 | context: ./inference
40 | dockerfile: Dockerfile.sglang
41 | ports:
42 | - "8000:8000"
43 | - "5679:5679"
44 | deploy:
45 | resources:
46 | reservations:
47 | devices:
48 | - driver: nvidia
49 | count: 1
50 | # Alternatively specify specific GPU device IDs
51 | # device_ids: [ '2' ]
52 | capabilities: [gpu]
53 | volumes:
54 | - ~/.cache/huggingface:/root/.cache/huggingface
55 | # Uncomment to use local model by mounting it inside the container
56 | # - ${MODELPATH}:/model
57 | environment:
58 | - HF_TOKEN=${HF_TOKEN}
59 | - MODEL=${MODEL}
60 | # entrypoint:
61 | # [
62 | # "sh",
63 | # "-c",
64 | # "pip install debugpy -t /tmp && python3 /tmp/debugpy --listen 0.0.0.0:5679 -m sglang.launch_server --port 8000 --host 0.0.0.0 --model-path ${MODEL} --tp ${NUM_GPUS:-1} ${EXTRA_INFERENCE_ARGS:-}",
65 | # ]
66 | # command: --model=${MODEL} --port=8000 --tensor-parallel-size=${NUM_GPUS:-1} ${EXTRA_INFERENCE_ARGS:-}
67 | shm_size: "2gb"
68 |
69 | backend:
70 | build:
71 | context: ./backend
72 | dockerfile: Dockerfile.dev
73 | environment:
74 | INFERENCE_API_URI: http://inference:8000/v1
75 | MODEL: ${MODEL}
76 | ports:
77 | - "8001:8001"
78 | - "5678:5678"
79 | volumes:
80 | - ./backend:/app
81 |
82 | frontend:
83 | build:
84 | context: ./frontend
85 | ports:
86 | - "3000:3000"
87 | volumes:
88 | - ./frontend/src:/app/src
89 | - ./frontend/public:/app/public
90 | depends_on:
91 | - backend
92 | environment:
93 | REACT_APP_BACKEND_URL: http://localhost:8001
94 |
--------------------------------------------------------------------------------
/docker-compose.openai.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | backend:
4 | build:
5 | context: ./backend
6 | additional_contexts:
7 | - frontend=./frontend
8 | environment:
9 | MODEL: ${MODEL}
10 | OPENAI_API_KEY: ${OPENAI_API_KEY}
11 | # Change here if you use an external database.
12 | DATABASE_URL: mysql+pymysql://myuser:mypassword@db/mydb
13 | ports:
14 | - "80:8001"
15 | depends_on:
16 | db:
17 | condition: service_healthy
18 |
19 | db:
20 | image: mysql:latest
21 | environment:
22 | # You'll want to change these values in production.
23 | MYSQL_DATABASE: mydb
24 | MYSQL_USER: myuser
25 | MYSQL_PASSWORD: mypassword
26 | MYSQL_ROOT_PASSWORD: rootpassword
27 | volumes:
28 | - /var/lib/mysql
29 | healthcheck:
30 | test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
31 | timeout: 20s
32 | retries: 10
33 |
--------------------------------------------------------------------------------
/docker-compose.test.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | # This file is used for running end-to-end tests on GitHub.
3 | services:
4 | inference:
5 | build:
6 | context: ./inference
7 | dockerfile: Dockerfile.mock
8 | network_mode: host
9 |
10 | backend:
11 | build:
12 | context: ./backend
13 | dockerfile: Dockerfile.dev
14 | environment:
15 | INFERENCE_API_URI: http://localhost:8000/v1
16 | MODEL: ${MODEL}
17 | volumes:
18 | - ./backend:/app
19 | depends_on:
20 | - inference
21 | healthcheck:
22 | test: ["CMD", "curl", "-f", "http://localhost:8001/api/healthcheck"]
23 | interval: 5s
24 | timeout: 5s
25 | retries: 5
26 | network_mode: host
27 |
28 | frontend:
29 | build:
30 | context: ./frontend
31 | volumes:
32 | - ./frontend/src:/app/src
33 | - ./frontend/public:/app/public
34 | depends_on:
35 | backend:
36 | condition: service_healthy
37 | network_mode: host
38 | environment:
39 | REACT_APP_BACKEND_URL: http://localhost:8001
40 | command: ["npm", "run", "test:e2e"]
41 |
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | # # Using vllm
4 | # inference:
5 | # image: vllm/vllm-openai:latest
6 | # # build:
7 | # # context: ./inference
8 | # # dockerfile: Dockerfile.vllm
9 | # ports:
10 | # - "8000:8000"
11 | # deploy:
12 | # resources:
13 | # reservations:
14 | # devices:
15 | # - driver: nvidia
16 | # device_ids: [ '0' ]
17 | # # count: 1
18 | # capabilities: [ gpu ]
19 | # volumes:
20 | # - ~/.cache/huggingface:/root/.cache/huggingface
21 | # # Uncomment to use local model by mounting it inside the container
22 | # # - ${MODELPATH}:/model
23 | # environment:
24 | # - HF_TOKEN=${HF_TOKEN}
25 | # - MODEL=${MODEL}
26 | # command: --model=${MODEL} --port=8000 --tensor-parallel-size=${NUM_GPUS:-1} ${EXTRA_INFERENCE_ARGS:-}
27 | # shm_size: '2gb'
28 |
29 | # Using sglang:
30 | inference:
31 | build:
32 | context: ./inference
33 | dockerfile: Dockerfile.sglang
34 | ports:
35 | - "8000:8000"
36 | deploy:
37 | resources:
38 | reservations:
39 | devices:
40 | - driver: nvidia
41 | count: 1
42 | # Alternatively specify specific GPU device IDs
43 | # device_ids: [ '2' ]
44 | capabilities: [gpu]
45 | # In case you want to persist the HF cache, uncomment the following lines.
46 | # volumes:
47 | # - ~/.cache/huggingface:/root/.cache/huggingface
48 | environment:
49 | - HF_TOKEN=${HF_TOKEN}
50 | - MODEL=${MODEL}
51 | shm_size: "2gb"
52 |
53 | backend:
54 | build:
55 | context: ./backend
56 | # We need to give an additional context here so the
57 | # backend can build the frontend code for static serving.
58 | additional_contexts:
59 | - frontend=./frontend
60 | environment:
61 | INFERENCE_API_URI: http://inference:8000/v1
62 | MODEL: ${MODEL}
63 | # Change here if you use an external database.
64 | DATABASE_URL: mysql+pymysql://myuser:mypassword@db/mydb
65 | ports:
66 | - "80:8001"
67 | # The following is important - if the backend starts before the DB
68 | # is ready, it can crash immediately.
69 | depends_on:
70 | db:
71 | condition: service_healthy
72 |
73 | db:
74 | image: mysql:latest
75 | environment:
76 | # You'll want to change these values in production.
77 | MYSQL_DATABASE: mydb
78 | MYSQL_USER: myuser
79 | MYSQL_PASSWORD: mypassword
80 | MYSQL_ROOT_PASSWORD: rootpassword
81 | volumes:
82 | - /var/lib/mysql
83 | healthcheck:
84 | test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
85 | timeout: 20s
86 | retries: 10
87 |
--------------------------------------------------------------------------------
/docs/architecture_dev_mode.svg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgerstgrasser/tacheles/9331abc87bcadc47c8f3b8c7f9e7ff5eafd8f01b/docs/architecture_dev_mode.svg
--------------------------------------------------------------------------------
/docs/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgerstgrasser/tacheles/9331abc87bcadc47c8f3b8c7f9e7ff5eafd8f01b/docs/screenshot.png
--------------------------------------------------------------------------------
/frontend/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": ["eslint:recommended", "plugin:react/recommended", "prettier"],
3 | "plugins": ["react"],
4 | "parserOptions": {
5 | "ecmaVersion": 2021,
6 | "sourceType": "module",
7 | "ecmaFeatures": {
8 | "jsx": true
9 | }
10 | },
11 | "env": {
12 | "browser": true,
13 | "node": true,
14 | "es6": true
15 | },
16 | "rules": {
17 | // Add any custom ESLint rules here
18 | }
19 | }
--------------------------------------------------------------------------------
/frontend/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # production
12 | /build
13 |
14 | # misc
15 | .DS_Store
16 | .env.local
17 | .env.development.local
18 | .env.test.local
19 | .env.production.local
20 |
21 | npm-debug.log*
22 | yarn-debug.log*
23 | yarn-error.log*
24 |
--------------------------------------------------------------------------------
/frontend/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "singleQuote": true,
3 | "trailingComma": "es5",
4 | "semi": true,
5 | "tabWidth": 2,
6 | "printWidth": 100
7 | }
--------------------------------------------------------------------------------
/frontend/Dockerfile:
--------------------------------------------------------------------------------
1 | # A standard Dockerfile for a Node.js application
2 | # Note that we only really use this in development, see the backend Dockerfile for how we build the frontend for production.
3 | FROM node:20-alpine
4 | WORKDIR /app
5 | COPY package.json .
6 | RUN npm install
7 | COPY . .
8 | CMD ["npm", "start"]
--------------------------------------------------------------------------------
/frontend/README.md:
--------------------------------------------------------------------------------
1 | # Getting Started with Create React App
2 |
3 | This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
4 |
5 | ## Available Scripts
6 |
7 | In the project directory, you can run:
8 |
9 | ### `npm start`
10 |
11 | Runs the app in the development mode.\
12 | Open [http://localhost:3000](http://localhost:3000) to view it in your browser.
13 |
14 | The page will reload when you make changes.\
15 | You may also see any lint errors in the console.
16 |
17 | ### `npm test`
18 |
19 | Launches the test runner in the interactive watch mode.\
20 | See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
21 |
22 | ### `npm run build`
23 |
24 | Builds the app for production to the `build` folder.\
25 | It correctly bundles React in production mode and optimizes the build for the best performance.
26 |
27 | The build is minified and the filenames include the hashes.\
28 | Your app is ready to be deployed!
29 |
30 | See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
31 |
32 | ### `npm run eject`
33 |
34 | **Note: this is a one-way operation. Once you `eject`, you can't go back!**
35 |
36 | If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
37 |
38 | Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own.
39 |
40 | You don't have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it.
41 |
42 | ## Learn More
43 |
44 | You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
45 |
46 | To learn React, check out the [React documentation](https://reactjs.org/).
47 |
48 | ### Code Splitting
49 |
50 | This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting)
51 |
52 | ### Analyzing the Bundle Size
53 |
54 | This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size)
55 |
56 | ### Making a Progressive Web App
57 |
58 | This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app)
59 |
60 | ### Advanced Configuration
61 |
62 | This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration)
63 |
64 | ### Deployment
65 |
66 | This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)
67 |
68 | ### `npm run build` fails to minify
69 |
70 | This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)
71 |
--------------------------------------------------------------------------------
/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "frontend",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@ant-design/compatible": "^5.1.2",
7 | "antd": "^5.10.1",
8 | "react": "^18.2.0",
9 | "react-dom": "^18.2.0",
10 | "react-markdown": "^6.0.3",
11 | "react-scripts": "^5.0.1",
12 | "remark-gfm": "^1.0.0"
13 | },
14 | "scripts": {
15 | "start": "FRONTEND_MODE=dev react-scripts start",
16 | "build": "react-scripts build",
17 | "eject": "react-scripts eject",
18 | "lint": "eslint \"src/**/*.js\"",
19 | "format": "prettier --write \"src/**/*.js\"",
20 | "test": "react-scripts test --testPathIgnorePatterns __tests__e2e__ --watchAll=false",
21 | "test:e2e": "react-scripts test --testPathPattern __tests__e2e__ --watchAll=false"
22 | },
23 | "eslintConfig": {
24 | "extends": [
25 | "react-app",
26 | "react-app/jest"
27 | ]
28 | },
29 | "browserslist": {
30 | "production": [
31 | ">0.2%",
32 | "not dead",
33 | "not op_mini all"
34 | ],
35 | "development": [
36 | "last 1 chrome version",
37 | "last 1 firefox version",
38 | "last 1 safari version"
39 | ]
40 | },
41 | "devDependencies": {
42 | "@babel/preset-env": "^7.24.6",
43 | "@testing-library/jest-dom": "^6.4.5",
44 | "@testing-library/react": "^15.0.7",
45 | "@testing-library/user-event": "^14.5.2",
46 | "axios": "^1.7.2",
47 | "babel-jest": "^29.7.0",
48 | "eslint": "^8.57.0",
49 | "eslint-config-prettier": "^9.1.0",
50 | "eslint-plugin-react": "^7.34.1",
51 | "jest": "^29.7.0",
52 | "prettier": "^3.2.5",
53 | "react-test-renderer": "^18.3.1",
54 | "text-encoding": "^0.7.0",
55 | "web-streams-polyfill": "^4.0.0"
56 | },
57 | "jest": {
58 | "moduleNameMapper": {
59 | "^axios$": "axios/dist/node/axios.cjs"
60 | }
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/frontend/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgerstgrasser/tacheles/9331abc87bcadc47c8f3b8c7f9e7ff5eafd8f01b/frontend/public/favicon.ico
--------------------------------------------------------------------------------
/frontend/public/index.html:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgerstgrasser/tacheles/9331abc87bcadc47c8f3b8c7f9e7ff5eafd8f01b/frontend/public/index.html
--------------------------------------------------------------------------------