├── .github
└── workflows
│ └── deploy-docs.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── DEVELOPMENT.md
├── LICENSE
├── Makefile
├── README.md
├── docs
├── CNAME
├── getting-started.md
├── images
│ ├── android-chrome-192x192.png
│ ├── android-chrome-512x512.png
│ ├── apple-touch-icon.png
│ ├── favicon-16x16.png
│ ├── favicon-32x32.png
│ ├── favicon.ico
│ ├── mtl-powered-by.png
│ ├── promptmage-logo.png
│ ├── screenshots
│ │ ├── api-docs.png
│ │ ├── gui-overview.png
│ │ ├── plaground-dark.png
│ │ ├── playground-empty.png
│ │ ├── playground-finished.png
│ │ ├── playground-run.png
│ │ ├── playground-step.png
│ │ ├── promptmage-example-flow-1.png
│ │ ├── promptmage-example-flow-2.png
│ │ ├── promptmage-example-flow-3.png
│ │ ├── promptmage-example-flow-4.png
│ │ ├── promptmage-example-flow-5.png
│ │ ├── promptmage-example-flow-6.png
│ │ ├── prompts-page.png
│ │ └── runs-page.png
│ └── site.webmanifest
├── index.md
├── license.md
├── reference.md
├── roadmap.md
├── stylesheets
│ ├── extra.css
│ └── hides.css
├── tutorial.md
└── walkthrough.md
├── examples
├── README.md
├── docker
│ ├── .dockerignore
│ ├── .env.example
│ ├── Dockerfile
│ ├── README.md
│ ├── requirements.txt
│ └── summarize_article_by_facts.py
├── minimal-example.py
├── multiflow.py
├── poetry.lock
├── pyproject.toml
├── summarize_article_by_facts.py
├── summarize_article_by_facts_remote.py
└── youtube_understanding.py
├── images
├── mtl-powered-by.png
└── promptmage-logo.png
├── mkdocs.yml
├── promptmage
├── __init__.py
├── api.py
├── cli.py
├── exceptions.py
├── frontend
│ ├── __init__.py
│ ├── components
│ │ ├── __init__.py
│ │ ├── dataset_page.py
│ │ ├── evaluation_page.py
│ │ ├── flow_page.py
│ │ ├── main_runner.py
│ │ ├── menu.py
│ │ ├── overview_page.py
│ │ ├── prompts_page.py
│ │ ├── runs_page.py
│ │ ├── step_runner.py
│ │ ├── styles.py
│ │ └── theme.py
│ └── frontend.py
├── mage.py
├── prompt.py
├── remote.py
├── result.py
├── run.py
├── run_data.py
├── static
│ ├── android-chrome-192x192.png
│ ├── android-chrome-512x512.png
│ ├── apple-touch-icon.png
│ ├── favicon-16x16.png
│ ├── favicon-32x32.png
│ ├── favicon.ico
│ ├── index.html
│ ├── promptmage-logo.png
│ └── site.webmanifest
├── step.py
├── storage
│ ├── __init__.py
│ ├── data_store.py
│ ├── file_backend.py
│ ├── memory_backend.py
│ ├── prompt_store.py
│ ├── remote_data_backend.py
│ ├── remote_prompt_backend.py
│ ├── sqlite_backend.py
│ ├── storage_backend.py
│ └── utils.py
└── utils.py
├── pyproject.toml
└── tests
├── __init__.py
├── conftest.py
├── minimal_example.py
├── test_api.py
├── test_mage.py
├── test_mage_step.py
├── test_sqlite_backend.py
└── tmp
└── .gitkeep
/.github/workflows/deploy-docs.yml:
--------------------------------------------------------------------------------
1 | name: ci
2 | on:
3 | push:
4 | branches:
5 | - main
6 | paths:
7 | - docs/**
8 | - mkdocs.yml
9 | permissions:
10 | contents: write
11 | jobs:
12 | deploy:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 | - uses: actions/setup-python@v4
17 | with:
18 | python-version: 3.x
19 | - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
20 | - uses: actions/cache@v3
21 | with:
22 | key: mkdocs-material-${{ env.cache_id }}
23 | path: .cache
24 | restore-keys: |
25 | mkdocs-material-
26 | - run: pip install mkdocs-material material-plausible-plugin
27 | - run: mkdocs build
28 | - run: mkdocs gh-deploy --force --clean
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.db
2 | *.py[cod]
3 | .web
4 | __pycache__/
5 | .env
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 |
2 | # Contributor Covenant Code of Conduct
3 |
4 | ## Our Pledge
5 |
6 | We as members, contributors, and leaders pledge to make participation in our
7 | community a harassment-free experience for everyone, regardless of age, body
8 | size, visible or invisible disability, ethnicity, sex characteristics, gender
9 | identity and expression, level of experience, education, socio-economic status,
10 | nationality, personal appearance, race, religion, or sexual identity
11 | and orientation.
12 |
13 | We pledge to act and interact in ways that contribute to an open, welcoming,
14 | diverse, inclusive, and healthy community.
15 |
16 | ## Our Standards
17 |
18 | Examples of behavior that contributes to a positive environment for our
19 | community include:
20 |
21 | * Demonstrating empathy and kindness toward other people
22 | * Being respectful of differing opinions, viewpoints, and experiences
23 | * Giving and gracefully accepting constructive feedback
24 | * Accepting responsibility and apologizing to those affected by our mistakes,
25 | and learning from the experience
26 | * Focusing on what is best not just for us as individuals, but for the
27 | overall community
28 |
29 | Examples of unacceptable behavior include:
30 |
31 | * The use of sexualized language or imagery, and sexual attention or
32 | advances of any kind
33 | * Trolling, insulting or derogatory comments, and personal or political attacks
34 | * Public or private harassment
35 | * Publishing others' private information, such as a physical or email
36 | address, without their explicit permission
37 | * Other conduct which could reasonably be considered inappropriate in a
38 | professional setting
39 |
40 | ## Enforcement Responsibilities
41 |
42 | Community leaders are responsible for clarifying and enforcing our standards of
43 | acceptable behavior and will take appropriate and fair corrective action in
44 | response to any behavior that they deem inappropriate, threatening, offensive,
45 | or harmful.
46 |
47 | Community leaders have the right and responsibility to remove, edit, or reject
48 | comments, commits, code, wiki edits, issues, and other contributions that are
49 | not aligned to this Code of Conduct, and will communicate reasons for moderation
50 | decisions when appropriate.
51 |
52 | ## Scope
53 |
54 | This Code of Conduct applies within all community spaces, and also applies when
55 | an individual is officially representing the community in public spaces.
56 | Examples of representing our community include using an official e-mail address,
57 | posting via an official social media account, or acting as an appointed
58 | representative at an online or offline event.
59 |
60 | ## Enforcement
61 |
62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
63 | reported to the community leaders responsible for enforcement at
64 | [coc@tobiassterbak.com](mailto:coc@tobiassterbak.com).
65 | All complaints will be reviewed and investigated promptly and fairly.
66 |
67 | All community leaders are obligated to respect the privacy and security of the
68 | reporter of any incident.
69 |
70 | ## Enforcement Guidelines
71 |
72 | Community leaders will follow these Community Impact Guidelines in determining
73 | the consequences for any action they deem in violation of this Code of Conduct:
74 |
75 | ### 1. Correction
76 |
77 | **Community Impact**: Use of inappropriate language or other behavior deemed
78 | unprofessional or unwelcome in the community.
79 |
80 | **Consequence**: A private, written warning from community leaders, providing
81 | clarity around the nature of the violation and an explanation of why the
82 | behavior was inappropriate. A public apology may be requested.
83 |
84 | ### 2. Warning
85 |
86 | **Community Impact**: A violation through a single incident or series
87 | of actions.
88 |
89 | **Consequence**: A warning with consequences for continued behavior. No
90 | interaction with the people involved, including unsolicited interaction with
91 | those enforcing the Code of Conduct, for a specified period of time. This
92 | includes avoiding interactions in community spaces as well as external channels
93 | like social media. Violating these terms may lead to a temporary or
94 | permanent ban.
95 |
96 | ### 3. Temporary Ban
97 |
98 | **Community Impact**: A serious violation of community standards, including
99 | sustained inappropriate behavior.
100 |
101 | **Consequence**: A temporary ban from any sort of interaction or public
102 | communication with the community for a specified period of time. No public or
103 | private interaction with the people involved, including unsolicited interaction
104 | with those enforcing the Code of Conduct, is allowed during this period.
105 | Violating these terms may lead to a permanent ban.
106 |
107 | ### 4. Permanent Ban
108 |
109 | **Community Impact**: Demonstrating a pattern of violation of community
110 | standards, including sustained inappropriate behavior, harassment of an
111 | individual, or aggression toward or disparagement of classes of individuals.
112 |
113 | **Consequence**: A permanent ban from any sort of public interaction within
114 | the community.
115 |
116 | ## Attribution
117 |
118 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
119 | version 2.0, available at
120 | [https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0].
121 |
122 | Community Impact Guidelines were inspired by
123 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
124 |
125 | For answers to common questions about this code of conduct, see the FAQ at
126 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available
127 | at [https://www.contributor-covenant.org/translations][translations].
128 |
129 | [homepage]: https://www.contributor-covenant.org
130 | [v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html
131 | [Mozilla CoC]: https://github.com/mozilla/diversity
132 | [FAQ]: https://www.contributor-covenant.org/faq
133 | [translations]: https://www.contributor-covenant.org/translations
134 |
--------------------------------------------------------------------------------
/DEVELOPMENT.md:
--------------------------------------------------------------------------------
1 | # Development
2 |
3 | This document provides an overview of the development process for PromptMage. It includes information on the project structure, the development environment, and the development workflow.
4 |
5 | ## Install development environment
6 |
7 | To install the development environment, follow these steps:
8 |
9 | 1. Clone the repository:
10 |
11 | ```bash
12 | git clone https://github.com/tsterbak/promptmage.git
13 | ```
14 |
15 | 2. Install the dependencies:
16 |
17 | PromptMage uses [Poetry](https://python-poetry.org/) to manage dependencies. To install the dependencies, run the following command:
18 |
19 | ```bash
20 | poetry install
21 | ```
22 |
23 | ## Run Promptmage in development mode
24 |
25 | To run the PromptMage examples in development mode, install the example dependencies:
26 |
27 | ```bash
28 | cd examples
29 | poetry install
30 | ```
31 |
32 | Then run the examples:
33 |
34 | ```bash
35 | poetry run promptmage run summarize_article_by_facts.py
36 | ```
37 |
38 | ## Run tests
39 |
40 | To run the tests, run the following command:
41 |
42 | ```bash
43 | poetry run pytest .
44 | ```
45 |
46 | ## Style guide
47 |
48 | PromptMage uses [Black](https://black.readthedocs.io/en/stable/) to format the code. To format the code, run the following command:
49 |
50 | ```bash
51 | poetry run black .
52 | ```
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Tobias Sterbak
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: install test build publish docs-build docs-serve format
2 |
3 | install:
4 | poetry install
5 |
6 | format:
7 | poetry run black promptmage
8 |
9 | test:
10 | poetry run pytest tests --cov=promptmage --cov-report=term-missing
11 |
12 | docs-build:
13 | poetry run mkdocs build
14 |
15 | docs-serve:
16 | poetry run mkdocs serve
17 |
18 | build:
19 | poetry build
20 |
21 | publish:
22 | poetry publish --build
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
PromptMage
8 |
9 |
10 | simplifies the process of creating and managing LLM workflows as a self-hosted solution.
11 |
12 |
13 | [](https://github.com/tsterbak/promptmage/blob/main/LICENSE)
14 | [](https://pypi.org/project/promptmage/)
16 | [](https://pypi.org/project/promptmage/)
17 | [](https://github.com/tsterbak/promptmage/issues)
18 | [](https://github.com/tsterbak/promptmage/stargazers)
19 |
20 |
21 | > [!WARNING]
22 | > This application is currently in alpha state and under active development. Please be aware that the API and features may change at any time.
23 |
24 |
25 | ## About the Project
26 |
27 | PromptMage is a python framework to simplify the development of complex, multi-step applications based on LLMs. It is designed to offer an intuitive interface that simplifies the process of creating and managing LLM workflows as a self-hosted solution. PromptMage facilitates prompt testing and comparison, and incorporates version control features to help users track the development of their prompts. Suitable for both small teams and large enterprises, PromptMage seeks to improve productivity and foster the practical use of LLM technology.
28 |
29 | The approach with PromptMage is to provide a pragmatic solution that bridges the current gap in LLM workflow management. We aim to empower developers, researchers, and organizations by making LLM technology more accessible and manageable, thereby supporting the next wave of AI innovations.
30 |
31 | 
32 |
33 | Take the [walkthrough](https://promptmage.io/walkthrough/) to see what you can do with PromptMage.
34 |
35 | ## Philosophy
36 | - Integrate the prompt playground into your workflow for fast iteration
37 | - Prompts as first-class citizens with version control and collaboration features
38 | - Manual and automatic testing and validation of prompts
39 | - Easy sharing of results with domain experts and stakeholders
40 | - Built-in, automatically created API with FastAPI for easy integration and deployment
41 | - Type-hint everything for automatic inference and validation magic
42 |
43 | ## Projects using PromptMage
44 |
45 | - [product-review-research](https://github.com/tsterbak/product-review-research): An AI webapp built with PromptMage to provide in-depth analysis for products by researching trustworthy online reviews.
46 |
47 | ## Getting Started
48 |
49 | ### Installation
50 |
51 | To install promptmage, run the following command:
52 |
53 | ```bash
54 | pip install promptmage
55 | ```
56 |
57 | ## Usage
58 |
59 | To use promptmage, run the following command:
60 |
61 | ```bash
62 | promptmage run <path-to-flow.py>
63 | ```
64 |
65 | This will start the local promptmage server and run the flow at the given path. You can now access the promptmage interface at `http://localhost:8000/gui/`.
66 |
67 | To run the remote backend server, run the following command:
68 |
69 | ```bash
70 | promptmage serve --port 8021
71 | ```
72 |
73 | To make it work with your promptmage script, you should add the following lines to your script:
74 |
75 | ```python
76 | from promptmage import PromptMage
77 |
78 | mage = PromptMage(remote="http://localhost:8021") # or the URL of your remote server
79 | ```
80 |
81 | Have a look at the examples in the [examples](https://github.com/tsterbak/promptmage/tree/main/examples) folder to see how to use promptmage in your application or workflow.
82 |
83 |
84 | ## Use with Docker
85 |
86 | You can find a usage example with Docker here: [Docker example](https://github.com/tsterbak/promptmage/tree/main/examples/docker).
87 |
88 |
89 | ## Development
90 |
91 | To develop PromptMage, check out the [DEVELOPMENT.md](DEVELOPMENT.md) file.
92 |
93 | ## Contributing
94 |
95 | We welcome contributions from the community!
96 |
97 | If you're interested in improving PromptMage, you can contribute in the following ways:
98 | * **Reporting Bugs**: Submit an issue in our repository, providing a detailed description of the problem and steps to reproduce it.
99 | * **Improve documentation**: If you find any errors or have suggestions for improving the documentation, please submit an issue or a pull request.
100 | * **Fixing Bugs**: Check out our list of open issues and submit a pull request to fix any bugs you find.
101 | * **Feature Requests**: Have ideas on how to make PromptMage better? We'd love to hear from you! Please submit an issue, detailing your suggestions.
102 | * **Pull Requests**: Contributions via pull requests are highly appreciated. Please ensure your code adheres to the coding standards of the project, and submit a pull request with a clear description of your changes.
103 |
104 | To ensure a smooth contribution process, please follow these guidelines:
105 | * **create an issue before submitting a pull request** to discuss the changes you'd like to make. This helps us ensure that your contribution aligns with the project's goals and prevents duplicate work.
106 | * **follow the coding standards** of the project. Check the [DEVELOPMENT.md](DEVELOPMENT.md) file for more information.
107 |
108 | Make sure to check if your issue or PR has already been fixed or implemented before opening a new one!
109 |
110 | ## License
111 |
112 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
113 | Original development by [Tobias Sterbak](https://tobiassterbak.com). Copyright (C) 2024.
114 |
115 | ## Contact
116 | For any inquiries or further information, feel free to reach out at [promptmage@tobiassterbak.com](mailto:promptmage@tobiassterbak.com).
117 |
118 | ## ❤️ Acknowledgements
119 |
120 | This project was supported by
121 |
122 |
123 |
124 |
--------------------------------------------------------------------------------
/docs/CNAME:
--------------------------------------------------------------------------------
1 | promptmage.io
--------------------------------------------------------------------------------
/docs/getting-started.md:
--------------------------------------------------------------------------------
1 |
2 | # Getting Started
3 |
4 | ## Installation
5 |
6 | To install promptmage, run the following command:
7 |
8 | ```bash
9 | pip install promptmage
10 | ```
11 |
12 | ## Annotated Code Example
13 |
14 | Here is an example of how to use promptmage in your application:
15 |
16 | ``` python
17 | from promptmage import PromptMage, Prompt, MageResult
18 |
19 | # Create a new promptmage instance
20 | mage = PromptMage(#(1)!
21 | name="example",#(2)!
22 | )
23 | ```
24 |
25 | 1. The [`PromptMage`](/reference/#promptmage) class is the main class of promptmage. It is used to store all the information about the flow and to run the flow.
26 | 2. The `name` parameter is used to give the promptmage instance a unique name. This allows you to run multiple promptmage instances in parallel.
27 |
28 | Steps are the building blocks of a flow. They are used to define the different parts of the flow and to connect them together. A step is just a python function with the [`@mage.step()`](/reference/#promptmagestep) decorator which returns a [`MageResult`](/reference/#mageresult). Here is an example of how to create a step:
29 |
30 | ``` python
31 | @mage.step(
32 | name="step1", #(1)!
33 | prompt_name="prompt1", #(2)!
34 | initial=True #(3)!
35 | )
36 | def step1(question: str, prompt: Prompt) -> MageResult: #(4)!
37 | response = client.chat.completions.create( #(5)!
38 | model="gpt-4o-mini",
39 | messages=[
40 | {"role": "system", "content": prompt.system},
41 | {
42 | "role": "user",
43 | "content": prompt.user.format(question=question),
44 | },
45 | ],
46 | )
47 | answer = response.choices[0].message.content
48 | return MageResult(
49 | next_step=None, #(6)!
50 | result=answer
51 | )
52 | ```
53 |
54 | 1. The `name` parameter is used to give the step a unique name.
55 | 2. The `prompt_name` parameter is used to specify the name of the prompt that should be used for this step.
56 | 3. The `initial` parameter is used to specify if this is the initial step of the flow.
57 | 4. The `step1` function is a step that takes a question and a prompt as input and returns a [`MageResult`](/reference/#mageresult) with the result of the step and the name of the next step to run. The prompt is managed by the promptmage instance and is automatically passed to the step.
58 | 5. The step uses the OpenAI API to generate a response to the question using the prompt.
59 | 6. The `next_step` parameter is used to specify the name of the next step to run. If `None` is returned, the flow will stop.
60 |
61 |
62 | ## Usage
63 |
64 | Put the above code in a file called `flow.py` and setup the OpenAI client. To run the flow with promptmage, run the following command:
65 |
66 | ```bash
67 | promptmage run flow.py
68 | ```
69 |
70 | This will start the promptmage server and run the flow at the given path. You can now access the promptmage interface at `http://localhost:8000/gui/`.
71 |
72 | ## Usage with a remote backend server
73 |
74 | For a production setup and collaborative usage with teams you can run the promptmage server with a remote backend. To run the remote backend on a remote server, run the following command:
75 |
76 | ```bash
77 | promptmage serve --port 8021
78 | ```
79 |
80 | To connect your promptmage script to the remote backend, you need to add the `remote` url to the PromptMage instance of your script:
81 |
82 | ```python
83 | mage = PromptMage(
84 | name="example",
85 | remote="http://localhost:8021" #(1)!
86 | )
87 | ```
88 |
89 | Now you can run your script and the promptmage server will use the remote backend to run the flow and store the results.
90 |
91 | 1. The `remote` parameter is used to specify the URL of the remote backend to use. If this is set, the `PromptMage` instance will use the remote backend instead of the local one.
92 |
93 |
94 |
95 | ## GUI walkthrough
96 |
97 | The promptmage interface is divided into four main sections: the flow playground, the run history, the prompt repository, and the evaluation section.
98 |
99 | ### Flow playground
100 |
101 |
102 | { width="70%" }
103 | Initial flow playground for the example flow.
104 |
105 |
106 |
107 | { width="70%" }
108 | Edit the step prompt of step 1.
109 |
110 |
111 |
112 | { width="70%" }
113 | After the run you can see the execution graph and the results.
114 |
115 |
116 | ### Run history
117 |
118 |
119 | { width="70%" }
120 | Here you can see all your runs and the results.
121 |
122 |
123 |
124 | { width="70%" }
125 | By clicking on a run, you can look at the details.
126 |
127 |
128 | ### Prompt repository
129 |
130 |
131 | { width="70%" }
132 | You can see all your prompts and versions in the prompts repository.
133 |
134 |
135 |
136 | ## More examples
137 |
138 | Have a look at the examples in the [examples](https://github.com/tsterbak/promptmage/tree/main/examples) folder to see how to use promptmage in your application or workflow.
139 |
140 | ### Use with Docker
141 |
142 | You can find a usage example with Docker here: [Docker example](https://github.com/tsterbak/promptmage/tree/main/examples/docker).
143 |
--------------------------------------------------------------------------------
/docs/images/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/android-chrome-192x192.png
--------------------------------------------------------------------------------
/docs/images/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/android-chrome-512x512.png
--------------------------------------------------------------------------------
/docs/images/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/apple-touch-icon.png
--------------------------------------------------------------------------------
/docs/images/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/favicon-16x16.png
--------------------------------------------------------------------------------
/docs/images/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/favicon-32x32.png
--------------------------------------------------------------------------------
/docs/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/favicon.ico
--------------------------------------------------------------------------------
/docs/images/mtl-powered-by.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/mtl-powered-by.png
--------------------------------------------------------------------------------
/docs/images/promptmage-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/promptmage-logo.png
--------------------------------------------------------------------------------
/docs/images/screenshots/api-docs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/api-docs.png
--------------------------------------------------------------------------------
/docs/images/screenshots/gui-overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/gui-overview.png
--------------------------------------------------------------------------------
/docs/images/screenshots/plaground-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/plaground-dark.png
--------------------------------------------------------------------------------
/docs/images/screenshots/playground-empty.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/playground-empty.png
--------------------------------------------------------------------------------
/docs/images/screenshots/playground-finished.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/playground-finished.png
--------------------------------------------------------------------------------
/docs/images/screenshots/playground-run.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/playground-run.png
--------------------------------------------------------------------------------
/docs/images/screenshots/playground-step.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/playground-step.png
--------------------------------------------------------------------------------
/docs/images/screenshots/promptmage-example-flow-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/promptmage-example-flow-1.png
--------------------------------------------------------------------------------
/docs/images/screenshots/promptmage-example-flow-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/promptmage-example-flow-2.png
--------------------------------------------------------------------------------
/docs/images/screenshots/promptmage-example-flow-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/promptmage-example-flow-3.png
--------------------------------------------------------------------------------
/docs/images/screenshots/promptmage-example-flow-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/promptmage-example-flow-4.png
--------------------------------------------------------------------------------
/docs/images/screenshots/promptmage-example-flow-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/promptmage-example-flow-5.png
--------------------------------------------------------------------------------
/docs/images/screenshots/promptmage-example-flow-6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/promptmage-example-flow-6.png
--------------------------------------------------------------------------------
/docs/images/screenshots/prompts-page.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/prompts-page.png
--------------------------------------------------------------------------------
/docs/images/screenshots/runs-page.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/docs/images/screenshots/runs-page.png
--------------------------------------------------------------------------------
/docs/images/site.webmanifest:
--------------------------------------------------------------------------------
1 | {"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: PromptMage
3 | summary: PromptMage simplifies the process of creating and managing LLM workflows as a self-hosted solution.
4 | date: 2024-08-23
5 | authors:
6 | - Tobias Sterbak
7 | hide:
8 | - navigation
9 | extra:
10 | class: hide-title
11 | ---
12 |
13 |
14 |
15 |
16 |
17 |
18 |
Welcome to PromptMage
19 |
20 | PromptMage is a python framework to simplify the development of complex, multi-step applications based on LLMs.
21 |
22 |
Get Started
23 |
Learn More
24 |
25 |
26 |
27 | !!! warning "WARNING"
28 |
29 | This application is currently in alpha state and under active development. Please be aware that the API and features may change at any time.
30 |
31 |
32 |
33 |
34 | - :material-clock-fast:{ .lg .middle } __Set up in 5 minutes__
35 |
36 | ---
37 |
38 | Get PromptMage up and running quickly with simple installation steps. Deploy locally or on your server with ease.
39 |
40 | [:octicons-arrow-right-24: Getting started](getting-started)
41 |
42 | - :fontawesome-brands-github:{ .lg .middle } __Version Control Built-in__
43 |
44 | ---
45 |
46 | Track prompt development with integrated version control, making collaboration and iteration seamless.
47 |
48 | [:octicons-arrow-right-24: Learn more](/getting-started/#prompt-repository)
49 |
50 | - :material-play-box:{ .lg .middle } __Prompt Playground__
51 |
52 | ---
53 |
54 | Test, compare, and refine prompts in an intuitive interface designed for rapid iteration.
55 |
56 | [:octicons-arrow-right-24: Playground](/getting-started/#flow-playground)
57 |
58 | - :material-api:{ .lg .middle } __Auto-generated API__
59 |
60 | ---
61 |
62 | Leverage a FastAPI-powered, automatically created API for easy integration and deployment.
63 |
64 | [:octicons-arrow-right-24: API Documentation](#)
65 |
66 | - :material-check-decagram:{ .lg .middle } __Evaluation Mode__
67 |
68 | ---
69 |
70 | Assess prompt performance through manual and automatic testing, ensuring reliability before deployment.
71 |
72 | [:octicons-arrow-right-24: Evaluation Guide](#)
73 |
74 | - :material-update:{ .lg .middle } __More to Come__
75 |
76 | ---
77 |
78 | Stay tuned for upcoming features and enhancements as we continue to evolve PromptMage.
79 |
80 | [:octicons-arrow-right-24: Roadmap](roadmap)
81 |
82 |
83 |
84 |
85 |
86 | ## About the Project
87 |
88 | PromptMage is a python framework to simplify the development of complex, multi-step applications based on LLMs. It is designed to offer an intuitive interface that simplifies the process of creating and managing LLM workflows as a self-hosted solution. PromptMage facilitates prompt testing and comparison, and incorporates version control features to help users track the development of their prompts. Suitable for both small teams and large enterprises, PromptMage seeks to improve productivity and foster the practical use of LLM technology.
89 |
90 | The approach with PromptMage is to provide a pragmatic solution that bridges the current gap in LLM workflow management. We aim to empower developers, researchers, and organizations by making LLM technology more accessible and manageable, thereby supporting the next wave of AI innovations.
91 |
92 | Take the [walkthrough](walkthrough.md) to see what you can do with PromptMage.
93 |
94 | ## Philosophy
95 | - Integrate the prompt playground into your workflow for fast iteration
96 | - Prompts as first-class citizens with version control and collaboration features
97 | - Manual and automatic testing and validation of prompts
98 | - Easy sharing of results with domain experts and stakeholders
99 | - Built-in, automatically created API with FastAPI for easy integration and deployment
100 | - Type-hint everything for automatic inference and validation magic
101 |
102 | ## Projects using PromptMage
103 |
104 | - [product-review-research](https://github.com/tsterbak/product-review-research): An AI webapp built with PromptMage to provide in-depth analysis for products by researching trustworthy online reviews.
105 |
106 | ## Development
107 |
108 | To develop PromptMage, check out the [DEVELOPMENT.md](https://github.com/tsterbak/promptmage/blob/main/DEVELOPMENT.md) file.
109 |
110 | ## Contributing
111 |
112 | We welcome contributions from the community!
113 |
114 | If you're interested in improving PromptMage, you can contribute in the following ways:
115 | * **Reporting Bugs**: Submit an issue in our repository, providing a detailed description of the problem and steps to reproduce it.
116 | * **Improve documentation**: If you find any errors or have suggestions for improving the documentation, please submit an issue or a pull request.
117 | * **Fixing Bugs**: Check out our list of open issues and submit a pull request to fix any bugs you find.
118 | * **Feature Requests**: Have ideas on how to make PromptMage better? We'd love to hear from you! Please submit an issue, detailing your suggestions.
119 | * **Pull Requests**: Contributions via pull requests are highly appreciated. Please ensure your code adheres to the coding standards of the project, and submit a pull request with a clear description of your changes.
120 |
121 | To ensure a smooth contribution process, please follow these guidelines:
122 | * **create an issue before submitting a pull request** to discuss the changes you'd like to make. This helps us ensure that your contribution aligns with the project's goals and prevents duplicate work.
123 | * **follow the coding standards** of the project. Check the [DEVELOPMENT.md](https://github.com/tsterbak/promptmage/blob/main/DEVELOPMENT.md) file for more information.
124 |
125 | Make sure to check if your issue or PR has already been fixed or implemented before opening a new one!
126 |
127 |
128 | ## Contact
129 | For any inquiries or further information, feel free to reach out at [promptmage@tobiassterbak.com](mailto:promptmage@tobiassterbak.com).
130 |
131 | ## ❤️ Acknowledgements
132 |
133 | This project was supported by
134 |
135 |
136 |
137 |
--------------------------------------------------------------------------------
/docs/license.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: PromptMage - License
3 | summary: PromptMage is licensed under the MIT License.
4 | date: 2024-08-23
5 | authors:
6 | - Tobias Sterbak
7 | hide:
8 | - navigation
9 | ---
10 | # License
11 |
12 | This project is licensed under the MIT License - see the [LICENSE.md](https://github.com/tsterbak/promptmage/blob/main/LICENSE.md) file for details.
13 | Original development by [Tobias Sterbak](https://tobiassterbak.com).
--------------------------------------------------------------------------------
/docs/reference.md:
--------------------------------------------------------------------------------
1 | # API Reference
2 |
3 | This page contains the API reference with the most important classes and methods of promptmage.
4 |
5 |
6 | ## PromptMage CLI
7 |
8 | The `promptmage` CLI is the command line interface to run the promptmage server and interact with the promptmage backend.
9 |
10 | ### version
11 | Show the installed promptmage version.
12 |
13 | Usage:
14 | ```bash
15 | promptmage version
16 | ```
17 |
18 | ### run
19 | Run a flow with the given path. A flow is a python script that defines the flow of the promptmage application.
20 |
21 | Usage:
22 | ```bash
23 | promptmage run
24 | ```
25 |
26 | Available options:
27 | - **`--port`** (`int`):
28 | The port to run the server on. Default is `8000`.
29 | - **`--host`** (`str`):
30 | The host to run the server on. Default is `localhost`.
31 |
32 | ### serve
33 | Start the promptmage backend server.
34 |
35 | Usage:
36 | ```bash
37 | promptmage serve
38 | ```
39 |
40 | Available options:
41 | - **`--port`** (`int`):
42 | The port to run the server on. Default is `8021`.
43 | - **`--host`** (`str`):
44 | The host to run the server on. Default is `localhost`.
45 |
46 | ### export
47 | Export the promptmage database to json.
48 |
49 | Usage:
50 | ```bash
51 | promptmage export --filename
52 | ```
53 |
54 | Available options:
55 | - **`--filename`** (`str`):
56 | The filename to export the database to.
57 | - **`--runs`** (`bool`):
58 | Whether to export the runs as well. Default is `False`.
59 | - **`--prompts`** (`bool`):
60 | Whether to export the prompts as well. Default is `False`.
61 |
62 | ### backup
63 | Backup the promptmage database to a json file.
64 |
65 | Usage:
66 | ```bash
67 | promptmage backup --json_path
68 | ```
69 |
70 | Available options:
71 | - **`--json_path`** (`str`):
72 | The path to the json file to backup the database to.
73 |
74 | ### restore
75 | Restore the promptmage database from a json file.
76 |
77 | !!! warning
78 |
79 | This will ask for confirmation before restoring and will overwrite the current database.
80 |
81 | Usage:
82 | ```bash
83 | promptmage restore --json_path
84 | ```
85 |
86 | Available options:
87 | - **`--json_path`** (`str`):
88 | The path to the json file to restore the database from.
89 |
90 |
91 | ## PromptMage `class`
92 |
93 | The `PromptMage` class is the main class of promptmage. It is used to store all the information about the flow and to run the flow.
94 |
95 | ### Attributes
96 |
97 | - **name** (`str`):
98 | The name of the `PromptMage` instance.
99 |
100 | - **remote** (`str`):
101 | The URL of the remote backend to use. If this is set, the `PromptMage` instance will use the remote backend instead of the local one.
102 |
103 | - **available_models** (`List[str]`):
104 | A list of available models to use for the flow.
105 |
106 | !!! info
107 |
108 | The available models are just strings that are passed to the step function to specify the model to use for the completion. You have to handle the model selection in the step function.
109 |
110 | ### Methods
111 |
112 | #### `PromptMage.step()` `decorator`
113 |
114 | Decorator to define a step in the flow.
115 |
116 | !!! tip
117 |
118 | A step is just a python function with the `@mage.step()` decorator which returns a `MageResult`.
119 |
120 | ##### Arguments
121 |
122 | - **name** (`str`):
123 | The name of the step.
124 |
125 | - **prompt_name** (`str`):
126 | The name of the prompt to use for this step.
127 |
128 | - **initial** (`bool`):
129 | Whether this is the initial step of the flow.
130 |
131 | - **one_to_many** (`bool`):
132 | Whether this step should be run for each item in the input list.
133 |
134 | - **many_to_one** (`bool`):
135 | Whether this step should be run for each item in the input list and the results should be combined.
136 |
137 | ---
138 |
139 | ## MageResult `class`
140 |
141 | The `MageResult` class is used to return the result of a step.
142 |
143 | ### Attributes
144 |
145 | - **next_step** (`str | None`):
146 | The name of the next step to run.
147 |
148 | - **error** (`str | None`):
149 | An error message if the step failed.
150 |
151 | - **\*\*kwargs** (`Any`):
152 | All additional keyword arguments are stored as the result by name and can be used by the next step.
153 |
154 | ---
155 |
156 | ## Prompt `class`
157 |
158 | The `Prompt` class is used to store the prompt information.
159 |
160 | !!! warning
161 |
162 | This class should not be created by the user. It is automatically created by the `PromptMage` instance and only used to pass the prompt to the step functions and retrieve the prompts from the database.
163 |
164 | ### Attributes
165 |
166 | - **system** (`str`):
167 | The system prompt.
168 |
169 | - **user** (`str`):
170 | The user prompt.
171 |
--------------------------------------------------------------------------------
/docs/roadmap.md:
--------------------------------------------------------------------------------
1 | # Roadmap
2 |
3 | ## 2024
4 |
5 | ### August
6 |
7 | - [x] Implement a dynamic execution graph for flows
8 | - [x] Implement an evaluation mode for applications
9 |
10 |
11 | ### September
12 |
13 | - [x] Implement a remote backend for PromptMage
14 | - [x] Improve error handling and reporting
15 |
16 | ### October
17 | - [ ] More complex use-case examples
18 | - [ ] Implement a robust task queue for LLM calls
19 |
20 | ### November
21 |
22 | - [ ] Implement automatic evaluation with llm-as-a-judge
23 |
24 | ### December
25 |
26 | - [ ] more to come!
27 |
28 | ## 2025
29 |
--------------------------------------------------------------------------------
/docs/stylesheets/extra.css:
--------------------------------------------------------------------------------
1 | [data-md-color-scheme="slate"] {
2 | --md-primary-fg-color: #166088;
3 | }
4 |
5 | [data-md-color-scheme="default"] {
6 | --md-primary-fg-color: #166088;
7 | }
8 |
9 | /* Hero */
10 | .hero {
11 | display: flex;
12 | align-items: center;
13 | justify-content: space-between;
14 | background-color: var(--md-default-bg-color);
15 | margin-bottom: 40px;
16 | }
17 |
18 | .hero-image {
19 | flex: 0 0 70%;
20 | max-width: 70%;
21 | }
22 |
23 | .hero-image img {
24 | width: 100%;
25 | height: auto;
26 | border-radius: 4px;
27 | }
28 |
29 | .hero-content {
30 | flex: 1;
31 | padding-left: 20px;
32 | text-align: left;
33 | color: var(--md-default-fg-color);
34 | }
35 |
36 | .hero-content h1 {
37 | font-size: 2rem;
38 | margin-bottom: 20px;
39 | color: var(--md-primary-fg-color);
40 | }
41 |
42 | .hero-content p {
43 | margin-bottom: 30px;
44 | color: var(--md-secondary-fg-color);
45 | }
46 |
47 | .hero-content .button {
48 | display: inline-block;
49 | padding: 10px 20px;
50 | margin-right: 10px;
51 | font-size: 1rem;
52 | text-decoration: none;
53 | color: white;
54 | background-color: var(--md-primary-fg-color);
55 | border-radius: 5px;
56 | }
57 |
58 | .hero-content .button.secondary {
59 | background-color: var(--md-accent-fg-color);
60 | }
61 |
62 | .hero-content .button:hover {
63 | opacity: 0.8;
64 | }
65 |
66 | @media (max-width: 768px) {
67 | .hero {
68 | flex-direction: column;
69 | text-align: center;
70 | }
71 |
72 | .hero-image, .hero-content {
73 | max-width: 100%;
74 | padding: 0;
75 | }
76 |
77 | .hero-content {
78 | padding-top: 20px;
79 | }
80 | }
81 |
82 |
--------------------------------------------------------------------------------
/docs/stylesheets/hides.css:
--------------------------------------------------------------------------------
1 | /* Hide the title of the page */
2 | .hide-title article h1 {
3 | display: none !important;
4 | }
--------------------------------------------------------------------------------
/docs/tutorial.md:
--------------------------------------------------------------------------------
1 | # Tutorial
2 |
3 | Welcome to the PromptMage tutorial! This tutorial will guide you through the basics of PromptMage and show you how to integrate it into your own LLM project.
4 |
5 | ## Use case
6 |
7 | For this tutorial, we want to build a simple multi-step LLM application. It contains multiple dependent steps, where the output of one step is used as the input for the next step. The application will be used to summarize an input text by first extracting facts from it and then summarizing based on those facts.
8 |
9 | The application will have the following steps:
10 |
11 | - Step 1: Extract facts from a given text
12 | - Step 2: Summarize the text using the extracted facts
13 |
14 | We assume all the steps are implemented as separate Python functions that take input and return output in one python file `summarizer.py`.
15 |
16 | ## Step 1: Install PromptMage
17 |
18 | First, we need to install PromptMage. You can install PromptMage using pip:
19 |
20 | ```bash
21 | pip install promptmage
22 | ```
23 |
24 | It is recommended to install PromptMage in a virtual environment to avoid conflicts with other packages.
25 |
26 | ## Step 2: Add PromptMage to your project
27 |
28 | First, you need to add PromptMage to your project. You do that by adding the following to your `summarizer.py` file:
29 |
30 | ```python
31 | # Create a new PromptMage instance
32 | mage = PromptMage(name="fact-summarizer")
33 | ```
34 |
35 | Next, you need to define the prompts and dependencies between the steps. You can do that by adding the following code to the functions in the `summarizer.py` file:
36 |
37 | ```python
38 | @mage.step(name="extract", prompt_name="extract_facts", initial=True)
39 | def extract_facts(article: str, prompt: Prompt) -> str:
40 | #
41 | return MageResult(facts=facts, next_step="summarize")
42 | ```
43 |
44 | As a first step, this needs to be the initial step, so we set the `initial` parameter to `True`. This will be the first step that is executed when the application is run. Every step needs to return a `MageResult` object, which contains the output of the step and the name of the next step to be executed. In this case, the next step is the `summarize` step. Note, that you can also return a list of `MageResult` objects if you want to execute multiple steps in parallel.
45 |
46 | ```python
47 | @mage.step(name="summarize", prompt_name="summarize_facts")
48 | def summarize_facts(facts: str, prompt: Prompt) -> str:
49 | #
50 | return MageResult(summary=summary)
51 | ```
52 |
53 | If the next_step is not specified, the step will be considered a terminal step and the application will stop after executing this step.
54 |
55 | Now you can access the prompts within the step functions using the `prompt` argument. The `prompt` argument is an instance of the `Prompt` class, which provides methods to interact with the prompt.
56 | By default we have a system and a user prompt available by `prompt.system` and `prompt.user` respectively. The prompts are later created in the web UI.
57 |
58 | You don't need to worry about saving the prompts and data, PromptMage will take care of that for you.
59 |
60 | ## Step 3: Run the application
61 |
62 | Now you can run the application by
63 |
64 | ```bash
65 | promptmage run summarizer.py
66 | ```
67 |
68 | This will start the PromptMage web UI, where you can interact with the prompts and run and see the output of the steps.
69 | You can access the web UI at `http://localhost:8000/gui/`.
70 |
71 |
72 | More examples can be found in the [examples](https://github.com/tsterbak/promptmage/tree/main/examples) folder.
--------------------------------------------------------------------------------
/docs/walkthrough.md:
--------------------------------------------------------------------------------
1 | # Walkthrough
2 |
3 | ## Launching the application
4 |
5 | After you installed promptmage and added it to your project following the [tutorial](tutorial.md), you can now run the application and interact with it in the web UI.
6 |
7 | To run the application, you can use the following command:
8 |
9 | ```bash
10 | promptmage run summarizer.py
11 | ```
12 |
13 | This will start the promptmage server and run the application at the given path.
14 |
15 | ## Accessing the API
16 |
17 | PromptMage automatically creates an API for your application using FastAPI. You can access the API at `http://localhost:8000/api/` and the Swagger documentation at `http://localhost:8000/docs/`.
18 |
19 | 
20 |
21 | You can use the API to interact with your application programmatically or integrate it into other services.
22 |
23 | ## Interacting with the web UI
24 |
25 | You can access the web UI at `http://localhost:8000/gui/`. Here you can interact with the prompts and see the output of the steps.
26 |
27 | ### Application Overview
28 |
29 | The application overview shows all available flows.
30 |
31 | 
32 |
33 | ### Flow Overview
34 |
35 | The flow overview shows all steps of the flow and their status as well as an execution graph for the flow once executed.
36 |
37 | 
38 |
39 | ### Step interaction
40 |
41 | You can interact with the steps by clicking on them. This will expand the step and show the prompts and the output of the step.
42 | This also allows you to manually run the step and tweak the input and prompts.
43 |
44 | 
45 |
46 |
47 | ## Runs page
48 |
49 | The runs page shows all runs of the application and allows you to see the output of the steps for each run.
50 |
51 | 
52 |
53 | You can also replay runs to see the output of the steps and the prompts that were used during the run.
54 |
55 | ## Prompt repository
56 |
57 | The prompt repository allows you to manage your prompts. You can create new prompt versions, edit existing prompts, and delete prompts. You can also see the history of a prompt and see which runs used the prompt.
58 |
59 | 
60 |
61 |
62 | ## Conclusion
63 |
64 | This concludes the walkthrough of PromptMage. You have seen how to install and use PromptMage, how to create a simple application, and how to interact with the web UI. You can now integrate PromptMage into your workflow and use it to build and test your applications faster and more efficiently.
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # PromptMage Examples
2 |
3 | This repository contains examples for using PromptMage in your application or workflow.
4 |
5 | ## Examples
6 |
7 | - Article Summarizer: A simple multi-step LLM application that extracts facts from a given text and summarizes the text using the extracted facts. [View Example](https://github.com/tsterbak/promptmage/blob/main/examples/summarize_article_by_facts.py)
8 |
9 | - Answer questions about YouTube videos: A multi-step LLM application that extracts information from a YouTube video and answers questions about the video. [View Example](https://github.com/tsterbak/promptmage/blob/main/examples/youtube_understanding.py)
10 |
11 | - Multi-flow example: An example that demonstrates how to use multiple flows in a single application. [View Example](https://github.com/tsterbak/promptmage/blob/main/examples/multiflow.py)
12 |
13 |
14 | ## Getting Started
15 |
16 | ### Installation
17 |
18 | Install the dependencies from the pyproject.toml file.
19 |
20 | ```bash
21 | poetry install
22 | ```
23 |
24 | ### Usage
25 |
26 | To use PromptMage, run the following command:
27 |
28 | ```bash
29 | poetry run promptmage run <example-file>.py
30 | ```
31 |
32 |
33 | ## Docker Example
34 |
35 | You can find a usage example with Docker here: [Docker example](https://github.com/tsterbak/promptmage/tree/main/examples/docker)
--------------------------------------------------------------------------------
/examples/docker/.dockerignore:
--------------------------------------------------------------------------------
1 | .env
--------------------------------------------------------------------------------
/examples/docker/.env.example:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
--------------------------------------------------------------------------------
/examples/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use Python 3.11 as the base image
2 | FROM python:3.11-slim
3 |
4 | # Set the working directory in the container
5 | WORKDIR /app
6 |
7 | # Copy the current directory contents into the container at /app
8 | COPY . /app
9 |
10 | # Install promptmage
11 | RUN pip install promptmage==0.1.3
12 |
13 | # Install any needed packages specified in requirements.txt
14 | RUN pip install --no-cache-dir -r requirements.txt
15 |
16 | # Make port 8000 available to the world outside this container
17 | EXPOSE 8000
18 |
19 | # Run app.py when the container launches
20 | CMD ["promptmage", "run", "summarize_article_by_facts.py"]
21 |
--------------------------------------------------------------------------------
/examples/docker/README.md:
--------------------------------------------------------------------------------
1 | # PromptMage with Docker Example
2 |
3 | This example demonstrates how to use PromptMage with Docker.
4 |
5 | ## Prerequisites
6 |
7 | - Install docker
8 | - Add .env file with the api keys you need. You can use the .env.example file as a template.
9 |
10 | ## Usage
11 |
12 | ### Build the Docker image
13 |
14 | ```bash
15 | docker build -t promptmage-example .
16 | ```
17 |
18 | ### Run the Docker container
19 |
20 | ```bash
21 | docker run -p 8000:8000 -v $(pwd)/.promptmage:/app/.promptmage --env-file ./.env promptmage-example
22 | ```
23 |
24 | ### Access the API
25 |
26 | ```bash
27 | http://localhost:8000
28 | ```
29 |
30 | ## License
31 |
32 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
33 |
34 |
--------------------------------------------------------------------------------
/examples/docker/requirements.txt:
--------------------------------------------------------------------------------
1 | openai==1.30.1
2 | python-dotenv==1.0.1
3 |
--------------------------------------------------------------------------------
/examples/docker/summarize_article_by_facts.py:
--------------------------------------------------------------------------------
1 | from dotenv import load_dotenv
2 | from openai import OpenAI
3 |
4 | from promptmage import PromptMage, Prompt, MageResult
5 | from promptmage.storage import (
6 | SQLitePromptBackend,
7 | SQLiteDataBackend,
8 | PromptStore,
9 | DataStore,
10 | )
11 |
12 |
13 | load_dotenv()
14 |
15 |
16 | client = OpenAI()
17 |
18 | # Setup the prompt store and data store
19 | prompt_store = PromptStore(backend=SQLitePromptBackend())
20 |
21 | data_store = DataStore(backend=SQLiteDataBackend())
22 |
23 | # Create a new PromptMage instance
24 | mage = PromptMage(
25 | name="fact-extraction", prompt_store=prompt_store, data_store=data_store
26 | )
27 |
28 |
29 | # Application code
30 |
31 |
@mage.step(name="extract", prompt_name="extract_facts", initial=True)
def extract_facts(article: str, prompt: Prompt) -> str:
    """Extract the facts as a bullet list from an article."""
    # Assemble the chat messages from the stored system/user prompts;
    # the user prompt template is filled with the article text.
    messages = [
        {"role": "system", "content": prompt.system},
        {"role": "user", "content": prompt.user.format(article=article)},
    ]
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",  # "llama3:instruct",
        messages=messages,
    )
    # Route the extracted facts on to the summarization step.
    return MageResult(next_step="summarize", facts=response.choices[0].message.content)
46 |
47 |
@mage.step(name="summarize", prompt_name="summarize_facts")
def summarize_facts(facts: str, prompt: Prompt) -> str:
    """Summarize the given facts as a single sentence."""
    # Fill the user prompt template with the facts produced upstream.
    user_message = {"role": "user", "content": prompt.user.format(facts=facts)}
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",  # "llama3:instruct",
        messages=[{"role": "system", "content": prompt.system}, user_message],
    )
    # No next_step: this is a terminal step, so the flow ends here.
    return MageResult(summary=response.choices[0].message.content)
62 |
--------------------------------------------------------------------------------
/examples/minimal-example.py:
--------------------------------------------------------------------------------
1 | from promptmage import PromptMage, Prompt, MageResult
2 |
3 | mage = PromptMage(
4 | name="example",
5 | )
6 |
7 |
@mage.step(name="step1", prompt_name="prompt1", initial=True)
def step1(question: str, prompt: Prompt) -> MageResult:
    """Return a canned answer for *question* and terminate the flow."""
    # next_step=None marks this as a terminal step.
    return MageResult(next_step=None, result=f"Answer to {question}")
12 |
--------------------------------------------------------------------------------
/examples/multiflow.py:
--------------------------------------------------------------------------------
1 | from summarize_article_by_facts import mage as flow1
2 | from youtube_understanding import mage as flow2
3 |
4 | flow1
5 |
6 | flow2
7 |
--------------------------------------------------------------------------------
/examples/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "examples"
3 | version = "0.1.1"
4 | description = ""
5 | authors = ["Tobias Sterbak "]
6 | readme = "README.md"
7 |
8 | [tool.poetry.dependencies]
9 | python = "^3.11"
10 | openai = "^1.30.1"
11 | promptmage = {path = "../.", develop = true}
12 | python-dotenv = "^1.0.1"
13 | youtube-transcript-api = "^0.6.2"
14 |
15 |
16 | [build-system]
17 | requires = ["poetry-core"]
18 | build-backend = "poetry.core.masonry.api"
19 |
--------------------------------------------------------------------------------
/examples/summarize_article_by_facts.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import List
3 | from openai import OpenAI
4 | from dotenv import load_dotenv
5 |
6 | from promptmage import PromptMage, Prompt, MageResult
7 |
8 | load_dotenv()
9 |
10 |
11 | client = OpenAI()
12 |
13 | # Create a new PromptMage instance
14 | mage = PromptMage(
15 | name="fact-extraction",
16 | available_models=["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo"],
17 | )
18 |
19 |
20 | # Application code #
21 |
22 |
@mage.step(name="extract", prompt_name="extract_facts", initial=True)
def extract_facts(
    article: str, focus: str | None, prompt: Prompt, model: str = "gpt-4o-mini"
) -> List[MageResult]:
    """Extract the facts as a bullet list from an article."""
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": prompt.system},
            {
                "role": "user",
                "content": prompt.user.format(article=article, focus=focus),
            },
        ],
    )
    # The model may wrap its JSON answer in a ```json ... ``` fence; strip it
    # before parsing.
    payload = response.choices[0].message.content
    payload = payload.replace("```json", "").strip("```").strip()
    # Fan out: emit one result per extracted fact, each routed to the
    # fact-checking step.
    results = []
    for fact in json.loads(payload):
        results.append(MageResult(next_step="check_facts", fact=str(fact)))
    return results
43 |
44 |
@mage.step(
    name="check_facts",
    prompt_name="check_facts",
)
def check_facts(fact: str, prompt: Prompt, model: str = "gpt-4o-mini") -> MageResult:
    """Check a single extracted fact for accuracy.

    Args:
        fact: The fact to verify.
        prompt: Prompt object providing the system and user templates.
        model: Name of the model to use for the completion.

    Returns:
        A MageResult routed to the "summarize" step, carrying the fact
        together with the model's check verdict.
    """
    response = client.chat.completions.create(
        # Fix: previously hard-coded "gpt-4o-mini", silently ignoring the
        # caller-selected `model` parameter (the sibling `extract` step
        # already honours it). Default value is unchanged, so existing
        # callers see identical behavior.
        model=model,
        messages=[
            {"role": "system", "content": prompt.system},
            {
                "role": "user",
                "content": prompt.user.format(fact=fact),
            },
        ],
    )
    return MageResult(
        next_step="summarize",
        check_results=f"Fact: {fact}\n\nCheck result: {response.choices[0].message.content}",
    )
65 |
66 |
@mage.step(
    name="summarize",
    prompt_name="summarize_facts",
    many_to_one=True,
)
def summarize_facts(check_results: str, prompt: Prompt) -> MageResult:
    """Summarize the given facts as a single sentence."""
    # many_to_one: this step receives the combined check results of all facts.
    user_message = {
        "role": "user",
        "content": prompt.user.format(check_result=check_results),
    }
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "system", "content": prompt.system}, user_message],
    )
    # No next_step: terminal step of the flow.
    return MageResult(result=response.choices[0].message.content)
85 |
--------------------------------------------------------------------------------
/examples/summarize_article_by_facts_remote.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import List
3 | from openai import OpenAI
4 | from dotenv import load_dotenv
5 |
6 | from promptmage import PromptMage, Prompt, MageResult
7 |
8 | load_dotenv()
9 |
10 |
11 | client = OpenAI()
12 |
13 | # Create a new PromptMage instance
14 | mage = PromptMage(
15 | name="fact-extraction",
16 | available_models=["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo"],
17 | remote_url="http://localhost:8021",
18 | )
19 |
20 |
21 | # Application code #
22 |
23 |
@mage.step(name="extract", prompt_name="extract_facts", initial=True)
def extract_facts(
    article: str, focus: str | None, prompt: Prompt, model: str = "gpt-4o-mini"
) -> List[MageResult]:
    """Extract the facts as a bullet list from an article."""
    user_message = prompt.user.format(article=article, focus=focus)
    completion = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": prompt.system},
            {"role": "user", "content": user_message},
        ],
    )
    # Strip an optional ```json ... ``` fence before parsing the JSON array.
    payload = completion.choices[0].message.content
    payload = payload.replace("```json", "").strip("```").strip()
    facts = json.loads(payload)
    # Fan out: one result per fact, each routed to the fact-checking step.
    return [MageResult(next_step="check_facts", fact=str(fact)) for fact in facts]
44 |
45 |
@mage.step(
    name="check_facts",
    prompt_name="check_facts",
)
def check_facts(fact: str, prompt: Prompt, model: str = "gpt-4o-mini") -> MageResult:
    """Check the extracted facts for accuracy.

    Args:
        fact: A single fact produced by the "extract" step.
        prompt: The prompt (system + user template) injected by PromptMage.
        model: Name of the OpenAI chat model to use.

    Returns:
        A MageResult routed to the "summarize" step carrying the fact plus
        the model's check verdict.
    """
    response = client.chat.completions.create(
        # BUGFIX: honor the `model` parameter; it was previously accepted but
        # ignored in favor of a hard-coded "gpt-4o-mini".
        model=model,
        messages=[
            {"role": "system", "content": prompt.system},
            {
                "role": "user",
                "content": prompt.user.format(fact=fact),
            },
        ],
    )
    return MageResult(
        next_step="summarize",
        check_results=f"Fact: {fact}\n\nCheck result: {response.choices[0].message.content}",
    )
66 |
67 |
@mage.step(
    name="summarize",
    prompt_name="summarize_facts",
    many_to_one=True,
)
def summarize_facts(check_results: str, prompt: Prompt) -> MageResult:
    """Summarize the given facts as a single sentence.

    Marked many_to_one — presumably aggregates the outputs of all
    "check_facts" runs into a single invocation; confirm against the
    PromptMage step semantics.

    Args:
        check_results: The combined fact-check results to summarize.
        prompt: The prompt (system + user template) injected by PromptMage.

    Returns:
        A terminal MageResult (no next_step) carrying the summary text.
    """
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": prompt.system},
            {
                "role": "user",
                "content": prompt.user.format(check_result=check_results),
            },
        ],
    )
    return MageResult(result=response.choices[0].message.content)
86 |
--------------------------------------------------------------------------------
/examples/youtube_understanding.py:
--------------------------------------------------------------------------------
1 | from dotenv import load_dotenv
2 | from openai import OpenAI
3 | from youtube_transcript_api import YouTubeTranscriptApi
4 | from youtube_transcript_api.formatters import TextFormatter
5 |
6 | from promptmage import PromptMage, Prompt, MageResult
7 | from promptmage.storage import (
8 | SQLitePromptBackend,
9 | SQLiteDataBackend,
10 | PromptStore,
11 | DataStore,
12 | )
13 |
14 |
15 | load_dotenv()
16 |
17 |
18 | client = OpenAI()
19 |
20 | # Setup the prompt store and data store
21 | prompt_store = PromptStore(backend=SQLitePromptBackend())
22 | data_store = DataStore(backend=SQLiteDataBackend())
23 |
24 | # Create a new PromptMage instance
25 | mage = PromptMage(
26 | name="youtube-understanding", prompt_store=prompt_store, data_store=data_store
27 | )
28 |
29 |
@mage.step(name="get-transcript", initial=True)
def get_transcript(video_id: str, question: str) -> MageResult:
    """Get the transcript of a YouTube video.

    Uses the YouTube API to get the transcript of a video with youtube-transcript-api.

    Args:
        video_id: The YouTube video ID to fetch the transcript for.
        question: The user question, passed through to downstream steps.

    Returns:
        A MageResult fanning out to both "create-outline" and "extract-facts".
    """
    # NOTE(review): only German ("de") transcripts are requested — confirm
    # this restriction is intended.
    transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=["de"])
    transcript_text = TextFormatter().format_transcript(transcript)
    return MageResult(
        next_step=["create-outline", "extract-facts"],
        transcript=transcript_text,
        question=question,
    )
43 |
44 |
@mage.step(name="create-outline", prompt_name="create-outline")
def create_outline(transcript: str, question: str, prompt: Prompt) -> MageResult:
    """Create an outline from the transcript of a YouTube video.

    Args:
        transcript: The formatted transcript text.
        question: The user question, forwarded to the "answer-question" step.
        prompt: The prompt (system + user template) injected by PromptMage.

    Returns:
        A MageResult routed to "answer-question" with the generated outline.
    """
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": prompt.system},
            {"role": "user", "content": prompt.user.format(transcript=transcript)},
        ],
    )
    return MageResult(
        next_step="answer-question",
        outline=response.choices[0].message.content,
        question=question,
    )
60 |
61 |
@mage.step(name="extract-facts", prompt_name="extract-facts")
def extract_facts(transcript: str, question: str, prompt: Prompt) -> MageResult:
    """Extract facts from the transcript of a YouTube video.

    Args:
        transcript: The formatted transcript text.
        question: Accepted but not forwarded — unlike create_outline, the
            result below does not include it; the "answer-question" step
            receives the question from create_outline's result instead.
        prompt: The prompt (system + user template) injected by PromptMage.

    Returns:
        A MageResult routed to "answer-question" with the extracted facts.
    """
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": prompt.system},
            {"role": "user", "content": prompt.user.format(transcript=transcript)},
        ],
    )
    return MageResult(
        next_step="answer-question",
        facts=response.choices[0].message.content,
    )
76 |
77 |
@mage.step(
    name="answer-question",
    prompt_name="answer-question",
)
def answer_question(outline: str, facts: str, question: str, prompt: Prompt) -> MageResult:
    """Answer a question based on the outline of a YouTube video.

    Joins the two upstream branches: the outline from "create-outline" and
    the facts from "extract-facts".

    Args:
        outline: The outline produced by the "create-outline" step.
        facts: The facts produced by the "extract-facts" step.
        question: The user question to answer.
        prompt: The prompt (system + user template) injected by PromptMage.

    Returns:
        A terminal MageResult (no next_step) carrying the answer text.
    """
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": prompt.system},
            {
                "role": "user",
                "content": prompt.user.format(
                    outline=outline, facts=facts, question=question
                ),
            },
        ],
    )
    return MageResult(answer=response.choices[0].message.content)
97 |
--------------------------------------------------------------------------------
/images/mtl-powered-by.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/images/mtl-powered-by.png
--------------------------------------------------------------------------------
/images/promptmage-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/images/promptmage-logo.png
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: PromptMage
2 | site_url: https://promptmage.io/
3 | site_description: PromptMage
4 |
5 | repo_url: https://github.com/tsterbak/promptmage
6 | repo_name: tsterbak/promptmage
7 | copyright: Copyright © 2024 PromptMage
8 |
9 | nav:
10 | - Home: index.md
11 | - Getting Started: getting-started.md
12 | - Roadmap: roadmap.md
13 | - Tutorials: tutorial.md
14 | - API Reference: reference.md
15 | - Walkthrough: walkthrough.md
16 | - License: license.md
17 |
18 | theme:
19 | name: material
20 | logo: images/promptmage-logo.png
21 | favicon: images/favicon-32x32.png
22 | icon:
23 | repo: fontawesome/brands/git-alt
24 | annotation: material/plus-circle-outline
25 |
26 | features:
27 | - search
28 | - search.suggest
29 | - content.code.copy
30 | - content.code.annotate
31 | - navigation.tracking
32 | - navigation.tabs
33 | #- navigation.tabs.sticky
34 | - navigation.sections
35 | - toc.integrate
36 | - navigation.top
37 |
38 | palette:
39 | # Palette toggle for light mode
40 | - media: "(prefers-color-scheme: light)"
41 | scheme: default
42 | toggle:
43 | icon: material/lightbulb-outline
44 | name: Switch to dark mode
45 |
46 | # Palette toggle for dark mode
47 | - media: "(prefers-color-scheme: dark)"
48 | scheme: slate
49 | toggle:
50 | icon: material/lightbulb
51 | name: Switch to light mode
52 |
53 | extra_css:
54 | - stylesheets/hides.css
55 | - stylesheets/extra.css
56 |
57 | markdown_extensions:
58 | - attr_list
59 | - md_in_html
60 | - admonition
61 | - def_list
62 | - pymdownx.highlight:
63 | anchor_linenums: true
64 | line_spans: __span
65 | pygments_lang_class: true
66 | - pymdownx.inlinehilite
67 | - pymdownx.snippets
68 | - pymdownx.details
69 | - pymdownx.superfences
70 | - pymdownx.arithmatex:
71 | generic: true
72 | - pymdownx.emoji:
73 | emoji_index: !!python/name:material.extensions.emoji.twemoji
74 | emoji_generator: !!python/name:material.extensions.emoji.to_svg
75 | - pymdownx.tasklist:
76 | custom_checkbox: true
77 |
78 |
79 | plugins:
80 | - material-plausible
81 | - search:
82 | enabled: true
83 | lang: en
84 |
85 | extra:
86 | analytics:
87 | provider: plausible
88 | domain: promptmage.io
--------------------------------------------------------------------------------
/promptmage/__init__.py:
--------------------------------------------------------------------------------
1 | from .mage import PromptMage
2 | from .prompt import Prompt
3 | from .run_data import RunData
4 | from .result import MageResult
5 |
6 |
7 | import importlib.metadata
8 |
9 | __version__ = importlib.metadata.version("promptmage")
10 | title = """
11 | ╔═══╗ ╔╗ ╔═╗╔═╗
12 | ║╔═╗║ ╔╝╚╗║║╚╝║║
13 | ║╚═╝║╔═╗╔══╗╔╗╔╗╔══╗╚╗╔╝║╔╗╔╗║╔══╗ ╔══╗╔══╗
14 | ║╔══╝║╔╝║╔╗║║╚╝║║╔╗║ ║║ ║║║║║║╚ ╗║ ║╔╗║║╔╗║
15 | ║║ ║║ ║╚╝║║║║║║╚╝║ ║╚╗║║║║║║║╚╝╚╗║╚╝║║║═╣
16 | ╚╝ ╚╝ ╚══╝╚╩╩╝║╔═╝ ╚═╝╚╝╚╝╚╝╚═══╝╚═╗║╚══╝
17 | ║║ ╔═╝║
18 | ╚╝ ╚══╝
19 | """
20 |
21 | __all__ = ["PromptMage", "Prompt", "RunData", "MageResult", "__version__", "title"]
22 |
--------------------------------------------------------------------------------
/promptmage/api.py:
--------------------------------------------------------------------------------
1 | """This module contains the API for the PromptMage package."""
2 |
3 | import inspect
4 | import pkg_resources
5 | from loguru import logger
6 | from typing import List, Callable
7 | from pathlib import Path
8 | from pydantic import BaseModel
9 | from slugify import slugify
10 |
11 | from fastapi import FastAPI, Path, Query
12 | from fastapi.staticfiles import StaticFiles
13 | from fastapi.responses import HTMLResponse, FileResponse
14 | from fastapi.middleware.cors import CORSMiddleware
15 |
16 |
17 | from promptmage import PromptMage
18 |
19 |
class PromptMageAPI:
    """A class that creates a FastAPI application to serve PromptMage flows.

    Each flow gets its own family of routes under ``/api/<flow-slug>/``: one
    GET endpoint per step, metadata endpoints (prompts, data, steps), a
    ``run_flow`` endpoint for the full dependency graph and a websocket.
    """

    def __init__(self, flows: List[PromptMage]):
        """Store the flows to serve.

        Args:
            flows (List[PromptMage]): The flows to expose; must be non-empty.
        """
        self.flows = flows
        # The first flow is treated as the primary one (used in the API title).
        self.mage = flows[0]

    def get_app(self) -> FastAPI:
        """Create a FastAPI application to serve the PromptMage instance."""
        app = FastAPI(
            title=f"PromptMage API: {self.mage.name}", description="API for PromptMage."
        )

        app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

        static_files_path = pkg_resources.resource_filename("promptmage", "static/")
        app.mount(
            "/static",
            StaticFiles(directory=static_files_path, html=True),
            name="static",
        )

        # create index endpoint
        @app.get("/")
        async def index():
            # Serve the frontend entry point from the packaged static files.
            index_file_path = f"{static_files_path}/index.html"
            return FileResponse(index_file_path)

        # create index API endpoint
        @app.get("/api")
        async def root():
            flow_names_to_slug = {flow.name: slugify(flow.name) for flow in self.flows}
            flow_message = "Available flows: "
            for name, slug in flow_names_to_slug.items():
                flow_message += f"{name} "
            # BUGFIX: the previous literal contained a raw line break inside a
            # single-quoted f-string, which is a syntax error.
            return HTMLResponse(f"Welcome to the PromptMage API {flow_message}")

        # create an endpoint list of all available flows
        @app.get("/api/flows")
        async def list_flows():
            return {flow.name: slugify(flow.name) for flow in self.flows}

        # create the endpoints for each flow
        for flow in self.flows:
            flow_slug = slugify(flow.name)

            # Create one endpoint per step of THIS flow.
            # BUGFIX: previously iterated self.mage.steps (always the first
            # flow's steps) for every flow in the list.
            step_list = []
            for step_name, step in flow.steps.items():
                signature = inspect.signature(step.func)
                path = f"/api/{flow_slug}/{step_name}"
                params, path_variables = self._parameters_from_signature(signature)
                # TODO: not use path variables but build a request body dynamically
                path += path_variables

                # Update the signature so FastAPI recognizes the parameters.
                new_signature = signature.replace(parameters=params)
                endpoint_func = self.create_endpoint_function(step.execute)
                setattr(endpoint_func, "__signature__", new_signature)

                # Add the route to FastAPI
                app.add_api_route(
                    path,
                    endpoint_func,
                    methods=["GET"],
                    tags=[flow.name],
                    response_model=EndpointResponse,
                )
                step_list.append({"name": step_name, "path": path})

            # Metadata endpoints are built by factory methods so each closure
            # binds THIS flow / step list. BUGFIX: plain closures defined in
            # the loop late-bind the loop variable (every endpoint would have
            # served the last flow), and they also read self.mage's
            # prompts/data instead of the current flow's.
            app.add_api_route(
                f"/api/{flow_slug}/prompts",
                self._make_prompts_endpoint(flow),
                methods=["GET"],
                tags=[flow.name],
            )
            app.add_api_route(
                f"/api/{flow_slug}/data",
                self._make_data_endpoint(flow),
                methods=["GET"],
                tags=[flow.name],
            )
            app.add_api_route(
                f"/api/{flow_slug}/steps",
                self._make_steps_endpoint(step_list),
                methods=["GET"],
                tags=[flow.name],
            )

            # create an endpoint to run the full dependency graph of the flow
            # BUGFIX: previously always ran self.mage, not the current flow.
            run_function = flow.get_run_function(
                active_prompts=True
            )  # use only active prompts
            signature = inspect.signature(run_function)
            path = f"/api/{flow_slug}/run_flow"
            params, path_variables = self._parameters_from_signature(signature)
            path += path_variables
            new_signature = signature.replace(parameters=params)
            endpoint_func = self.create_endpoint_function(run_function)
            setattr(endpoint_func, "__signature__", new_signature)
            app.add_api_route(
                path,
                endpoint_func,
                methods=["GET"],
                response_model=EndpointResponse,
                tags=[flow.name],
            )
            # add a websocket for the flow
            app.add_websocket_route(f"/api/{flow_slug}/ws", flow.websocket_handler)

        return app

    @staticmethod
    def _make_prompts_endpoint(flow):
        """Build an endpoint returning all prompts of *flow*."""

        async def list_prompts():
            return flow.prompt_store.get_prompts()

        return list_prompts

    @staticmethod
    def _make_data_endpoint(flow):
        """Build an endpoint returning all stored run data of *flow*."""

        async def list_data():
            return flow.data_store.get_all_data()

        return list_data

    @staticmethod
    def _make_steps_endpoint(step_list):
        """Build an endpoint listing a flow's step names and route paths."""

        async def list_steps():
            return step_list

        return list_steps

    def _parameters_from_signature(self, signature):
        """Translate a step signature into FastAPI parameter declarations.

        Required parameters become path parameters, parameters with defaults
        become query parameters, and the PromptMage-injected ``prompt``
        argument is skipped.

        Returns:
            A tuple ``(params, path)``: the inspect.Parameter list and the
            path-variable suffix (e.g. ``"/{article}"``) to append.
        """
        params = []
        path = ""
        for name, param in signature.parameters.items():
            # ignore prompt parameter — it is supplied by PromptMage itself
            if name == "prompt":
                continue
            if param.default is inspect.Parameter.empty:
                # Assume required parameters are path parameters
                new_param = inspect.Parameter(
                    name,
                    kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
                    default=Path(..., description=f"Path parameter `{name}`"),
                    annotation=param.annotation,
                )
                path += f"/{{{name}}}"  # Add to the path
            else:
                # Parameters with defaults are query parameters
                new_param = inspect.Parameter(
                    name,
                    kind=inspect.Parameter.KEYWORD_ONLY,
                    default=Query(
                        param.default, description=f"Query parameter `{name}`"
                    ),
                    annotation=param.annotation,
                )
            params.append(new_param)
        return params, path

    def create_endpoint_function(self, func: Callable) -> Callable:
        """Wrap *func* in an async endpoint returning an EndpointResponse.

        Exceptions are caught and reported as a status-500 response body so
        the generated endpoints never raise to the client.
        """

        async def endpoint(*args, **kwargs):
            try:
                result = func(*args, **kwargs)
                return EndpointResponse(
                    name=f"{func.__name__}",
                    status=200,
                    message="Success",
                    result=str(result),
                )
            except Exception as e:
                # Log the full traceback; the client only gets the message.
                logger.exception(f"Failed to call {func.__name__}")
                return EndpointResponse(
                    name=f"{func.__name__}",
                    status=500,
                    message=f"Error when calling {func.__name__}: {e}",
                )

        return endpoint
188 |
189 |
class EndpointResponse(BaseModel):
    """Uniform response envelope for all generated step/flow endpoints."""

    # Name of the wrapped function that was called.
    name: str
    # HTTP-style status code of the call (200 on success, 500 on error).
    status: int = 500
    # Human-readable outcome message.
    message: str = "Internal Server Error"
    # Stringified result of the call, if it succeeded.
    result: str | List[str] | None = None
195 |
--------------------------------------------------------------------------------
/promptmage/cli.py:
--------------------------------------------------------------------------------
1 | """This module contains the command line interface for the PromptMage package."""
2 |
3 | import json
4 | import click
5 | import uvicorn
6 | from pathlib import Path
7 | from loguru import logger
8 |
9 | from promptmage import __version__, title
10 | from promptmage.utils import get_flows
11 | from promptmage.api import PromptMageAPI
12 | from promptmage.remote import RemoteBackendAPI
13 | from promptmage.frontend import PromptMageFrontend
14 | from promptmage.storage import SQLiteDataBackend, SQLitePromptBackend
15 | from promptmage.storage.utils import backup_db_to_json, restore_db_from_json
16 |
17 |
@click.group()
def promptmage():
    """Promptmage CLI"""
    # Root command group; subcommands are registered at the bottom of the module.
    pass
22 |
23 |
@click.command()
def version():
    """Print the version of the PromptMage package."""
    # __version__ is resolved from the installed package metadata.
    click.echo(f"PromptMage version: {__version__}")
28 |
29 |
@click.command()
@click.argument(
    "file_path",
    type=click.Path(
        exists=True,
    ),
)
@click.option("--host", default="localhost", help="The host IP to run the server on.")
@click.option("--port", default=8000, type=int, help="The port to run the server on.")
@click.option(
    "--browser",
    is_flag=True,
    help="Open the browser after starting the server.",
    default=False,
)
def run(file_path: str, host: str, port: int, browser: bool):
    """Serve the application containing a PromptMage instance from the given file.

    Args:
        file_path (str): The path to the file containing the PromptMage instance.
        host (str): The host IP to run the FastAPI server on.
        port (int): The port to run the FastAPI server on.
        browser (bool): Whether to open the browser after starting the server.
    """
    logger.info(f"\nWelcome to\n{title}")
    logger.info(f"Running PromptMage version {__version__} from {file_path}")
    # create the .promptmage directory to store all the data
    # NOTE(review): mode 0o777 is world-writable — confirm this is intended.
    dirPath = Path(".promptmage")
    dirPath.mkdir(mode=0o777, parents=False, exist_ok=True)

    # get the available flows from the source file
    available_flows = get_flows(file_path)

    if not available_flows:
        raise ValueError("No PromptMage instance found in the module.")

    # create the FastAPI app
    app = PromptMageAPI(flows=available_flows).get_app()

    # create the frontend app and attach it to the API app
    frontend = PromptMageFrontend(flows=available_flows)
    frontend.init_from_api(app)

    # Run the applications
    if browser:
        import webbrowser

        # NOTE(review): the tab opens before uvicorn starts serving, so the
        # first page load may race server startup.
        url = f"http://localhost:{port}"
        webbrowser.open_new_tab(url)
    uvicorn.run(app, host=host, port=port, log_level="info")
81 |
@click.command()
@click.option("--runs", "runs", default=False, help="Export runs.", flag_value=True)
@click.option(
    "--prompts", "prompts", default=False, help="Export prompts.", flag_value=True
)
@click.option(
    "--filename",
    default="promptmage",
    help="The name of the file to export the data to.",
)
def export(runs: bool = False, prompts: bool = False, filename: str = "promptmage"):
    """Export the run data and prompts from the PromptMage instance to json.

    Args:
        runs (bool): Whether to export the run data.
        prompts (bool): Whether to export the prompts.
        filename (str): The name prefix of the files to export the data to.
    """
    if runs:
        click.echo("Exporting runs...")
        data_store = SQLiteDataBackend()
        run_data = data_store.get_all_data()

        # BUGFIX: the --filename option was previously ignored; the output
        # file name was a hard-coded literal.
        with open(f"{filename}_runs.json", "w") as f:
            json.dump([run for run in run_data.values()], f)

    if prompts:
        click.echo("Exporting prompts...")
        prompt_store = SQLitePromptBackend()
        # Use a separate local instead of shadowing the `prompts` flag so the
        # summary below reflects what was requested, not list emptiness.
        all_prompts = prompt_store.get_prompts()

        with open(f"{filename}_prompts.json", "w") as f:
            json.dump([prompt.to_dict() for prompt in all_prompts], f)

    if not runs and not prompts:
        click.echo("No data to export.")
    else:
        click.echo("Export complete.")
120 |
121 |
@click.command()
@click.option("--host", help="The host IP to run the server on.", default="localhost")
@click.option("--port", help="The port to run the server on.", default=8021)
def serve(host: str, port: int):
    """Serve the PromptMage collaborative backend and frontend."""
    logger.info(f"\nWelcome to\n{title}")
    logger.info(f"Running PromptMage backend version {__version__}")
    # Make sure the local data directory exists before the backends touch it.
    storage_dir = Path(".promptmage")
    storage_dir.mkdir(mode=0o777, parents=False, exist_ok=True)

    # Assemble the remote backend on top of the SQLite storage backends.
    remote_backend = RemoteBackendAPI(
        url=f"http://{host}:{port}",
        data_backend=SQLiteDataBackend(),
        prompt_backend=SQLitePromptBackend(),
    )

    # Hand the FastAPI application over to uvicorn.
    uvicorn.run(remote_backend.get_app(), host=host, port=port, log_level="info")
143 |
144 |
@click.command()
# BUGFIX: --json_path previously used click.Path(exists=True), which rejects
# any path that does not already exist — but this is the backup's OUTPUT
# file, so it normally does not exist yet.
@click.option(
    "--json_path",
    type=click.Path(),
    help="The path to write the JSON file containing the database backup.",
    required=True,
)
def backup(json_path: str):
    """Backup the database from the PromptMage instance to json."""
    click.echo(f"Backing up the database to '{json_path}'...")
    # The SQLite database lives in the local .promptmage data directory.
    backup_db_to_json(db_path=".promptmage/promptmage.db", json_path=json_path)
    click.echo("Backup complete.")
159 |
160 |
@click.command()
@click.option(
    "--json_path",
    type=click.Path(
        exists=True,
    ),
    help="The path to the JSON file containing the database backup.",
    required=True,
)
def restore(json_path: str):
    """Restore the database from json to the PromptMage instance."""
    click.echo(f"Restoring the database from the backup '{json_path}'...")
    # check if the database already exists
    if Path(".promptmage/promptmage.db").exists():
        # Ask before clobbering the current database; abort=True exits on "no".
        click.confirm(
            "Are you sure you want to overwrite the current database?",
            abort=True,
        )
    # restore the database
    restore_db_from_json(db_path=".promptmage/promptmage.db", json_path=json_path)
    click.echo("Database restored successfully.")
182 |
183 |
# Register all subcommands on the root CLI group.
promptmage.add_command(version)
promptmage.add_command(run)
promptmage.add_command(export)
promptmage.add_command(serve)
promptmage.add_command(backup)
promptmage.add_command(restore)


if __name__ == "__main__":
    promptmage()
194 |
--------------------------------------------------------------------------------
/promptmage/exceptions.py:
--------------------------------------------------------------------------------
class PromptNotFoundException(Exception):
    """Raised when a prompt is not found in the backend."""

    def __init__(self, prompt_id: str):
        # Keep the offending ID on the exception for programmatic handling.
        message = f"Prompt with ID {prompt_id} not found."
        super().__init__(message)
        self.prompt_id = prompt_id
7 |
8 |
class DataNotFoundException(Exception):
    """Raised when data is not found in the backend."""

    def __init__(self, data_id: str):
        # Keep the offending ID on the exception for programmatic handling.
        message = f"Data with ID {data_id} not found."
        super().__init__(message)
        self.data_id = data_id
15 |
--------------------------------------------------------------------------------
/promptmage/frontend/__init__.py:
--------------------------------------------------------------------------------
1 | from .frontend import PromptMageFrontend
2 |
3 |
4 | __all__ = ["PromptMageFrontend"]
5 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/promptmage/frontend/components/__init__.py
--------------------------------------------------------------------------------
/promptmage/frontend/components/dataset_page.py:
--------------------------------------------------------------------------------
1 | from nicegui import ui
2 |
3 | from loguru import logger
4 | from typing import List
5 |
6 | from promptmage import PromptMage
7 | from promptmage.run_data import RunData
8 | from .styles import label_with_icon
9 |
10 | RATING_LABEL_LOOKUP = {1: "positive", -1: "negative", None: "Not rated"}
11 |
12 |
13 | # Function to build the rows for the table
def build_table_rows(runs, datapoints):
    """Create one table row per (run, datapoint) pair."""

    def _row(run_data, datapoint):
        return {
            "step_run_id": run_data.step_run_id,
            "name": run_data.step_name,
            "rated": RATING_LABEL_LOOKUP.get(datapoint.rating, "Invalid rating"),
        }

    return [_row(run_data, dp) for run_data, dp in zip(runs, datapoints)]
23 |
24 |
def build_dataset_page(flow: PromptMage, dataset_id: str):
    """Build the page for one dataset: header, rating progress, a table of
    runs and a slide-in side panel with run details and rating controls.

    Args:
        flow (PromptMage): The flow whose data store holds the dataset.
        dataset_id (str): ID of the dataset to display.

    Returns:
        A zero-argument callable that renders the page when invoked.
    """
    # Get the dataset
    dataset = flow.data_store.backend.get_dataset(dataset_id)

    datapoints = flow.data_store.backend.get_datapoints(dataset_id)

    # One RunData per datapoint, fetched by its run_data_id.
    runs: List[RunData] = [
        flow.data_store.backend.get_data(datapoint.run_data_id)
        for datapoint in datapoints
    ]

    # Filled in by build_ui(); the callbacks below mutate them in place.
    table = None
    progress_bar = None

    # side panel (off-screen until show_side_panel slides it in)
    side_panel = (
        ui.element("div")
        .style(
            "position: fixed; top: 0; right: 0; width: 50%; height: 100%; transform: translateX(100%); transition: transform 0.3s ease; z-index: 1000; overflow-y: auto;"
        )
        .classes("bg-gray-100 dark:bg-slate-800")
    )

    # rating function
    def rate_run(datapoint, rating):
        # Persist the rating (1 = positive, -1 = negative).
        flow.data_store.backend.rate_datapoint(datapoint.id, rating)

        # Update the datapoints and runs data after rating
        # (slice assignment keeps the list objects captured by the closures).
        datapoints[:] = flow.data_store.backend.get_datapoints(dataset_id)
        runs[:] = [
            flow.data_store.backend.get_data(dp.run_data_id) for dp in datapoints
        ]
        # Refresh the table content
        table.rows = build_table_rows(runs, datapoints)
        table.update()

        # refresh progress (share of datapoints that have a rating)
        progress = sum(datapoint.rating is not None for datapoint in datapoints) / len(
            datapoints
        )
        progress_bar.value = progress
        progress_bar.update()

    # Function to show the side panel with detailed information
    def show_side_panel(run_data: RunData):
        # Get the datapoint for the selected run
        datapoint = [
            datapoint
            for datapoint in datapoints
            if datapoint.run_data_id == run_data.step_run_id
        ][0]
        # Clear the side panel and update it with the new content
        side_panel.clear()
        with side_panel:
            ui.button(">>", on_click=hide_side_panel).style(
                "margin: 20px; margin-bottom: 0px; margin-top: 100px;"
            )
            with ui.card().style(
                "padding: 20px; margin-right: 20px; margin-top: 20px; margin-bottom: 20px; margin-left: 20px"
            ):
                # display run data
                with ui.row().classes("w-full"):
                    with ui.column().classes("gap-0"):
                        ui.label("Step name").classes("text-sm text-gray-500")
                        ui.label(f"{run_data.step_name}").classes("text-2xl")
                    ui.space()
                    with ui.column().classes("gap-0 items-center"):
                        ui.label("Status").classes("text-sm text-gray-500")
                        ui.chip(
                            f"{run_data.status}",
                            icon="",
                            color=f"{'green' if run_data.status == 'success' else 'red'}",
                        ).props("outline square")
                with ui.row().classes("w-full"):
                    # Left column: field labels; right column: values.
                    with ui.column().classes("gap-0"):
                        label_with_icon(
                            "Execution time:", icon="hourglass_bottom"
                        ).classes("text-sm text-gray-500")
                        label_with_icon("Run At:", icon="o_schedule").classes(
                            "text-sm text-gray-500"
                        )
                        label_with_icon("Model:", icon="o_psychology").classes(
                            "text-sm text-gray-500"
                        )
                        label_with_icon("Step Run ID:", icon="o_info").classes(
                            "text-sm text-gray-500"
                        )
                        label_with_icon("Run ID:", icon="o_info").classes(
                            "text-sm text-gray-500"
                        )
                        label_with_icon("Rating:", icon="o_scale").classes(
                            "text-sm text-gray-500 pt-4"
                        )
                    with ui.column().classes("gap-0"):
                        ui.label(
                            f"{run_data.execution_time if run_data.execution_time else 0.0:.2f}s"
                        )
                        ui.label(f"{run_data.run_time[:19]}")
                        ui.label(f"{run_data.model}")
                        ui.label(f"{run_data.step_run_id}")
                        ui.label(f"{run_data.run_id}")
                        ui.label()
                        ui.chip(
                            f"{RATING_LABEL_LOOKUP.get(datapoint.rating, 'Not rated')}",
                            icon="",
                            color=f"{'grey' if not datapoint.rating else ('red' if datapoint.rating == -1 else 'green')}",
                        ).props("outline square")

                ui.label("Input Data:").classes("text-lg")
                for key, value in run_data.input_data.items():
                    ui.markdown(f"**{key}**")
                    ui.markdown(f"{value}")
                ui.label("Output Data:").classes("text-lg")
                # output_data may be a dict or a plain value; fall back to
                # rendering it directly when it has no .items().
                try:
                    for key, value in run_data.output_data.items():
                        ui.markdown(f"**{key}**")
                        ui.markdown(f"{value}")
                except AttributeError:
                    ui.markdown(f"{run_data.output_data}")
                # rating buttons
                with ui.row():
                    ui.label("Rate this run:").classes("text-lg")
                    with ui.button_group():
                        ui.button(
                            icon="thumb_up",
                            on_click=lambda: rate_run(datapoint, 1),
                        )
                        ui.button(
                            icon="thumb_down",
                            on_click=lambda: rate_run(datapoint, -1),
                        )

        # Slide the panel into view.
        side_panel.style("transform:translateX(0%);")
        side_panel.update()

    # Function to hide the side panel
    def hide_side_panel():
        side_panel.clear()
        side_panel.style("transform:translateX(100%);")
        side_panel.update()

    # Function to download the data
    def download_data():
        import json

        logger.info("Downloading data")
        # Get the data
        data = [
            flow.data_store.backend.get_data(datapoint.run_data_id)
            for datapoint in datapoints
        ]
        # create the json file for export
        export_data = []
        for run_data, datapoint in zip(data, datapoints):
            export_data.append(
                {
                    "step_run_id": run_data.step_run_id,
                    "step_name": run_data.step_name,
                    "input_data": run_data.input_data,
                    "output_data": run_data.output_data,
                    "rating": datapoint.rating,
                }
            )
        # download the file
        ui.download(
            src=json.dumps(export_data, indent=4).encode("utf-8"),
            filename=f"{dataset.name}_data.json",
            media_type="application/json",
        )

    # Function to build the UI
    def build_ui():
        nonlocal table  # Access the outer-scope table variable
        nonlocal progress_bar  # Access the outer-scope progress_bar variable

        with ui.column().classes("w-2/5"):
            # Share of datapoints that already carry a rating.
            progress = sum(
                datapoint.rating is not None for datapoint in datapoints
            ) / len(datapoints)
            # header section
            with ui.card().classes("w-full"):
                with ui.row().classes("w-full"):
                    ui.label(f"Dataset name: {dataset.name}").classes("text-2xl")
                    ui.space()
                    ui.button(
                        "Delete",
                        on_click=lambda: flow.data_store.backend.delete_dataset(
                            dataset_id
                        ),
                    ).style("color: red;")
                    ui.button(
                        "Export",
                        on_click=download_data,
                    )
                ui.label(f"Number of datapoints: {len(datapoints)}").classes("text-lg")
                progress_bar = ui.linear_progress(
                    value=progress,
                    show_value=False,
                    size="20px",
                    color="primary",
                ).classes("w-full")

            # Create a table with clickable rows
            columns = [
                {
                    "name": "step_run_id",
                    "label": "step_run_id",
                    "field": "step_run_id",
                },
                {
                    "name": "name",
                    "label": "name",
                    "field": "name",
                    "sortable": True,
                },
                {
                    "name": "rated",
                    "label": "rated",
                    "field": "rated",
                    "sortable": True,
                },
            ]

            rows = build_table_rows(runs, datapoints)

            table = ui.table(
                columns=columns,
                rows=rows,
                selection="multiple",
                row_key="step_run_id",
                pagination={
                    "rowsPerPage": 20,
                    "sortBy": "run_time",
                    "page": 1,
                    "descending": True,
                },
            )

            # NOTE(review): the slot template below looks truncated in this
            # copy of the file (markup likely stripped) — verify against the
            # original source before editing.
            table.add_slot(
                "body-cell-rated",
                """


            {{ props.value }}


            """,
            )

            def on_row_click(event):
                # event.args[-2] carries the clicked row's data dict.
                selected_run_index = event.args[-2]["step_run_id"]
                show_side_panel(
                    run_data=[
                        r for r in runs if r.step_run_id == selected_run_index
                    ][-1]
                )

            table.on("rowClick", on_row_click)

    return build_ui
285 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/evaluation_page.py:
--------------------------------------------------------------------------------
1 | from nicegui import ui
2 | from typing import List
3 | from slugify import slugify
4 | from loguru import logger
5 |
6 | from promptmage import PromptMage
7 |
8 |
def build_evaluation_page(flow: PromptMage):
    """Build the evaluation page for a flow.

    Shows a grid of dataset cards (the first grid cell is a "create dataset"
    card) and a dialog for creating new datasets.

    Args:
        flow (PromptMage): The flow whose datasets are displayed.
    """
    available_datasets = []

    def get_datasets():
        """Refresh the cached dataset list from the data store."""
        nonlocal available_datasets
        available_datasets = flow.data_store.backend.get_datasets()

    get_datasets()
    columns: int = 5  # dataset cards per grid row

    def dataset_card(dataset, flow):
        """Render one dataset card with stats and delete/open actions."""
        datapoints = flow.data_store.backend.get_datapoints(dataset.id)
        # "done" means at least one datapoint exists and all are rated
        is_done = (
            all(dp.rating is not None for dp in datapoints) and len(datapoints) > 0
        )
        with ui.card().style("padding: 20px; margin: 10px;"):
            with ui.row().classes("items-center"):
                if is_done:
                    ui.icon("check_circle").style("color: green; font-size: 24px;")
                else:
                    ui.icon("o_pending").style("color: orange; font-size: 24px;")
                ui.label(f"{dataset.name}").classes("text-lg")
            ui.separator()
            with ui.row().classes("justify-between"):
                with ui.column():
                    ui.label("Description")
                    ui.label("Created at")
                    ui.label("Datapoints")
                    ui.label("Progress")
                    ui.label("Average rating")
                with ui.column():
                    ui.label(
                        f"{dataset.description if dataset.description else 'No description'}"
                    )
                    ui.label(f"{dataset.created}")
                    ui.label(f"{len(datapoints)}")
                    ratings = [dp.rating for dp in datapoints if dp.rating is not None]
                    if len(datapoints) == 0:
                        ui.label("0%")
                        ui.label("N/A")
                    else:
                        ui.label(f"{len(ratings) / len(datapoints) * 100:.1f}%")
                        # average over *rated* datapoints only; dividing by the
                        # total count (the old behavior) silently treated
                        # unrated entries as rating 0
                        if ratings:
                            ui.label(f"{sum(ratings) / len(ratings):.2f}")
                        else:
                            ui.label("N/A")
            ui.separator()
            with ui.row().classes("justify-between"):
                ui.button(
                    "Delete",
                    icon="o_delete",
                    on_click=lambda: delete_dataset(dataset.id, flow),
                ).style("color: red;").props("outline")
                ui.button(
                    "Go to dataset",
                    icon="o_arrow_forward",
                    on_click=lambda: ui.navigate.to(
                        f"/evaluation/{slugify(flow.name)}/{dataset.id}"
                    ),
                )

    def create_dataset(name: str, description: str, flow: PromptMage):
        """Create a new dataset.

        Args:
            name (str): The name of the dataset.
            description (str): The description of the dataset.
            flow (PromptMage): The PromptMage instance.
        """
        logger.info(f"Creating dataset: {name}")
        try:
            flow.data_store.backend.create_dataset(name, description)
            ui.notify(f"Dataset {name} created.")
            get_datasets()
            create_grid.refresh()
        except Exception as e:
            # best-effort: log the failure and keep the UI alive
            logger.error(f"Error creating dataset: {e}")

    def delete_dataset(dataset_id: str, flow: PromptMage):
        """Delete a dataset and refresh the grid."""
        flow.data_store.backend.delete_dataset(dataset_id)
        logger.info(f"Deleted dataset: {dataset_id}")
        get_datasets()
        create_grid.refresh()

    def create_new_dataset_dialog(flow: PromptMage):
        """Build (but do not open) the 'create new dataset' dialog."""
        # create new dataset dialog
        dialog = ui.dialog()
        with dialog, ui.card():
            ui.label("Create new dataset").classes("text-2xl")
            # fields
            name = ui.input(
                label="Name",
                placeholder="Enter the name of the dataset",
                validation={
                    "Name must be shorter than 100 characters!": lambda value: len(
                        value
                    )
                    < 100
                },
            )
            description = ui.textarea(
                label="Description",
                placeholder="Enter the description of the dataset",
                validation={
                    "Description must be shorter than 1000 characters!": lambda value: len(
                        value
                    )
                    < 1000
                },
            ).props("clearable")
            # final buttons
            with ui.row().classes("justify-end"):
                ui.button(
                    "Create",
                    on_click=lambda: create_dataset(
                        name=name.value, description=description.value, flow=flow
                    ),
                )
                ui.button("Close", on_click=dialog.close)
        return dialog

    @ui.refreshable
    def create_grid():
        """Render the dataset grid; grid cell (0, 0) is the create-dataset card."""
        new_dataset_dialog = create_new_dataset_dialog(flow)
        # Build the cell list locally instead of mutating available_datasets:
        # the previous in-place insert(0, None) added one extra placeholder
        # per refresh when the list was not refetched in between.
        cells = [None] + available_datasets
        rows = len(cells) // columns + (1 if len(cells) % columns > 0 else 0)
        for i in range(rows):
            with ui.row().classes("justify-center"):
                for j in range(columns):
                    if i == j == 0:
                        with ui.card().style("padding: 20px; margin: 10px;"):
                            ui.button(
                                "Create dataset",
                                icon="add",
                                on_click=lambda: new_dataset_dialog.open(),
                            )
                    else:
                        index = i * columns + j
                        if index < len(cells):
                            dataset_card(cells[index], flow=flow)

    with ui.column().classes("items-center"):
        create_grid()
157 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/flow_page.py:
--------------------------------------------------------------------------------
1 | from nicegui import ui
2 | from loguru import logger
3 |
4 | from promptmage import PromptMage
5 | from promptmage.step import MageStep
6 | from .main_runner import create_main_runner
7 | from .step_runner import create_function_runner
8 |
9 |
def render_mermaid_diagram(execution_list: list) -> tuple:
    """
    Generate a Mermaid diagram from a flow's execution results.

    Args:
        execution_list (list): Execution results; each entry is a dict with
            keys "previous_result_ids", "current_result_id", "step" and
            "results".

    Returns:
        tuple: A 3-tuple of
            - str: the Mermaid diagram code, followed by the click-event
              lines that emit ``graph_click`` events for clickable nodes,
            - dict: mapping of result id -> step name,
            - dict: mapping of result id -> step results.
    """
    diagram = ["graph TD"]
    graph_events = ""
    all_nodes = set()  # To keep track of all nodes
    nodes_with_outgoing_edges = set()  # To track nodes with outgoing edges
    id_to_step_name = {}  # Mapping of result_id to step name
    id_to_result = {}  # Mapping of result_id to result

    for result in execution_list:
        previous_ids = result["previous_result_ids"]
        current_id = result["current_result_id"]
        step_name = result["step"]

        # Store the mapping from id to step name
        id_to_step_name[current_id] = step_name

        # Store the mapping from id to result
        id_to_result[current_id] = result["results"]

        all_nodes.add(current_id)

        if previous_ids:
            for prev_id in previous_ids:
                diagram.append(f"    {prev_id} --> {current_id}")
                graph_events += (
                    f'click {prev_id} call emitEvent("graph_click", {prev_id})\n'
                )
                all_nodes.add(prev_id)
                nodes_with_outgoing_edges.add(prev_id)
        else:
            # no predecessors: this step starts the flow
            diagram.append(f"    start --> {current_id}")
            nodes_with_outgoing_edges.add("start")

    # Identify terminal nodes (nodes that do not have outgoing edges);
    # sorted so the generated diagram text is deterministic across runs
    terminal_nodes = sorted(all_nodes - nodes_with_outgoing_edges)

    # Add 'END' for terminal nodes
    for node in terminal_nodes:
        graph_events += f'click {node} call emitEvent("graph_click", {node})\n'
        diagram.append(f"    {node} --> END")

    # Add labels for nodes using their step names
    for node_id, step_name in id_to_step_name.items():
        diagram.append(f"    {node_id}({step_name})")

    return "\n".join(diagram) + "\n" + graph_events, id_to_step_name, id_to_result
62 |
63 |
@ui.refreshable
def execution_graph(flow: PromptMage):
    """Render the execution-graph panel for a flow.

    Shows a spinner while the flow is running, a clickable Mermaid diagram of
    the latest execution results, or a placeholder label when no results are
    available yet.

    Args:
        flow (PromptMage): The flow whose execution state is visualized.
    """
    with ui.dialog() as dialog, ui.card():
        ui.label("Execution Result will be shown here.")

    if flow.is_running:
        ui.spinner("puff", size="xl")
    elif flow.execution_results:
        graph, id_to_step_name, id_to_result = render_mermaid_diagram(
            flow.execution_results
        )

        def node_dialog(id: str):
            """Populate and open the dialog with the clicked node's results."""
            dialog.clear()
            with dialog, ui.card().classes("w-512 h-128"):
                with ui.row().classes("w-full justify-between"):
                    ui.label(f"Result for step '{id_to_step_name[id]}'").classes(
                        "text-lg"
                    )
                    ui.space()
                    ui.button("Close", on_click=dialog.close)
                ui.markdown(
                    "\n\n".join(
                        f"{variable}:\n{result}"
                        for variable, result in id_to_result[id].items()
                    )
                )
            dialog.open()

        # "loose" security level is required for Mermaid click events to fire;
        # the element handle itself is not needed afterwards (was previously
        # bound to an unused local)
        ui.mermaid(graph, config={"securityLevel": "loose"}).classes("w-2/3")
        ui.on("graph_click", lambda e: node_dialog(e.args))
    else:
        ui.label("No execution results available.")
99 |
100 |
def build_flow_page(flow: PromptMage):
    """Build the playground page for a flow: the runner on the left side of a
    splitter and the execution graph on the right.

    Args:
        flow (PromptMage): The flow to display and run.
    """
    with ui.row().classes("w-full gap-0"):
        with ui.splitter().classes("w-full p-0") as splitter:
            with splitter.before:
                with ui.column().classes("w-full items-center"):
                    # Create a card for the mage
                    with ui.card().classes("w-full"):
                        ui.label(f"Flow: {flow.name}").classes("text-xl")
                        # the context manager renders inputs above and the
                        # result section below the per-step runner cards
                        with create_main_runner(flow, execution_graph):
                            # Create a card for each step
                            with ui.column().classes("w-full"):
                                step: MageStep
                                for step in flow.steps.values():
                                    create_function_runner(step)()
            with splitter.after:
                # Create a card for the execution graph
                with ui.column().classes("w-full items-center pl-4"):
                    with ui.card().classes("w-full items-center"):
                        ui.label("Execution graph").classes("text-xl")
                        execution_graph(flow)
121 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/main_runner.py:
--------------------------------------------------------------------------------
1 | """This ui element represent the input, prompt and output of a callable step in the PromptMage."""
2 |
3 | import inspect
4 | from contextlib import contextmanager
5 | from nicegui import ui, run
6 |
7 | from promptmage import PromptMage
8 | from .styles import textbox_style
9 |
10 |
@contextmanager
def create_main_runner(mage: PromptMage, execution_graph):
    """Context manager that renders the flow-level runner UI.

    The inner ``build_ui`` generator yields between the input section and the
    result section, so the caller's ``with`` block inserts the per-step
    runner cards in between.

    Args:
        mage (PromptMage): The flow to run.
        execution_graph: A ``ui.refreshable`` element, refreshed before and
            after each run to reflect the execution state.
    """
    input_fields = {}
    result_field = None
    flow_func = mage.get_run_function(start_from=None)

    async def run_function():
        """Collect input values, run the flow off the UI thread and render the result."""
        inputs = {name: field.value for name, field in input_fields.items()}
        mage.is_running = True
        execution_graph.refresh()
        # io_bound keeps the (potentially long-running) flow off the UI event loop
        result = await run.io_bound(flow_func, **inputs)
        newline = "\n\n"
        # result may be a single mapping or a list of mappings
        # (presumably from one-to-many steps — TODO confirm)
        if isinstance(result, list):
            result_field.set_content(
                f"{[newline.join(res.values()) for res in result]}"
            )
        else:
            result_field.set_content(f"{newline.join(result.values())}")
        result_field.update()
        execution_graph.refresh()

    def build_ui():
        # Generator function: everything before ``yield`` renders above the
        # step runners, everything after renders below them.
        nonlocal result_field
        with ui.column().classes("w-full"):
            # elements before the steps runner
            ui.label("Inputs:").classes("font-bold text-lg")
            for param in inspect.signature(flow_func).parameters.values():
                # "prompt" and "model" are injected by the framework,
                # not entered by the user
                if param.name not in ["prompt", "model"]:
                    with ui.row().classes("w-full"):
                        input_fields[param.name] = (
                            ui.textarea(label=f"{param.name}")
                            .classes(textbox_style)
                            .props("outlined")
                        )

            with ui.row().classes("w-full justify-end"):
                ui.button("Run", on_click=run_function, icon="o_play_circle_filled")
            ui.separator()
            # steps runner
            ui.label("Steps:").classes("font-bold text-lg")
            yield
            # elements after the steps runner
            ui.separator()
            with ui.row().classes("w-full justify-between"):
                ui.label("Result:").classes("font-bold text-lg")
                ui.button(
                    "Copy to clipboard",
                    icon="o_content_copy",
                    on_click=lambda: ui.clipboard.write(result_field.content),
                )  # .props("fab")
            result_field = (
                ui.markdown("")
                .style("height: 200px; overflow-y: auto;")
                .classes("color-black dark:color-white")
            )

    # build_ui() returns a generator object, which @contextmanager drives:
    # the caller's ``with`` body runs at the ``yield`` above
    return build_ui()
68 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/menu.py:
--------------------------------------------------------------------------------
1 | from nicegui import ui
2 |
3 |
def menu(flow_name: str) -> None:
    """Render the top navigation button group for the given flow."""
    entries = [
        ("Overview", "/"),
        ("Playground", f"/{flow_name}"),
        ("Runs", f"/runs/{flow_name}"),
        ("Prompts", f"/prompts/{flow_name}"),
        ("Evaluation", f"/evaluation/{flow_name}"),
    ]
    with ui.button_group().classes("self-center"):
        for label, target in entries:
            # bind target as a default argument to avoid late-binding closures
            ui.button(
                label, on_click=lambda target=target: ui.navigate.to(target)
            ).classes(replace="text-white")
21 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/overview_page.py:
--------------------------------------------------------------------------------
1 | from nicegui import ui
2 | from typing import List
3 | from slugify import slugify
4 |
5 | from promptmage import PromptMage
6 |
7 |
def flow_card(flow: PromptMage):
    """Card summarizing one flow: open button, step count and run count."""
    with ui.card().style("padding: 20px; margin: 10px;"):
        ui.button(
            f"{flow.name}",
            on_click=lambda: ui.navigate.to(f"/{slugify(flow.name)}"),
        )
        ui.separator()
        ui.chip(f"{len(flow.steps)} Steps", icon="run_circle").props("square")
        history = flow.get_run_data()
        ui.chip(f"{len(history)} Runs", icon="check_circle").props("square")
17 |
18 |
def create_grid(elements, columns=4):
    """Lay out flow cards in rows of at most ``columns`` cards each."""
    for start in range(0, len(elements), columns):
        with ui.row():
            for element in elements[start : start + columns]:
                flow_card(element)
27 |
28 |
def build_overview_page(flows: List[PromptMage]):
    """Render the landing page listing all available flows as a card grid.

    Args:
        flows (List[PromptMage]): The flows to display.
    """
    with ui.column():
        ui.label("Available Flows").classes("font-bold text-lg")
        create_grid(flows, columns=4)
33 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/prompts_page.py:
--------------------------------------------------------------------------------
1 | from nicegui import ui
2 | from loguru import logger
3 | from collections import defaultdict
4 | from typing import List
5 |
6 | from promptmage import PromptMage
7 | from promptmage.prompt import Prompt
8 | from .styles import label_with_icon
9 |
10 |
11 | # Define a simple function to add custom styles for active tabs
12 | def add_custom_css():
13 | ui.add_head_html(
14 | """
15 |
40 | """
41 | )
42 |
43 |
def create_prompts_view(mage: PromptMage):
    """Build the prompts page: one vertical tab per prompt name, each listing
    all versions of that prompt with activate/edit/delete actions.

    Args:
        mage (PromptMage): The flow whose prompts are listed and edited.

    Returns:
        Callable: A ``build_ui`` function that renders the page when called.
    """
    # prompt editing dialog
    dialog = ui.dialog().props("full-width")
    # Add the custom styles to the head of the document
    add_custom_css()

    def create_prompt_content(prompts: List[Prompt]):
        """Render all versions of one prompt name, newest first."""
        content = ui.column().classes("w-full h-[88vh] p-10")
        with content:
            for prompt in sorted(prompts, key=lambda p: p.version, reverse=True):
                bg_color = ""
                # highlight active prompt in green
                if prompt.active:
                    bg_color = "bg-green-200 dark:bg-green-800"
                with ui.card().classes(bg_color).classes("p-5 w-full"):
                    # display run data
                    with ui.grid(columns=2).classes("gap-0"):
                        label_with_icon("Prompt ID:", icon="o_info")
                        ui.label(f"{prompt.id}")

                        label_with_icon("Name:", icon="o_badge")
                        ui.label(f"{prompt.name}")

                        label_with_icon("Version:", icon="o_tag")
                        ui.label(f"{prompt.version}")

                        label_with_icon("Active:", icon="o_check")
                        ui.label(f"{prompt.active}")

                        label_with_icon("System prompt:", icon="o_code")
                        ui.label(f"{prompt.system}")

                        label_with_icon("User prompt:", icon="o_psychology")
                        ui.label(f"{prompt.user}")

                    with ui.row():
                        # prompt.id is bound as a default argument so each
                        # button targets its own prompt (avoids the classic
                        # late-binding closure bug)
                        activate_button = ui.button(
                            "Activate Prompt",
                            icon="o_play_circle_filled",
                            on_click=lambda prompt_id=prompt.id: activate_prompt(
                                prompt_id
                            ),
                        )
                        ui.button(
                            "Edit Prompt",
                            icon="o_edit",
                            on_click=lambda prompt_id=prompt.id: edit_prompt(prompt_id),
                        )
                        delete_button = ui.button(
                            "Delete Prompt",
                            icon="o_delete",
                            on_click=lambda prompt_id=prompt.id: delete_prompt(
                                prompt_id
                            ),
                        )
                    if prompt.active:
                        # the active version cannot be re-activated or deleted
                        activate_button.disable()
                        delete_button.disable()
                    if not prompt.active:
                        delete_button.props("outline")
        return content

    def delete_prompt(prompt_id):
        """Delete a prompt version by its ID."""
        logger.info(f"Deleting prompt with ID: {prompt_id}.")
        mage.prompt_store.delete_prompt(prompt_id)
        ui.notify(f"Prompt {prompt_id} deleted.")

    def edit_prompt(prompt_id):
        """Open the edit dialog pre-filled with the given prompt version."""
        logger.info(f"Editing prompt with ID: {prompt_id}.")
        prompt = mage.prompt_store.get_prompt_by_id(prompt_id)
        dialog.clear()
        with dialog, ui.card():
            ui.label("Edit prompt").classes("text-2xl")
            with ui.row():
                ui.label(f"Name: {prompt.name}")
                ui.label(f"Version: {prompt.version}")
                ui.label(f"Prompt ID: {prompt_id}")
            with ui.row():
                system_prompt = ui.textarea(
                    value=prompt.system, label="System prompt"
                ).style("width: 500px; height: 200px;")
                user_prompt = ui.textarea(value=prompt.user, label="User prompt").style(
                    "width: 500px; height: 200px;"
                )
            with ui.row():
                ui.button(
                    "Save",
                    on_click=lambda: save_prompt(
                        prompt_id, system_prompt.value, user_prompt.value
                    ),
                )
                ui.button("Cancel", on_click=dialog.close)

        dialog.open()

    def activate_prompt(prompt_id):
        """Activate a prompt version and deactivate its same-name siblings."""
        logger.info(f"Activating prompt with ID: {prompt_id}.")
        # activate the selected prompt
        prompt = mage.prompt_store.get_prompt_by_id(prompt_id)
        prompt.active = True
        mage.prompt_store.update_prompt(prompt)
        # deactivate all other prompts with the same name
        for p in mage.prompt_store.get_prompts():
            if p.name == prompt.name and p.id != prompt_id:
                p.active = False
                mage.prompt_store.update_prompt(p)
        ui.notify(f"Prompt {prompt_id} activated.")

    def save_prompt(prompt_id: str, system: str, user: str):
        """Persist edited prompt texts, but only if they actually changed.

        Previously the prompt was written back unconditionally, even when the
        notification claimed "Not saved".
        """
        prompt = mage.prompt_store.get_prompt_by_id(prompt_id)

        if (prompt.system != system) or (prompt.user != user):
            prompt.system = system
            prompt.user = user
            mage.prompt_store.update_prompt(prompt)
            ui.notify("Prompt saved.")
        else:
            ui.notify("Prompt unchanged. Not saved.")
        dialog.close()

    def build_ui():
        """Assemble the tabbed prompt browser."""
        all_prompts = mage.prompt_store.get_prompts()
        # group them by name, keeping only prompts used by this flow's steps
        # (set membership instead of rebuilding the name list per prompt)
        step_prompt_names = {step.prompt_name for step in mage.steps.values()}
        prompts = defaultdict(list)
        for prompt in all_prompts:
            if prompt.name in step_prompt_names:
                prompts[prompt.name].append(prompt)

        # Main UI setup
        content = {name: prompts_for_name for name, prompts_for_name in prompts.items()}
        with ui.splitter(value=10).classes("w-full h-full") as splitter:
            with splitter.before:
                with ui.tabs().props("vertical").classes("w-full h-full") as tabs:
                    for title in content:
                        with ui.tab(title, label=title):
                            ui.chip(
                                f"{len(content[title])} versions", color="secondary"
                            ).props("outline square")
            with splitter.after:
                # next(iter(...), None) guards against an empty prompt list
                # (the previous list(...)[0] raised IndexError)
                with ui.tab_panels(tabs, value=next(iter(content), None)).props(
                    "vertical"
                ).classes("w-full h-full"):
                    for title, prompts_for_name in content.items():
                        with ui.tab_panel(title).classes("w-full h-full"):
                            create_prompt_content(prompts_for_name)

    return build_ui
192 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/runs_page.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import List
3 | from loguru import logger
4 | from slugify import slugify
5 | from nicegui import ui, app
6 |
7 | from promptmage import PromptMage, RunData
8 | from .styles import label_with_icon
9 |
10 |
def create_runs_view(mage: PromptMage):
    """Build the runs page: a table of all step runs with a sliding detail
    side panel, a compare dialog, and controls to add selected runs to a
    dataset.

    Args:
        mage (PromptMage): The flow whose run history is displayed.

    Returns:
        Callable: A ``build_ui`` function that renders the page when called.
    """

    datasets = mage.data_store.backend.get_datasets()

    # Off-screen fixed panel; it slides in/out by toggling the transform style.
    side_panel = (
        ui.element("div")
        .style(
            "position: fixed; top: 0; right: 0; width: 50%; height: 100%; transform: translateX(100%); transition: transform 0.3s ease; z-index: 1000; overflow-y: auto;"
        )
        .classes("bg-gray-100 dark:bg-slate-800")
    )
    # compare runs dialog
    compare_dialog = ui.dialog().props("full-width")

    def use_run_in_playground(step_run_id):
        # stash the run id in user storage so the playground page can load it
        app.storage.user["step_run_id"] = step_run_id
        ui.navigate.to(f"/{slugify(mage.name)}")

    # Function to show the side panel with detailed information
    def show_side_panel(run_data: RunData):
        """Fill the side panel with details for one run and slide it in."""
        side_panel.clear()
        with side_panel:
            ui.button(">>", on_click=hide_side_panel).style(
                "margin: 20px; margin-bottom: 0px; margin-top: 100px;"
            )
            with ui.card().style(
                "padding: 20px; margin-right: 20px; margin-top: 20px; margin-bottom: 20px; margin-left: 20px"
            ):
                # display run data
                with ui.row().classes("w-full"):
                    with ui.column().classes("gap-0"):
                        ui.label("Step name").classes("text-sm text-gray-500")
                        ui.label(f"{run_data.step_name}").classes("text-2xl")
                    ui.space()
                    with ui.column().classes("gap-0 items-center"):
                        ui.label("Status").classes("text-sm text-gray-500")
                        ui.chip(
                            f"{run_data.status}",
                            icon="",
                            color=f"{'green' if run_data.status == 'success' else 'red'}",
                        ).props("outline square")
                with ui.row().classes("w-full"):
                    with ui.column().classes("gap-0"):
                        label_with_icon(
                            "Execution time:", icon="hourglass_bottom"
                        ).classes("text-sm text-gray-500")
                        label_with_icon("Run At:", icon="o_schedule").classes(
                            "text-sm text-gray-500"
                        )
                        label_with_icon("Model:", icon="o_psychology").classes(
                            "text-sm text-gray-500"
                        )
                        label_with_icon("Step Run ID:", icon="o_info").classes(
                            "text-sm text-gray-500"
                        )
                        label_with_icon("Run ID:", icon="o_info").classes(
                            "text-sm text-gray-500"
                        )
                    with ui.column().classes("gap-0"):
                        ui.label(
                            f"{run_data.execution_time if run_data.execution_time else 0.0:.2f}s"
                        )
                        # [:19] trims sub-second precision; assumes run_time is
                        # an ISO-format timestamp string — TODO confirm
                        ui.label(f"{run_data.run_time[:19]}")
                        ui.label(f"{run_data.model}")
                        ui.label(f"{run_data.step_run_id}")
                        ui.label(f"{run_data.run_id}")

                if run_data.prompt:
                    ui.label("Prompt:").classes("text-lg")
                    with ui.row().classes("w-full"):
                        with ui.column().classes("gap-0"):
                            ui.label("Version:").classes("text-sm text-gray-500")
                            ui.label("ID:").classes("text-sm text-gray-500")
                            ui.label("System Prompt:").classes("text-sm text-gray-500")
                            ui.label("User Prompt:").classes("text-sm text-gray-500")
                        with ui.column().classes("gap-0 w-1/2"):
                            ui.label(f"{run_data.prompt.version}")
                            ui.label(f"{run_data.prompt.id}")
                            ui.label(f"{run_data.prompt.system}")
                            ui.label(f"{run_data.prompt.user}")

                ui.label("Input Data:").classes("text-lg")
                for key, value in run_data.input_data.items():
                    ui.markdown(f"**{key}**")
                    ui.markdown(f"{value}")
                ui.label("Output Data:").classes("text-lg")
                try:
                    # output_data may be a mapping or a plain value
                    for key, value in run_data.output_data.items():
                        ui.markdown(f"**{key}**")
                        ui.markdown(f"{value}")
                except AttributeError:
                    ui.markdown(f"{run_data.output_data}")
                with ui.row():
                    ui.button(
                        "Use in playground",
                        on_click=lambda: use_run_in_playground(run_data.step_run_id),
                    )
        side_panel.style("transform:translateX(0%);")
        side_panel.update()

    # Function to hide the side panel
    def hide_side_panel():
        """Clear the side panel and slide it off-screen."""
        side_panel.clear()
        side_panel.style("transform:translateX(100%);")
        side_panel.update()

    def build_ui():
        """Render the runs table plus the compare/add-to-dataset controls."""
        runs: List[RunData] = mage.get_run_data()

        def display_comparison():
            """Open a dialog comparing 2-5 selected successful runs of one step."""
            selected_runs = table.selected
            if len(selected_runs) < 2:
                ui.notify("Please select at least two runs to compare.")
                return
            if len(selected_runs) > 5:
                ui.notify("Please select at most five runs to compare.")
                return
            status_success = all([r["status"] == "success" for r in selected_runs])
            if not status_success:
                ui.notify("Please select only successful runs to compare.")
                return
            # check if all selected runs are from the same step
            step_names = set([r["name"] for r in selected_runs])
            if len(step_names) > 1:
                ui.notify("Please select runs from the same step to compare.")
                return
            compare_dialog.clear()
            with compare_dialog, ui.card():
                ui.label(f"Compare Runs for step {step_names.pop()}").classes(
                    "text-2xl"
                )
                with ui.row().style(
                    "display: flex; width: 100%; align-items: stretch;"
                ):
                    for run_data in selected_runs:
                        # get the results for the selected run
                        run: RunData = [
                            run
                            for run in runs
                            if run.step_run_id == run_data["step_run_id"]
                        ][0]
                        with ui.column().style("flex: 1;"):
                            with ui.card().style(
                                "flex-grow: 1; display: flex; flex-direction: column;"
                            ):
                                ui.label(f"step_run_id: {run_data['step_run_id']}")
                                ui.label("Prompt:").classes("text-lg")
                                with ui.row().classes("w-full"):
                                    with ui.column().classes("gap-0"):
                                        ui.label("Version:").classes(
                                            "text-sm text-gray-500"
                                        )
                                        ui.label("System Prompt:").classes(
                                            "text-sm text-gray-500"
                                        )
                                        ui.label("User Prompt:").classes(
                                            "text-sm text-gray-500"
                                        )
                                    with ui.column().classes("gap-0 w-1/2"):
                                        ui.label(f"{run.prompt.version}")
                                        ui.label(f"{run.prompt.system}")
                                        ui.label(f"{run.prompt.user}")
                                ui.label("Output Data:").classes("text-lg")
                                try:
                                    # output_data may be a mapping or a plain value
                                    for key, value in run.output_data.items():
                                        ui.markdown(f"**{key}**")
                                        ui.markdown(f"{value}")
                                except AttributeError:
                                    ui.markdown(f"{run.output_data}")

                ui.button("Close", on_click=compare_dialog.close)
            compare_dialog.open()

        def add_to_dataset():
            """Add all selected runs to the dataset chosen in the select box."""
            if dataset_select.value is None:
                ui.notify("Please select a dataset to add the runs to.")
                return
            dataset = datasets[dataset_select.value]
            selected_runs = table.selected
            for run in selected_runs:
                mage.data_store.backend.add_datapoint_to_dataset(
                    run["step_run_id"], dataset.id
                )
                logger.info(f"Added run {run['step_run_id']} to dataset {dataset.id}")
            ui.notify(
                f"Added {len(selected_runs)} runs to dataset {dataset.name} successfully."
            )

        # Main UI setup
        with ui.card().style("padding: 20px").classes("w-full"):
            with ui.row().classes("w-full"):
                ui.button("Compare Runs", on_click=display_comparison).style(
                    "margin-bottom: 20px"
                )
                ui.button("Add to dataset", on_click=add_to_dataset).style(
                    "margin-bottom: 20px"
                )
                # options keyed by list index into `datasets`
                dataset_select = ui.select(
                    {idx: f"{d.name}-{d.id}" for idx, d in enumerate(datasets)},
                    value=None,
                    label="Select Dataset",
                ).classes("w-1/3")
            # Create a table with clickable rows
            columns = [
                {
                    "name": "run_time",
                    "label": "Run At",
                    "field": "run_time",
                    "sortable": True,
                },
                {
                    "name": "status",
                    "label": "Status",
                    "field": "status",
                    "sortable": True,
                },
                {"name": "name", "label": "Step", "field": "name", "sortable": True},
                {
                    "name": "execution_time",
                    "label": "Execution Time (in seconds)",
                    "field": "execution_time",
                    "sortable": True,
                },
                {
                    "name": "run_id",
                    "label": "run_id",
                    "field": "run_id",
                },
                {"name": "step_run_id", "label": "step_run_id", "field": "step_run_id"},
            ]

            rows = [
                {
                    "run_id": run_data.run_id,
                    "step_run_id": run_data.step_run_id,
                    "name": run_data.step_name,
                    "status": run_data.status,
                    "run_time": run_data.run_time[:19],
                    # -1 marks runs without a recorded execution time
                    "execution_time": (
                        round(run_data.execution_time, 3)
                        if run_data.execution_time
                        else -1
                    ),
                }
                for run_data in runs
            ]

            table = ui.table(
                columns=columns,
                rows=rows,
                selection="multiple",
                row_key="step_run_id",
                pagination={
                    "rowsPerPage": 20,
                    "sortBy": "run_time",
                    "page": 1,
                    "descending": True,
                },
            )
            # custom cell rendering for the status column
            table.add_slot(
                "body-cell-status",
                """

                {{ props.value }}

                """,
            )

            def on_row_click(event):
                """Open the side panel for the run in the clicked row."""
                # event.args[-2] is the row dict supplied by the Quasar table
                selected_run_index = event.args[-2]["step_run_id"]
                show_side_panel(
                    run_data=[r for r in runs if r.step_run_id == selected_run_index][
                        -1
                    ]
                )

            table.on("rowClick", on_row_click)

    return build_ui
292 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/step_runner.py:
--------------------------------------------------------------------------------
1 | """This ui element represent the input, prompt and output of a callable step in the PromptMage."""
2 |
3 | from nicegui import ui, run, app
4 | from typing import List
5 | from loguru import logger
6 |
7 | from promptmage.mage import MageStep
8 | from .styles import textbox_style
9 |
10 |
RUNNING_ICON = "run_circle"  # expansion icon while a step is executing
NOT_RUNNING_ICON = "circle"  # expansion icon for an idle step
SUCCESS_RUN_ICON = "check_circle"  # expansion icon after a successful run
14 |
15 |
class InputOutputSection:
    """Refreshable group of textareas, one per user-facing input parameter of a step."""

    def __init__(self, step: MageStep):
        self.step = step
        # parameter name -> textarea element, populated on each render
        self.fields = {}

    @ui.refreshable
    def ui(self):
        """Render one pre-filled textarea per step parameter (except prompt/model)."""
        ui.label("Inputs:").classes("font-bold mr-5")
        for param in self.step.signature.parameters.values():
            # prompt and model are supplied by the framework, not the user
            if param.name in ("prompt", "model"):
                continue
            with ui.row():
                field = ui.textarea(
                    label=f"{param.name}",
                    value=self.step.input_values[param.name],
                )
                self.fields[param.name] = field.classes(textbox_style).props(
                    "outlined"
                )
37 |
38 |
def create_function_runner(step: MageStep):
    """Build the playground runner UI for a single flow step.

    Returns a ``build_ui`` callable that renders an expansion panel with
    model/prompt selectors, editable prompt and input fields, run/save
    buttons, and a markdown result area. The inner functions close over the
    widgets created here and over the currently selected ``prompt``.
    """
    input_output_section = InputOutputSection(step)
    system_prompt_field = None
    user_prompt_field = None
    model_select = None
    result_field = None
    expansion_tab = ui.expansion(
        f"Step: {step.name}", group="steps", icon=f"{NOT_RUNNING_ICON}"
    ).classes("text-lg w-full border")
    # load prompt if available
    if step.prompt_name:
        prompt = step.get_prompt()
        # load all available prompts for the step
        prompts = [
            p for p in step.prompt_store.get_prompts() if p.name == step.prompt_name
        ]
    else:
        prompt = None
        prompts = []

    # run id given in app.storage, initialize with this data
    if app.storage.user.get("step_run_id"):
        step_run_id = app.storage.user.get("step_run_id")
        run_data = step.data_store.get_data(step_run_id)
        # only restore the stored run when it belongs to this step
        if run_data.step_name == step.name:
            prompt = run_data.prompt
            step.input_values = run_data.input_data
            step.result = run_data.output_data
            expansion_tab.props(f"icon={SUCCESS_RUN_ICON}")
            expansion_tab.update()
            # consume the id so the run is only restored once
            del app.storage.user["step_run_id"]

    async def run_function():
        """Copy the edited values into the step/prompt and execute the step."""
        expansion_tab.props(f"icon={RUNNING_ICON}")
        expansion_tab.update()
        inputs = {
            name: field.value for name, field in input_output_section.fields.items()
        }
        if prompt is not None:
            # NOTE(review): the field values are copied into ``prompt`` here,
            # so set_prompt()'s change check below never sees a difference and
            # always reports the prompt as unchanged — confirm this is intended.
            prompt.system = system_prompt_field.value
            prompt.user = user_prompt_field.value
            set_prompt()
        if model_select:
            logger.info(f"Selected model: {model_select.value}")
            step.model = model_select.value
        # io_bound keeps the potentially slow step execution off the UI loop
        _ = await run.io_bound(step.execute, **inputs, prompt=prompt)
        if step.one_to_many:
            num_results = len(step.result.results)
            expansion_tab.props(f"caption='{num_results} results'")
        expansion_tab.props(f"icon={SUCCESS_RUN_ICON}")
        expansion_tab.update()

    def set_prompt():
        """Persist the edited prompt via the step if it differs from the editors."""
        nonlocal prompt
        system_prompt_field.update()
        user_prompt_field.update()
        if (prompt.system != system_prompt_field.value) or (
            prompt.user != user_prompt_field.value
        ):
            prompt.system = system_prompt_field.value
            prompt.user = user_prompt_field.value
            step.set_prompt(prompt)
            ui.notify("Prompt saved.")
        else:
            ui.notify("Prompt unchanged. Not saved.")

    def update_inputs():
        """Refresh input fields from ``step.input_values`` and show the running icon."""
        for name, field in input_output_section.fields.items():
            field.set_value(step.input_values[name])
            field.update()
        expansion_tab.props(f"icon={RUNNING_ICON}")
        expansion_tab.update()

    def update_results():
        """Render ``step.result`` into the markdown area and mark the step done."""
        newline = "\n\n"
        if isinstance(step.result, list):
            result_field.set_content(
                f"{[newline.join(result.results.values()) for result in step.result]}"
            )
        else:
            result_field.set_content(f"{newline.join(step.result.results.values())}")
        result_field.update()

        expansion_tab.props(f"icon={SUCCESS_RUN_ICON}")
        if isinstance(step.result, list):
            num_results = len(step.result)
            expansion_tab.props(f"caption='{num_results} results'")
        expansion_tab.update()

    def display_prompt(prompt_str: str):
        """Load the prompt picked in the dropdown and show it in the editors."""
        nonlocal prompt
        logger.info(f"Selected prompt: {prompt_str}")
        # dropdown entries are formatted as "<name> - v<version>"
        prompt_name, version = prompt_str.split(" - v")
        prompt = step.prompt_store.get_prompt(prompt_name, version=int(version))
        system_prompt_field.set_value(prompt.system)
        user_prompt_field.set_value(prompt.user)
        system_prompt_field.update()
        user_prompt_field.update()

    # keep the UI in sync with step executions triggered elsewhere
    step.on_input_change(update_inputs)
    step.on_output_change(update_results)

    def build_ui():
        """Render the complete runner panel for this step."""
        nonlocal user_prompt_field, system_prompt_field, result_field, expansion_tab, model_select
        with expansion_tab:
            ui.label(f"ID: {step.step_id}")
            with ui.column().classes("w-full"):
                with ui.row().classes("w-full"):
                    # show available models if available
                    if step.available_models:
                        model_select = ui.select(
                            step.available_models,
                            label="Select model",
                            value=step.model,
                        ).classes("w-1/3")
                    # show available prompts
                    prompt_select = ui.select(
                        [f"{p.name} - v{p.version}" for p in prompts],
                        label="Select prompt",
                        value=(
                            f"{prompt.name} - v{prompt.version}"
                            if prompt
                            else "No prompt selected"
                        ),
                        on_change=lambda event: display_prompt(event.value),
                    ).classes("w-1/3")

                with ui.row().classes("w-full"):
                    ui.label("Prompts:").classes("font-bold")
                    with ui.row().classes("grow"):
                        system_prompt_field = (
                            ui.textarea(
                                label="System prompt:",
                                value=(
                                    prompt.system if prompt else "No prompt supported"
                                ),
                            )
                            .classes(textbox_style)
                            .props("outlined")
                        )
                    with ui.row().classes("grow"):
                        user_prompt_field = (
                            ui.textarea(
                                label="User prompt:",
                                value=(
                                    prompt.user if prompt else "No prompt supported"
                                ),
                            )
                            .classes(textbox_style)
                            .props("outlined")
                        )
                with ui.row():
                    input_output_section.ui()

                with ui.row().classes("w-full justify-end"):
                    ui.button(
                        "Save & Run", on_click=run_function, icon="o_play_circle_filled"
                    )
                    ui.button("Save", on_click=set_prompt, icon="o_save")
                ui.separator()
                with ui.row().classes("w-full justify-between"):
                    ui.label("Result:").classes("font-bold")
                    ui.button(
                        "Copy to clipboard",
                        icon="o_content_copy",
                        on_click=lambda: ui.clipboard.write(
                            step.result or "No result available"
                        ),
                    )
                result_field = (
                    ui.markdown(f"{step.result}" if step.result else "")
                    .style("margin-top: 20px;")
                    .classes("color-black dark:color-white")
                )

    return build_ui
215 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/styles.py:
--------------------------------------------------------------------------------
1 | from nicegui import Tailwind, ui
2 |
3 |
# Shared width classes applied to the prompt/input textareas across the GUI.
textbox_style = "w-full max-w-3xl"  # border-2 border-gray-300 rounded-lg ring focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent"
5 |
6 |
def label_with_icon(label: str, icon: str):
    """Build and return a row containing an icon followed by a text label."""
    container = ui.row().classes("gap-2 items-center")
    with container:
        ui.icon(icon)
        ui.label(label)
    return container
13 |
--------------------------------------------------------------------------------
/promptmage/frontend/components/theme.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 | from loguru import logger
3 | from nicegui import ui, app
4 |
5 |
6 | from .menu import menu
7 |
8 |
@contextmanager
def frame(navigation_title: str, flow_name: str = None, subtitle: str = None):
    """Custom page frame to share the same styling and behavior across all pages.

    Args:
        navigation_title: Title shown in the page header.
        flow_name: Slug of the current flow, forwarded to the menu.
        subtitle: Optional secondary line under the title.

    Yields inside a full-width column so callers can add their page content.
    """
    dark_mode_icon = None
    # load dark mode from local storage
    dark_value = app.storage.user.get("dark_mode", True)
    dark = ui.dark_mode(value=dark_value)

    def toggle_dark_mode():
        """Toggle dark mode on and off."""
        if dark.value:
            dark.disable()
            dark_mode_icon.name = "dark_mode"
        else:
            dark.enable()
            dark_mode_icon.name = "light_mode"
        # store dark mode in local storage
        app.storage.user["dark_mode"] = dark.value
        dark_mode_icon.update()

    # ui.colors(
    #     primary="#6E93D6", secondary="#53B689", accent="#111B1E", positive="#53B689"
    # )
    # global color palette for the whole page
    ui.colors(
        primary="#166088",
        secondary="#8fb9d2",
        accent="#FFFD98",
        dark="#13140b",
        positive="#21ba45",
        negative="#c10015",
        info="#bde4a7",
        warning="#f2c037",
    )

    with ui.header(elevated=True):
        # logo button navigates back to the overview page
        with ui.button(on_click=lambda event: ui.navigate.to("/")).props(
            "flat color=white"
        ).classes("p-0"):
            ui.avatar("img:/static/promptmage-logo.png", square=True)

        if subtitle:
            with ui.column().classes("gap-0"):
                ui.label(navigation_title).classes("text-2xl")
                ui.label(subtitle).classes("text-sm text-gray-400")
        else:
            ui.label(navigation_title).classes("text-2xl self-center")
        ui.space()
        menu(flow_name=flow_name)
        with ui.button(
            on_click=toggle_dark_mode,
        ).props(
            "flat small color=white"
        ).classes("self-center"):
            # icon shows the mode you would switch TO, not the current one
            dark_mode_icon = ui.icon("light_mode" if dark.value else "dark_mode")

    with ui.column().classes("self-center w-full gap-0"):
        yield
66 |
--------------------------------------------------------------------------------
/promptmage/frontend/frontend.py:
--------------------------------------------------------------------------------
1 | """Frontend module for PromptMage."""
2 |
3 | from fastapi import FastAPI
4 | from nicegui import ui
5 | from typing import List
6 | from slugify import slugify
7 |
8 | from promptmage import PromptMage
9 |
10 | from .components import theme
11 | from .components.runs_page import create_runs_view
12 | from .components.prompts_page import create_prompts_view
13 | from .components.overview_page import build_overview_page
14 | from .components.flow_page import build_flow_page
15 | from .components.evaluation_page import build_evaluation_page
16 | from .components.dataset_page import build_dataset_page
17 |
18 |
class PromptMageFrontend:
    """A class that creates a frontend for a PromptMage instance."""

    def __init__(self, flows: List[PromptMage]):
        # All flows served by this frontend; flows_dict maps the slugified
        # flow name to the flow for resolving the {flow_name} path parameter.
        self.flows = flows
        self.flows_dict = {slugify(flow.name): flow for flow in flows}
        self.current_flow = self.flows[0]

    def init_from_api(self, fastapi_app: FastAPI) -> None:
        """Initialize the frontend from a FastAPI application."""

        @ui.page("/", title="PromptMage")
        def main_page():
            """Landing page listing all available flows."""
            with theme.frame(
                "Welcome to the PromptMage GUI",
                flow_name=slugify(self.current_flow.name),
                subtitle="Select a flow to get started",
            ):
                build_overview_page(self.flows)

        @ui.page("/{flow_name}", title="PromptMage - Flow")
        def flow_page(flow_name: str):
            """Playground page for running a single flow."""
            self.current_flow = self.flows_dict[flow_name]
            with theme.frame(
                "Playground",
                flow_name=flow_name,
                subtitle="Run, evaluate, and manage your flow",
            ):
                build_flow_page(self.flows_dict[flow_name])

        @ui.page("/runs/{flow_name}", title="PromptMage - Runs")
        def runs_page(flow_name: str):
            """Overview of all stored runs for a flow."""
            self.current_flow = self.flows_dict[flow_name]
            with theme.frame(
                f"Runs Overview - {flow_name}",
                flow_name=flow_name,
                subtitle="View and manage runs",
            ):
                create_runs_view(self.current_flow)()

        @ui.page("/prompts/{flow_name}", title="PromptMage - Prompts")
        def prompts_page(flow_name: str):
            """Overview of all stored prompts for a flow."""
            self.current_flow = self.flows_dict[flow_name]
            with theme.frame(
                f"Prompts Overview - {flow_name}",
                flow_name=flow_name,
                subtitle="View and manage prompts",
            ):
                create_prompts_view(self.current_flow)()

        @ui.page("/evaluation/{flow_name}", title="PromptMage - Evaluation")
        def evaluation_page(flow_name: str):
            """Dataset selection page for evaluating a flow."""
            self.current_flow = self.flows_dict[flow_name]
            with theme.frame(
                f"Evaluation - {flow_name}",
                flow_name=flow_name,
                subtitle="Select a dataset to evaluate",
            ):
                build_evaluation_page(self.current_flow)

        @ui.page(
            "/evaluation/{flow_name}/{dataset_id}",
            title="PromptMage - Evaluation Dataset",
        )
        def evaluation_dataset_page(flow_name: str, dataset_id: str):
            """Evaluation page for a single dataset of a flow."""
            self.current_flow = self.flows_dict[flow_name]
            with theme.frame(
                f"Evaluation - {flow_name} - {dataset_id}",
                flow_name=flow_name,
                subtitle="Evaluate your results",
            ):
                build_dataset_page(self.current_flow, dataset_id)()

        # Mount the NiceGUI frontend onto the given FastAPI application.
        ui.run_with(
            fastapi_app,
            mount_path="/gui",  # NOTE this can be omitted if you want the paths passed to @ui.page to be at the root
            storage_secret="pick your private secret here",  # NOTE setting a secret is optional but allows for persistent storage per user
            dark=True,
            favicon="🧙",
        )
99 |
--------------------------------------------------------------------------------
/promptmage/prompt.py:
--------------------------------------------------------------------------------
1 | """This module contains the Prompt class, which represents a prompt."""
2 |
3 | import uuid
4 | from typing import Dict, List
5 |
6 |
7 | class Prompt:
8 | """A class that represents a prompt.
9 |
10 | Attributes:
11 | name (str): The name of the prompt.
12 | prompt_id (str): The ID of the prompt.
13 | system (str): The system that generated the prompt.
14 | user (str): The user that the prompt is for.
15 | version (int): The version of the prompt.
16 | template_vars (List[str]): The template variables in the prompt.
17 | id (str): The unique identifier for the prompt.
18 | active (bool): Whether the prompt is active.
19 | """
20 |
21 | def __init__(
22 | self,
23 | name: str,
24 | system: str,
25 | user: str,
26 | template_vars: List[str],
27 | version: int = 1,
28 | id: str | None = None,
29 | active: bool = False,
30 | ):
31 | self.name = name
32 | self.id = id if id else str(uuid.uuid4())
33 | self.system = system
34 | self.user = user
35 | self.version = version
36 | self.template_vars = template_vars
37 | self.active = active
38 |
39 | def __repr__(self):
40 | return f"Prompt(id={self.id}, \
41 | name={self.name}, \
42 | version={self.version}, \
43 | active={self.active}, \
44 | system={self.system}, \
45 | user={self.user}), \
46 | template_vars={self.template_vars})"
47 |
48 | def to_dict(self):
49 | return {
50 | "name": self.name,
51 | "id": self.id,
52 | "active": self.active,
53 | "system": self.system,
54 | "user": self.user,
55 | "version": self.version,
56 | "template_vars": self.template_vars,
57 | }
58 |
59 | @classmethod
60 | def from_dict(cls, data: Dict) -> "Prompt":
61 | return cls(
62 | name=data["name"],
63 | id=data["id"],
64 | active=data["active"],
65 | system=data["system"],
66 | user=data["user"],
67 | template_vars=data["template_vars"],
68 | version=data["version"],
69 | )
70 |
--------------------------------------------------------------------------------
/promptmage/remote.py:
--------------------------------------------------------------------------------
1 | """This module contains the api for the remote backend of the PromptMage package."""
2 |
3 | from loguru import logger
4 |
5 | from fastapi import FastAPI, Path, Query
6 | from fastapi.middleware.cors import CORSMiddleware
7 |
8 | from promptmage import RunData, Prompt
9 | from promptmage.exceptions import PromptNotFoundException
10 |
11 |
class RemoteBackendAPI:
    """FastAPI wrapper exposing the data and prompt storage backends over HTTP."""

    def __init__(self, url: str, data_backend, prompt_backend):
        # url: base URL this backend is served from (stored for reference)
        self.url = url
        self.data_backend = data_backend
        self.prompt_backend = prompt_backend

    def get_app(self) -> FastAPI:
        """Get an instance of the FastAPI app."""
        app = FastAPI(
            title="PromptMage Remote Backend",
            description="API for the remote backend of PromptMage.",
        )

        # NOTE(review): CORS is wide open (all origins/methods/headers with
        # credentials) — confirm this is acceptable for deployments.
        app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

        @app.get("/", tags=["root"])
        async def index():
            return {"message": "Welcome to the PromptMage Remote Backend!"}

        # Endpoints for the data storage backend
        @app.post("/runs", tags=["runs"])
        async def store_run(run_data: dict):
            logger.info(f"Storing run data: {run_data}")
            # rebuild the RunData object (and its nested Prompt) from raw JSON
            run_data = RunData(**run_data)
            run_data.prompt = Prompt(**run_data.prompt)
            self.data_backend.store_data(run_data)

        @app.get("/runs/{step_run_id}", tags=["runs"])
        async def get_run(step_run_id: str = Path(...)):
            return self.data_backend.get_data(step_run_id)

        @app.get("/runs", tags=["runs"])
        async def get_all_runs():
            return self.data_backend.get_all_data()

        # Endpoints for the prompt storage backend

        @app.post("/prompts", tags=["prompts"])
        async def store_prompt(prompt: dict):
            logger.info(f"Storing prompt: {prompt}")
            self.prompt_backend.store_prompt(Prompt(**prompt))

        @app.put("/prompts", tags=["prompts"])
        async def update_prompt(prompt: dict):
            logger.info(f"Updating prompt: {prompt}")
            self.prompt_backend.update_prompt(Prompt(**prompt))

        @app.get("/prompts/{prompt_name}", tags=["prompts"])
        async def get_prompt(
            prompt_name: str = Path(
                ..., description="The name of the prompt to retrieve"
            ),
            version: int | None = Query(None, description="The version of the prompt"),
            active: bool | None = Query(
                None, description="Whether the prompt is active"
            ),
        ):
            logger.info(f"Retrieving prompt with name: {prompt_name}")
            try:
                return self.prompt_backend.get_prompt(prompt_name, version, active)
            except PromptNotFoundException as e:
                logger.error(
                    f"Prompt with ID {prompt_name} not found, returning an empty prompt."
                )
                # return an empty prompt if the prompt is not found
                return Prompt(
                    name=prompt_name,
                    version=1,
                    system="You are a helpful assistant.",
                    user="",
                    template_vars=[],
                    active=False,
                )

        # NOTE(review): this handler is sync while its siblings are async —
        # FastAPI supports both, but confirm the inconsistency is intentional.
        @app.get("/prompts", tags=["prompts"])
        def get_prompts():
            logger.info("Retrieving all prompts.")
            return self.prompt_backend.get_prompts()

        @app.get("/prompts/id/{prompt_id}", tags=["prompts"])
        async def get_prompt_by_id(
            prompt_id: str = Path(..., description="The ID of the prompt to retrieve")
        ):
            logger.info(f"Retrieving prompt with ID {prompt_id}")
            return self.prompt_backend.get_prompt_by_id(prompt_id)

        @app.delete("/prompts/{prompt_id}", tags=["prompts"])
        async def delete_prompt(prompt_id: str = Path(...)):
            logger.info(f"Deleting prompt with ID: {prompt_id}")
            self.prompt_backend.delete_prompt(prompt_id)

        # Endpoints for the datasets

        @app.get("/datasets/{dataset_id}", tags=["datasets"])
        async def get_dataset(dataset_id: str):
            return self.data_backend.get_dataset(dataset_id)

        @app.get("/datasets", tags=["datasets"])
        async def get_datasets():
            return self.data_backend.get_datasets()

        return app
121 |
--------------------------------------------------------------------------------
/promptmage/result.py:
--------------------------------------------------------------------------------
1 | import uuid
2 |
3 |
4 | class MageResult:
5 |
6 | def __init__(
7 | self,
8 | next_step: str | None = None,
9 | error: str | None = None,
10 | **kwargs,
11 | ):
12 | self.id = str(uuid.uuid4())
13 | self.next_step = next_step
14 | self.results: dict = kwargs
15 | self.error = error
16 |
17 | def __repr__(self):
18 | return f""
19 |
--------------------------------------------------------------------------------
/promptmage/run.py:
--------------------------------------------------------------------------------
class MageRun:
    """This represents a run of a flow in the PromptMage."""

    def __init__(self):
        # Placeholder: no per-run state is tracked here yet.
        pass
8 |
--------------------------------------------------------------------------------
/promptmage/run_data.py:
--------------------------------------------------------------------------------
1 | """This module contains the RunData class, which is used to represent the data for a single run of a promptmage flow."""
2 |
3 | import uuid
4 | from datetime import datetime
5 | from typing import Dict
6 |
7 | from promptmage.prompt import Prompt
8 |
9 |
10 | class RunData:
11 | """A class that represents the data for a single run of a promptmage flow."""
12 |
13 | def __init__(
14 | self,
15 | step_name: str,
16 | prompt: Prompt,
17 | input_data: Dict,
18 | output_data: Dict,
19 | run_id: str = str(uuid.uuid4()),
20 | step_run_id: str | None = None,
21 | run_time: datetime | None = None,
22 | execution_time: float | None = None, # execution_time in seconds
23 | status: str | None = None,
24 | model: str | None = None,
25 | ):
26 | self.step_run_id = step_run_id if step_run_id else str(uuid.uuid4())
27 | self.run_id = run_id
28 | self.step_name = step_name
29 | self.run_time = run_time if run_time else str(datetime.now())
30 | self.execution_time = execution_time
31 | self.prompt = prompt
32 | self.input_data = input_data
33 | self.output_data = output_data
34 | self.status = status
35 | self.model = model
36 |
37 | def __repr__(self) -> str:
38 | return (
39 | f"RunData(run_id={self.run_id}, "
40 | f"step_run_id={self.step_run_id}, "
41 | f"step_name={self.step_name}, "
42 | f"status={self.status}, "
43 | f"run_time={self.run_time}, "
44 | f"execution_time={self.execution_time}, "
45 | f"prompt={self.prompt}, "
46 | f"input_data={self.input_data}, "
47 | f"output_data={self.output_data}, "
48 | f"model={self.model})"
49 | )
50 |
51 | def to_dict(self) -> Dict:
52 | return {
53 | "prompt": self.prompt.to_dict() if self.prompt else None,
54 | "step_name": self.step_name,
55 | "input_data": self.input_data,
56 | "output_data": self.output_data,
57 | "run_id": self.run_id,
58 | "step_run_id": self.step_run_id,
59 | "run_time": self.run_time,
60 | "model": self.model,
61 | "execution_time": self.execution_time,
62 | "status": self.status,
63 | }
64 |
65 | @classmethod
66 | def from_dict(cls, data):
67 | return cls(
68 | data["step_name"],
69 | data["prompt"],
70 | data["input_data"],
71 | data["output_data"],
72 | data["run_id"],
73 | data["step_run_id"],
74 | data["run_time"],
75 | data["status"],
76 | data["model"],
77 | data["execution_time"],
78 | )
79 |
--------------------------------------------------------------------------------
/promptmage/static/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/promptmage/static/android-chrome-192x192.png
--------------------------------------------------------------------------------
/promptmage/static/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/promptmage/static/android-chrome-512x512.png
--------------------------------------------------------------------------------
/promptmage/static/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/promptmage/static/apple-touch-icon.png
--------------------------------------------------------------------------------
/promptmage/static/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/promptmage/static/favicon-16x16.png
--------------------------------------------------------------------------------
/promptmage/static/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/promptmage/static/favicon-32x32.png
--------------------------------------------------------------------------------
/promptmage/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/promptmage/static/favicon.ico
--------------------------------------------------------------------------------
/promptmage/static/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | 🧙 PromptMage
8 |
75 |
76 |
77 |
78 |
79 |
Welcome to PromptMage
80 |
86 |
87 |
88 | © 2024 PromptMage. All rights reserved.
89 |
90 |
91 |
92 |
--------------------------------------------------------------------------------
/promptmage/static/promptmage-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/promptmage/static/promptmage-logo.png
--------------------------------------------------------------------------------
/promptmage/static/site.webmanifest:
--------------------------------------------------------------------------------
1 | {"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}
--------------------------------------------------------------------------------
/promptmage/step.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | import time
3 | import inspect
4 | from typing import Callable, List
5 | from loguru import logger
6 |
7 | from .prompt import Prompt
8 | from .run_data import RunData
9 | from .result import MageResult
10 | from .storage import PromptStore, DataStore
11 |
12 |
class MageStep:
    """A class to represent a step in a PromptMage instance.

    Attributes:
        step_id (str): The unique identifier for the step.
        name (str): The name of the step.
        func (Callable): The function to execute for the step.
        signature (inspect.Signature): The signature of the function.
        prompt_store (PromptStore): The prompt store to use for storing prompts.
        data_store (DataStore): The data store to use for storing data.
        prompt_name (str): The name of the prompt associated with the step.
        depends_on (str): The name of the step that this step depends on.
        initial (bool): Whether the step is an initial step.
        one_to_many (bool): Whether the step is a one-to-many step.
        many_to_one (bool): Whether the step is a many-to-one step.
        model (str): The model to use for the step.
        available_models (List[str]): The available models for the step.
        pass_through_inputs (List[str]): The inputs to pass through to the next step.
    """

    def __init__(
        self,
        name: str,
        func: Callable,
        prompt_store: PromptStore,
        data_store: DataStore,
        prompt_name: str | None = None,
        depends_on: str | None = None,
        initial: bool = False,
        one_to_many: bool = False,
        many_to_one: bool = False,
        model: str | None = None,
        available_models: List[str] | None = None,
        pass_through_inputs: List[str] | None = None,
    ):
        self.step_id = str(uuid.uuid4())
        self.name = name
        self.func = func
        self.signature = inspect.signature(func)
        self.prompt_store = prompt_store
        self.data_store = data_store
        self.prompt_name = prompt_name
        self.depends_on = depends_on
        self.initial = initial
        self.one_to_many = one_to_many
        self.many_to_one = many_to_one
        self.model = model
        self.available_models = available_models
        self.pass_through_inputs = pass_through_inputs

        # store inputs and results
        self.input_values = {}
        self.result = None

        # Initialize input values with default parameter values
        # ("prompt" and "model" are injected by execute(), not user inputs).
        for param in self.signature.parameters.values():
            if param.name in ["prompt", "model"]:
                continue
            if param.default is not inspect.Parameter.empty:
                self.input_values[param.name] = param.default
            else:
                self.input_values[param.name] = None

        # callbacks for the frontend
        self._input_callbacks = []
        self._output_callbacks = []

    def execute(
        self, prompt: Prompt | None = None, active: bool | None = None, **inputs
    ):
        """Execute the step with the given inputs.

        Args:
            prompt: Explicit prompt to use; when omitted and the step has a
                ``prompt_name``, the prompt is loaded from the prompt store.
            active: Forwarded to the prompt lookup when no explicit prompt is
                supplied.
            **inputs: Values for the step function's parameters; a list value
                marks that parameter as the fan-out input for one-to-many steps.

        Returns:
            The step function's result, a list of results for one-to-many
            steps, or a MageResult wrapping the error message on failure.
        """
        logger.info(f"Executing step: {self.name}...")
        multi_input_param = None
        # set the inputs
        logger.info(f"Setting inputs: {inputs.keys()}")
        for key, value in inputs.items():
            if isinstance(value, list):
                # remember the (last seen) list-valued parameter for fan-out
                multi_input_param = key
            self.input_values[key] = value
        # set the model
        if self.model:
            self.input_values["model"] = self.model
        # get the prompt and set it if exists
        if self.prompt_name:
            if prompt:
                self.input_values["prompt"] = prompt
            else:
                prompt = self.get_prompt(active=active)
                self.input_values["prompt"] = prompt
        else:
            prompt = None
        # run the input callbacks
        for callback in self._input_callbacks:
            callback()
        # execute the function and store the result
        start_time = time.time()
        try:
            if self.one_to_many:
                logger.info("Executing step one-to-many")
                self.result = []
                if multi_input_param:
                    param_values = self.input_values[multi_input_param]
                    # call the function once per value, then restore the list
                    for value in param_values:
                        self.input_values[multi_input_param] = value
                        self.result.append(self.func(**self.input_values))
                    self.input_values[multi_input_param] = param_values
                else:
                    raise ValueError(
                        "One-to-many step requires a list input parameter."
                    )
            elif self.many_to_one:
                logger.info("Executing step many-to-one")
                self.result = self.func(**self.input_values)
            else:
                logger.info("Executing step normally")
                self.result = self.func(**self.input_values)
            status = "success"
        except Exception as e:
            # wrap the failure so downstream code always gets a result object
            logger.error(f"Error executing step: {e}")
            self.result = MageResult(error=f"Error: {e}")
            status = "failed"
        execution_time = time.time() - start_time
        # store the run data
        self.store_run(prompt=prompt, status=status, execution_time=execution_time)
        # run the output callbacks
        for callback in self._output_callbacks:
            callback()
        # NOTE(review): this logs "executed successfully" even when
        # status == "failed" — confirm intended.
        logger.info(f"Step {self.name} executed successfully.")
        return self.result

    def __repr__(self):
        return (
            f"Step(step_id={self.step_id}, "
            f"name={self.name}, "
            f"prompt_name={self.prompt_name}, "
            f"depends_on={self.depends_on})"
        )

    def get_prompt(
        self, version: int | None = None, active: bool | None = None
    ) -> Prompt:
        """Fetch this step's prompt from the prompt store."""
        return self.prompt_store.get_prompt(self.prompt_name, version, active)

    def set_prompt(self, prompt: Prompt):
        """Persist *prompt*, deactivating it first.

        Falls back to storing it as a new prompt when the update fails
        (e.g. the prompt does not exist in the store yet).
        """
        prompt.active = False
        try:
            self.prompt_store.update_prompt(prompt)
        except Exception as e:
            self.prompt_store.store_prompt(prompt)

    def store_run(
        self,
        prompt: Prompt | None = None,
        status: str = "success",
        execution_time: float = 0.0,
    ):
        """Store the run data in the data store (no-op when none is configured)."""
        if self.data_store:
            run_data = RunData(
                step_name=self.name,
                prompt=prompt if self.prompt_name else None,
                # strip the injected "prompt"/"model" entries from the inputs
                input_data={
                    k: v
                    for k, v in self.input_values.items()
                    if k not in ["prompt", "model"]
                },
                output_data=(
                    [r.results for r in self.result]
                    if isinstance(self.result, list)
                    else self.result.results
                ),
                status=status,
                model=self.model,
                execution_time=execution_time,
            )
            self.data_store.store_data(run_data)

    def on_input_change(self, callback):
        """Register a callback invoked whenever the step's inputs are (re)set."""
        self._input_callbacks.append(callback)

    def on_output_change(self, callback):
        """Register a callback invoked after each execution completes."""
        self._output_callbacks.append(callback)
195 |
--------------------------------------------------------------------------------
/promptmage/storage/__init__.py:
--------------------------------------------------------------------------------
1 | from .storage_backend import StorageBackend
2 | from .sqlite_backend import SQLitePromptBackend, SQLiteDataBackend
3 | from .file_backend import FileBackend
4 | from .data_store import DataStore
5 | from .memory_backend import InMemoryPromptBackend, InMemoryDataBackend
6 | from .prompt_store import PromptStore
7 | from .remote_prompt_backend import RemotePromptBackend
8 | from .remote_data_backend import RemoteDataBackend
9 |
10 | __all__ = [
11 | "StorageBackend",
12 | "SQLitePromptBackend",
13 | "SQLiteDataBackend",
14 | "FileBackend",
15 | "InMemoryPromptBackend",
16 | "InMemoryDataBackend",
17 | "DataStore",
18 | "PromptStore",
19 | "RemotePromptBackend",
20 | "RemoteDataBackend",
21 | ]
22 |
--------------------------------------------------------------------------------
/promptmage/storage/data_store.py:
--------------------------------------------------------------------------------
1 | """This module contains the DataStore class, which implements the storage and retrieval of data with different backends."""
2 |
3 | from typing import Dict
4 | from loguru import logger
5 |
6 | from promptmage.storage import StorageBackend
7 | from promptmage.run_data import RunData
8 | from promptmage.exceptions import DataNotFoundException
9 |
10 |
class DataStore:
    """Facade that stores and retrieves run data through a pluggable backend."""

    def __init__(self, backend):
        """Wrap the given storage backend (any object with the data methods)."""
        self.backend: StorageBackend = backend

    def store_data(self, data: RunData):
        """Persist a single run-data record via the backend."""
        logger.info(f"Storing data: {data}")
        self.backend.store_data(data)

    def get_data(self, step_run_id: str) -> RunData:
        """Look up one record by step-run ID.

        Raises:
            DataNotFoundException: when the backend returns nothing.
        """
        logger.info(f"Retrieving data with ID: {step_run_id}")
        record = self.backend.get_data(step_run_id)
        if not record:
            raise DataNotFoundException(step_run_id)
        return record

    def get_all_data(self) -> Dict:
        """Return every stored record from the backend."""
        return self.backend.get_all_data()
33 |
--------------------------------------------------------------------------------
/promptmage/storage/file_backend.py:
--------------------------------------------------------------------------------
1 | """This module contains the FileBackend class, which is a subclass of the StorageBackend class. It is used to store the data in a file on the local filesystem."""
2 |
3 | from promptmage.storage.storage_backend import StorageBackend
4 |
5 |
class FileBackend(StorageBackend):
    """A class that stores the data in a file on the local filesystem.

    NOTE(review): only the constructor is implemented here; no store/get
    methods are defined in this class.
    """

    def __init__(self, file_path: str):
        # Path of the file used for storage; not opened or validated here.
        self.file_path = file_path
11 |
--------------------------------------------------------------------------------
/promptmage/storage/memory_backend.py:
--------------------------------------------------------------------------------
1 | """This module contains the InMemoryBackend class, which implements a simple in-memory storage backend for prompts."""
2 |
3 | from typing import Dict
4 |
5 | from promptmage.prompt import Prompt
6 | from promptmage.run_data import RunData
7 | from promptmage.storage import StorageBackend
8 | from promptmage.exceptions import PromptNotFoundException
9 |
10 |
class InMemoryPromptBackend(StorageBackend):
    """A simple in-memory storage backend for prompts.

    Prompts are kept in a dict keyed by prompt name, so only the most
    recently stored prompt per name is retained.
    """

    def __init__(self):
        # Maps prompt name -> serialized prompt dict (see store_prompt).
        self.prompts: Dict[str, Dict] = {}

    def store_prompt(self, prompt: Prompt):
        """Store a prompt in memory, replacing any prompt with the same name."""
        self.prompts[prompt.name] = prompt.to_dict()

    def get_prompt(
        self, prompt_name: str, version: int | None = None, active: bool | None = None
    ) -> Prompt:
        """Retrieve a prompt from memory.

        The optional parameters match the interface ``PromptStore`` calls with
        (``backend.get_prompt(name, version, active)``); previously this method
        only accepted a name and raised TypeError when used via PromptStore.

        Args:
            prompt_name (str): The name of the prompt to retrieve.
            version (int | None): If given, the stored prompt must have this version.
            active (bool | None): If given, the stored prompt's active flag must match.

        Raises:
            PromptNotFoundException: if no stored prompt satisfies the criteria.
        """
        if prompt_name not in self.prompts:
            raise PromptNotFoundException(f"Prompt with name {prompt_name} not found.")
        prompt = Prompt.from_dict(self.prompts[prompt_name])
        # Only one version per name is kept in memory; filter rather than search.
        if version is not None and prompt.version != version:
            raise PromptNotFoundException(
                f"Prompt with name {prompt_name} and version {version} not found."
            )
        if active is not None and prompt.active != active:
            raise PromptNotFoundException(
                f"Prompt with name {prompt_name} and active={active} not found."
            )
        return prompt

    def get_prompts(self) -> Dict:
        """Retrieve all prompts from memory (as their serialized dicts)."""
        return self.prompts
30 |
31 |
class InMemoryDataBackend(StorageBackend):
    """A simple in-memory storage backend for run data."""

    def __init__(self):
        # Maps run_id -> serialized run-data dict (see store_data).
        self.data: Dict[str, Dict] = {}

    def store_data(self, run: RunData):
        """Store data in memory, keyed by the run's run_id."""
        self.data[run.run_id] = run.to_dict()

    def get_data(self, run_id: str) -> RunData | None:
        """Retrieve data from memory.

        Returns None for an unknown run_id so that DataStore.get_data can
        raise DataNotFoundException; previously a missing key was passed as
        None into RunData.from_dict and crashed there.
        """
        raw = self.data.get(run_id)
        if raw is None:
            return None
        return RunData.from_dict(raw)

    def get_all_data(self) -> Dict:
        """Retrieve all data from memory (as serialized dicts)."""
        return self.data
49 |
--------------------------------------------------------------------------------
/promptmage/storage/prompt_store.py:
--------------------------------------------------------------------------------
1 | """This module contains the PromptStore class, which implements the storage and retrieval of prompts with different backends."""
2 |
3 | from typing import List
4 | from loguru import logger
5 |
6 | from promptmage.storage import StorageBackend
7 | from promptmage.prompt import Prompt
8 | from promptmage.exceptions import PromptNotFoundException
9 |
10 |
class PromptStore:
    """Facade that stores and retrieves prompts through a pluggable backend."""

    def __init__(self, backend):
        """Wrap the given storage backend (any object with the prompt methods)."""
        self.backend: StorageBackend = backend

    def store_prompt(self, prompt: Prompt):
        """Store a prompt in the backend."""
        logger.info(f"Storing prompt: {prompt}")
        self.backend.store_prompt(prompt)

    def get_prompt(
        self, prompt_name: str, version: int | None = None, active: bool | None = None
    ) -> Prompt:
        """Retrieve a prompt from the backend.

        Args:
            prompt_name (str): The name of the prompt to retrieve.
            version (int): The version of the prompt to retrieve.
            active (bool): Whether to retrieve only the active prompt.

        Returns:
            Prompt: The retrieved prompt, or a fresh placeholder prompt
            (version 1, inactive) when the backend has no match.
        """
        logger.info(f"Retrieving prompt with name: {prompt_name}")
        try:
            return self.backend.get_prompt(prompt_name, version, active)
        except PromptNotFoundException:
            # Fixed log wording: the lookup key is a name, not an ID.
            logger.error(
                f"Prompt with name {prompt_name} not found, returning an empty prompt."
            )
            # Return an empty prompt so callers always receive a usable object.
            return Prompt(
                name=prompt_name,
                version=1,
                system="You are a helpful assistant.",
                user="",
                template_vars=[],
                active=False,
            )

    def get_prompt_by_id(self, prompt_id: str) -> Prompt:
        """Retrieve a prompt by its unique ID (no not-found fallback here)."""
        logger.info(f"Retrieving prompt with ID {prompt_id}")
        return self.backend.get_prompt_by_id(prompt_id)

    def get_prompts(self) -> List[Prompt]:
        """Retrieve all prompts from the backend."""
        return self.backend.get_prompts()

    def delete_prompt(self, prompt_id: str):
        """Delete a prompt from the backend."""
        logger.info(f"Deleting prompt with ID: {prompt_id}")
        self.backend.delete_prompt(prompt_id)

    def update_prompt(self, prompt: Prompt):
        """Update the prompt by id."""
        logger.info(f"Update prompt: {prompt}")
        self.backend.update_prompt(prompt)
69 |
--------------------------------------------------------------------------------
/promptmage/storage/remote_data_backend.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from loguru import logger
3 | from typing import Any, Dict, List, Optional
4 |
5 | from promptmage.run_data import RunData, Prompt
6 |
7 |
class RemoteDataBackend:
    """HTTP client that stores and retrieves run data from a remote PromptMage server."""

    def __init__(self, url: str):
        # Base URL of the remote API, e.g. "http://host:port/api".
        self.url = url

    def store_data(self, run_data: RunData):
        """Store the run data."""
        # POST the serialized run to the remote server.
        try:
            resp = requests.post(self.url + "/runs", json=run_data.to_dict())
            resp.raise_for_status()
            logger.info(f"Stored run data: {run_data}")
        except requests.exceptions.RequestException as err:
            logger.error(f"Failed to store run data: {err}")
            raise

    def get_data(self, step_run_id: str) -> RunData:
        """Get the run data for a given step run ID."""
        try:
            resp = requests.get(self.url + f"/runs/{step_run_id}")
            resp.raise_for_status()
            payload = resp.json()
        except requests.exceptions.RequestException as err:
            logger.error(f"Failed to get run data: {err}")
            raise
        record = RunData(**payload)
        # The prompt arrives as a nested dict; hydrate it into a Prompt.
        record.prompt = Prompt(**record.prompt)
        return record

    def get_all_data(self) -> List[RunData]:
        """Get all the run data."""

        def hydrate(item: dict) -> RunData:
            # Rebuild one RunData record and its nested Prompt from a JSON dict.
            record = RunData(**item)
            record.prompt = Prompt(**record.prompt)
            return record

        try:
            resp = requests.get(self.url + "/runs")
            resp.raise_for_status()
            return [hydrate(item) for item in resp.json()]
        except requests.exceptions.RequestException as err:
            logger.error(f"Failed to get all run data: {err}")
            raise

    def create_dataset(self, name: str):
        """Create a new dataset."""
        # TODO(review): not implemented for the remote backend yet.
        pass

    def delete_dataset(self, dataset_id: str):
        # TODO(review): not implemented for the remote backend yet.
        pass

    def add_datapoint_to_dataset(self, datapoint_id, dataset_id):
        # TODO(review): not implemented for the remote backend yet.
        pass

    def get_datasets(self) -> List:
        """Get all the datasets."""
        try:
            resp = requests.get(self.url + "/datasets")
            resp.raise_for_status()
            return resp.json()
        except requests.exceptions.RequestException as err:
            logger.error(f"Failed to get all datasets: {err}")
            raise

    def get_dataset(self, dataset_id: str):
        """Get a dataset by ID."""
        try:
            resp = requests.get(self.url + f"/datasets/{dataset_id}")
            resp.raise_for_status()
            return resp.json()
        except requests.exceptions.RequestException as err:
            logger.error(f"Failed to get dataset: {err}")
            raise

    def get_datapoints(self, dataset_id: str) -> List:
        # TODO(review): not implemented for the remote backend yet.
        pass

    def get_datapoint(self, datapoint_id: str):
        # TODO(review): not implemented for the remote backend yet.
        pass

    def rate_datapoint(self, datapoint_id: str, rating: int):
        # TODO(review): not implemented for the remote backend yet.
        pass

    def remove_datapoint_from_dataset(self, datapoint_id: str, dataset_id: str):
        # TODO(review): not implemented for the remote backend yet.
        pass
91 |
--------------------------------------------------------------------------------
/promptmage/storage/remote_prompt_backend.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from typing import List
3 | from loguru import logger
4 |
5 | from promptmage.prompt import Prompt
6 |
7 |
class RemotePromptBackend:
    """HTTP client that stores and retrieves prompts from a remote PromptMage server.

    All methods log and re-raise requests.exceptions.RequestException on
    HTTP/network failure.
    """

    def __init__(self, url: str):
        # Base URL of the remote API, e.g. "http://host:port/api".
        self.url = url

    def store_prompt(self, prompt: Prompt):
        """Store a prompt in the database."""
        # Send the prompt to the remote server
        try:
            response = requests.post(f"{self.url}/prompts", json=prompt.to_dict())
            response.raise_for_status()
            logger.info(f"Stored prompt {prompt}")
        except requests.exceptions.RequestException as e:
            # Fixed log message: this method stores prompts, not run data.
            logger.error(f"Failed to store prompt: {e}")
            raise

    def update_prompt(self, prompt: Prompt):
        """Update an existing prompt in the database by id.

        Args:
            prompt (Prompt): The prompt to update.
        """
        try:
            response = requests.put(f"{self.url}/prompts", json=prompt.to_dict())
            response.raise_for_status()
            logger.info(f"Updated prompt {prompt}")
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to update prompt: {e}")
            raise

    def get_prompt(
        self, prompt_name: str, version: int | None = None, active: bool | None = None
    ) -> Prompt:
        """Retrieve a prompt from the database by name or version.

        Args:
            prompt_name (str): The name of the prompt to retrieve.
            version (int | None): The version of the prompt to retrieve.
            active (bool | None): Whether to retrieve only active prompts.

        Returns:
            Prompt: The retrieved prompt.
        """
        logger.info(f"Retrieving prompt with name: {prompt_name}")
        try:
            # Build the query string via `params` so separators are correct.
            # The previous manual concatenation produced a malformed URL
            # ("...&active=True" with no "?") when active was set but
            # version was not.
            params = {}
            if version:
                params["version"] = version
            if active:
                params["active"] = active
            response = requests.get(f"{self.url}/prompts/{prompt_name}", params=params)
            response.raise_for_status()
            return Prompt(**response.json())
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to get prompt: {e}")
            raise

    def get_prompt_by_id(self, prompt_id: str) -> Prompt:
        """Get the prompt by id.

        Args:
            prompt_id (str): The id of the prompt to get.
        """
        logger.info(f"Retrieving prompt with ID {prompt_id}")
        try:
            response = requests.get(f"{self.url}/prompts/id/{prompt_id}")
            response.raise_for_status()
            logger.info(f"{response.json()}")
            return Prompt(**response.json())
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to get prompt by id: {e}")
            raise

    def get_prompts(self) -> List[Prompt]:
        """Get all prompts from the database."""
        try:
            response = requests.get(f"{self.url}/prompts")
            response.raise_for_status()
            return [Prompt(**prompt) for prompt in response.json()]
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to get prompts: {e}")
            raise

    def delete_prompt(self, prompt_id: str):
        """Delete a prompt from the database by id.

        Args:
            prompt_id (str): The id of the prompt to delete.
        """
        try:
            response = requests.delete(f"{self.url}/prompts/{prompt_id}")
            response.raise_for_status()
            logger.info(f"Deleted prompt with id {prompt_id}")
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to delete prompt: {e}")
            raise
104 |
--------------------------------------------------------------------------------
/promptmage/storage/storage_backend.py:
--------------------------------------------------------------------------------
1 | """This module contains the StorageBackend class, which is an abstract class that defines the interface for storage backends."""
2 |
3 | from abc import ABC, abstractmethod
4 | from pathlib import Path
5 |
6 |
class StorageBackend(ABC):
    """An abstract class that defines the interface for storage backends."""

    # NOTE(review): no @abstractmethod members are declared here; concrete
    # backends (SQLite, in-memory, file, remote) each define their own
    # store/get methods, so this base currently acts as a marker type only.
9 |
--------------------------------------------------------------------------------
/promptmage/storage/utils.py:
--------------------------------------------------------------------------------
1 | import sqlite3
2 | import json
3 | from loguru import logger
4 |
5 |
def backup_db_to_json(db_path: str, json_path: str):
    """Backup a SQLite database to a JSON file.

    Every user table is dumped as ``{"columns": [(name, type), ...], "data": rows}``
    keyed by table name.

    Args:
        db_path (str): Path to the SQLite database file.
        json_path (str): Path to the JSON file to save the backup to.

    Raises:
        TypeError: from json.dump if a row contains a non-JSON-serializable
            value (e.g. a BLOB).
    """
    logger.info(f"Backing up database from '{db_path}' to '{json_path}' ...")
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()

        # Get a list of tables in the database
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [row[0] for row in cursor.fetchall()]

        # Dictionary to hold the database structure
        db_dict = {}
        for table_name in tables:
            # Identifiers are double-quoted; they come from sqlite_master but
            # may still contain characters that break bare interpolation.
            cursor.execute(f'SELECT * FROM "{table_name}"')
            rows = cursor.fetchall()

            # Get column names and types: PRAGMA table_info rows are
            # (cid, name, type, notnull, dflt_value, pk).
            cursor.execute(f'PRAGMA table_info("{table_name}")')
            columns = [(info[1], info[2]) for info in cursor.fetchall()]

            db_dict[table_name] = {
                "columns": columns,  # Store both column name and type
                "data": rows,
            }
    finally:
        # Close even if a query fails (the original leaked the connection).
        conn.close()

    # Write the database dictionary to a JSON file
    with open(json_path, "w") as json_file:
        json.dump(db_dict, json_file, indent=4)
    logger.info("Backup complete.")
51 |
52 |
def restore_db_from_json(db_path: str, json_path: str):
    """Restore a SQLite database from a JSON file.

    Recreates every table found in the backup (dropping any existing table of
    the same name) and re-inserts all rows.

    Args:
        db_path (str): Path to the SQLite database file.
        json_path (str): Path to the JSON file containing the database backup
            (the format written by backup_db_to_json).
    """
    logger.info(f"Restoring database from '{json_path}' to '{db_path}' ...")
    # Read the JSON file
    with open(json_path, "r") as json_file:
        db_dict = json.load(json_file)

    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()

        # Drop existing tables if they exist (use with caution)
        for table_name in db_dict:
            cursor.execute(f'DROP TABLE IF EXISTS "{table_name}"')

        # Create tables and insert data
        for table_name, table_data in db_dict.items():
            columns = table_data["columns"]  # List of (name, type) pairs
            column_definitions = ", ".join(
                f'"{col_name}" {col_type}' for col_name, col_type in columns
            )

            # Create table with the correct column types (identifiers quoted).
            cursor.execute(f'CREATE TABLE "{table_name}" ({column_definitions})')

            # Insert rows in one batch instead of one execute() per row.
            placeholders = ", ".join("?" for _ in columns)
            cursor.executemany(
                f'INSERT INTO "{table_name}" VALUES ({placeholders})',
                table_data["data"],
            )

        conn.commit()
    finally:
        # Close even on failure (the original leaked the connection on error).
        conn.close()
    logger.info("Restore complete.")
92 |
--------------------------------------------------------------------------------
/promptmage/utils.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 | from typing import List
4 | from importlib import import_module
5 |
6 | from promptmage import PromptMage
7 |
8 |
def get_flows(file_path: str) -> List[PromptMage]:
    """Import the module at *file_path* and return all PromptMage instances it defines.

    Args:
        file_path (str): Path to a Python file containing one or more
            module-level PromptMage instances.

    Returns:
        List[PromptMage]: every PromptMage instance found in the module.
    """
    # Make the module importable; guard against growing sys.path with
    # duplicates when this function is called repeatedly.
    module_dir = str(Path(file_path).parent.absolute())
    if module_dir not in sys.path:
        sys.path.append(module_dir)

    module = import_module(Path(file_path).stem)

    # Collect every PromptMage instance defined at module level.
    return [
        attr
        for attr_name in dir(module)
        if isinstance(attr := getattr(module, attr_name), PromptMage)
    ]
23 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "promptmage"
3 | version = "0.1.4"
4 | description = "\"PromptMage\" is designed to offer an intuitive interface that simplifies the process of creating and managing LLM workflows as a self-hosted solution."
5 | authors = ["Tobias Sterbak "]
6 | license = "MIT"
7 | readme = "README.md"
8 | homepage = "https://www.promptmage.io"
repository = "https://github.com/tsterbak/promptmage"
10 | keywords = ["promptmage", "llm", "workflow", "management", "self-hosted", "solution", "ai", "nlp", "prompt"]
11 | classifiers = [
12 | "Topic :: Scientific/Engineering :: Artificial Intelligence",
13 | "Topic :: Software Development :: Libraries :: Python Modules"
14 | ]
15 | include = ["promptmage/static"]
16 |
17 | [tool.poetry.dependencies]
18 | python = "^3.11"
19 | loguru = "^0.7.2"
20 | click = "^8.1.7"
21 | uvicorn = {extras = ["standard"], version = "^0.29.0"}
22 | setuptools = "^69.5.1"
23 | nicegui = "^1.4.25"
24 | fastapi = ">=0.109.1,<0.110.0"
25 | sqlalchemy = "^2.0.31"
26 | python-slugify = "^8.0.4"
27 | websockets = "^13.1"
28 |
29 | [tool.poetry.group.dev.dependencies]
30 | black = "^24.4.2"
31 | ruff = "^0.4.2"
32 | pytest = "^8.2.0"
33 | pytest-cov = "^5.0.0"
34 | mkdocs = "^1.6.0"
35 | mkdocs-material = "^9.5.23"
36 | material-plausible-plugin = "^0.2.0"
37 | pytest-selenium = "^4.1.0"
38 | pytest-asyncio = "^0.24.0"
39 |
40 | [build-system]
41 | requires = ["poetry-core"]
42 | build-backend = "poetry.core.masonry.api"
43 |
44 | [tool.poetry.scripts]
45 | promptmage = "promptmage.cli:promptmage"
46 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | from typing import Generator
4 |
5 | from promptmage.api import PromptMageAPI
6 | from promptmage.frontend import PromptMageFrontend
7 | from nicegui.testing import Screen, User
8 |
9 | from promptmage.storage import SQLitePromptBackend, SQLiteDataBackend
10 |
11 |
12 | pytest_plugins = ["nicegui.testing.plugin"]
13 |
14 |
@pytest.fixture
def user(user: User) -> Generator[User, None, None]:
    """Yield the nicegui User fixture with the PromptMage frontend wired up."""
    api = PromptMageAPI(flows=[])
    fastapi_app = api.get_app()
    # Attach the frontend to the API app so pages exist for the simulated user.
    PromptMageFrontend(flows=api.flows).init_from_api(fastapi_app)
    yield user
22 |
23 |
@pytest.fixture
def screen(screen: Screen) -> Generator[Screen, None, None]:
    """Yield the nicegui Screen fixture with the PromptMage frontend wired up."""
    api = PromptMageAPI(flows=[])
    fastapi_app = api.get_app()
    # Attach the frontend to the API app so pages exist for the browser screen.
    PromptMageFrontend(flows=api.flows).init_from_api(fastapi_app)
    yield screen
31 |
32 |
@pytest.fixture
def db_path():
    """Path of the throwaway SQLite database file used by the backend fixtures."""
    return "tests/tmp/test_promptmage.db"
36 |
37 |
@pytest.fixture
def prompt_sqlite_backend(db_path):
    """Yield a SQLitePromptBackend backed by a temp file; delete the file afterwards."""
    yield SQLitePromptBackend(db_path)

    # Clean up the database; guard against the file never having been created
    # (e.g. when the test failed before the backend touched disk), which
    # previously made teardown itself raise FileNotFoundError.
    if os.path.exists(db_path):
        os.remove(db_path)
44 |
45 |
@pytest.fixture
def data_sqlite_backend(db_path):
    """Yield a SQLiteDataBackend backed by a temp file; delete the file afterwards."""
    yield SQLiteDataBackend(db_path)

    # Clean up the database; guard against the file never having been created
    # (e.g. when the test failed before the backend touched disk), which
    # previously made teardown itself raise FileNotFoundError.
    if os.path.exists(db_path):
        os.remove(db_path)
52 |
--------------------------------------------------------------------------------
/tests/minimal_example.py:
--------------------------------------------------------------------------------
1 | from promptmage import PromptMage, Prompt, MageResult
2 | from promptmage.storage import (
3 | PromptStore,
4 | DataStore,
5 | SQLiteDataBackend,
6 | SQLitePromptBackend,
7 | )
8 |
# Prompt store backed by an in-memory SQLite database, pre-seeded with the
# single prompt the example step below refers to by name.
prompt_store = PromptStore(backend=SQLitePromptBackend(":memory:"))
prompt_store.store_prompt(
    Prompt(
        name="prompt1",
        system="system1",
        user="user1",
        template_vars=["question"],
        version=1,
        active=True,
    )
)

# Minimal example flow wired to in-memory prompt and data stores; imported
# by the API tests.
mage = PromptMage(
    name="example",
    prompt_store=prompt_store,
    data_store=DataStore(backend=SQLiteDataBackend(":memory:")),
)
26 |
27 |
@mage.step(name="step1", prompt_name="prompt1", initial=True)
def step1(question: str, prompt: Prompt) -> MageResult:
    """Single example step: echo the question back as a canned answer and end the flow."""
    return MageResult(next_step=None, result=f"Answer to {question}")
32 |
--------------------------------------------------------------------------------
/tests/test_api.py:
--------------------------------------------------------------------------------
1 | from fastapi.testclient import TestClient
2 |
3 | from promptmage.api import PromptMageAPI
4 | from .minimal_example import mage
5 |
6 |
def test_promptmage_api():
    """The API wrapper produces a truthy FastAPI app for the example flow."""
    assert PromptMageAPI([mage]).get_app()
11 |
12 |
def test_call_api():
    """Test calling the API via an in-process HTTP client."""
    app = PromptMageAPI([mage]).get_app()

    client = TestClient(app)

    # Root page responds and mentions the product name.
    response = client.get("/")
    print(response.text)
    assert response.status_code == 200
    assert "PromptMage" in response.text

    # /api responds and mentions the example flow's name.
    response = client.get("/api")
    print(response.text)
    assert response.status_code == 200
    assert "example" in response.text

    # NOTE(review): step-execution check disabled below — re-enable once the
    # step endpoint is stable.
    # question = "What is your name?"
    # response = client.get(f"/api/example/step1/{question}")
    # print(response.text)
    # assert response.status_code == 200
    # assert question in response.text
34 |
--------------------------------------------------------------------------------
/tests/test_mage.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from collections import defaultdict
3 | from unittest.mock import MagicMock, patch
4 |
5 | from promptmage import PromptMage, MageResult, Prompt
6 | from promptmage.step import MageStep
7 | from promptmage.mage import combine_dicts
8 | from promptmage.storage import (
9 | PromptStore,
10 | DataStore,
11 | )
12 |
13 |
@pytest.fixture
def mock_prompt_store():
    """A PromptStore stand-in that records calls without touching storage."""
    return MagicMock(spec=PromptStore)
17 |
18 |
@pytest.fixture
def mock_data_store():
    """A DataStore stand-in that records calls without touching storage."""
    return MagicMock(spec=DataStore)
22 |
23 |
@pytest.fixture
def mock_step():
    """A MageStep stand-in whose execute() always yields one fixed, terminal result."""
    step = MagicMock(spec=MageStep)
    step.name = "mock_step"
    # next_step=None marks the flow as finished after this single result.
    step.execute.return_value = MageResult(
        id="result_1", results={"output": "result"}, next_step=None
    )
    step.initial = False
    # Empty parameter list — presumably read by PromptMage when wiring step
    # inputs; confirm against mage.py.
    step.signature = MagicMock()
    step.signature.parameters = {}
    return step
35 |
36 |
@pytest.fixture
def prompt_mage(mock_prompt_store, mock_data_store):
    """A PromptMage instance wired to mocked prompt and data stores."""
    return PromptMage(
        name="test_mage", prompt_store=mock_prompt_store, data_store=mock_data_store
    )
42 |
43 |
def test_initialization_with_provided_stores(mock_prompt_store, mock_data_store):
    """PromptMage keeps exactly the stores it was constructed with."""
    mage_instance = PromptMage(
        name="custom_test", prompt_store=mock_prompt_store, data_store=mock_data_store
    )
    assert mage_instance.prompt_store == mock_prompt_store
    assert mage_instance.data_store == mock_data_store
50 |
51 |
def test_step_decorator_registration(prompt_mage, mock_step):
    """Decorating a function registers it under the given step name as a MageStep."""

    @prompt_mage.step(name="test_step")
    def dummy_step():
        pass

    registered = prompt_mage.steps
    assert "test_step" in registered
    assert isinstance(registered["test_step"], MageStep)
59 |
60 |
def test_step_dependency_handling(prompt_mage):
    """depends_on accepts both a single step name and a list of names."""

    @prompt_mage.step(name="step1", depends_on="step0")
    def first():
        pass

    @prompt_mage.step(name="step2", depends_on=["step0", "step1"])
    def second():
        pass

    deps = prompt_mage.dependencies
    assert deps["step1"] == ["step0"]
    assert deps["step2"] == ["step0", "step1"]
72 |
73 |
def test_get_run_function(prompt_mage, mock_step):
    """With an initial step registered, get_run_function yields a callable."""
    mock_step.initial = True
    prompt_mage.steps[mock_step.name] = mock_step

    assert callable(prompt_mage.get_run_function())
81 |
82 |
def test_run_function_execution(prompt_mage, mock_step):
    """Running the flow executes the initial step and returns its result payload."""
    mock_step.initial = True
    prompt_mage.steps[mock_step.name] = mock_step

    outcome = prompt_mage.get_run_function()()

    assert outcome == {"id": "result_1", "results": {"output": "result"}}
    assert mock_step.execute.called
92 |
93 |
def test_get_run_data(prompt_mage, mock_data_store):
    """get_run_data surfaces the records the data store returns, in order."""
    mock_data_store.get_all_data.return_value = [
        MagicMock(step_name="step1"),
        MagicMock(step_name="step2"),
    ]
    for step_name in ("step1", "step2"):
        prompt_mage.steps[step_name] = MagicMock()

    run_data = prompt_mage.get_run_data()

    assert [entry.step_name for entry in run_data] == ["step1", "step2"]
107 |
108 |
def test_combine_dicts():
    """combine_dicts merges dicts, collecting values of duplicate keys into lists."""
    merged = combine_dicts([{"a": 1, "b": 2}, {"a": 3, "c": 4}])
    assert merged == {"a": [1, 3], "b": 2, "c": 4}
113 |
--------------------------------------------------------------------------------
/tests/test_mage_step.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from promptmage.mage import MageStep
4 |
5 |
def test_init_mage_step():
    """A freshly constructed step exposes its name, func and empty state."""
    identity_step = MageStep(
        name="test_step", func=lambda x: x, prompt_store=None, data_store=None
    )

    assert identity_step.name == "test_step"
    assert identity_step.prompt_store is None
    assert identity_step.func(5) == 5
    # No execution yet: inputs are initialized to None and there is no result.
    assert identity_step.input_values == {"x": None}
    assert identity_step.result is None
16 |
17 |
def test_execute_mage_step():
    """Executing a step records both the inputs used and the function's result."""
    increment_step = MageStep(
        name="test_step", func=lambda x: x + 1, prompt_store=None, data_store=None
    )

    increment_step.execute(x=5)

    assert increment_step.input_values == {"x": 5}
    assert increment_step.result == 6
25 |
--------------------------------------------------------------------------------
/tests/test_sqlite_backend.py:
--------------------------------------------------------------------------------
1 | """Tests for the sqlite backends for prompts and run data."""
2 |
3 | import pytest
4 | import sqlite3
5 |
6 | from promptmage.storage import SQLitePromptBackend, SQLiteDataBackend
7 | from promptmage import Prompt, RunData
8 |
9 |
def test_init_backend():
    """The prompt backend remembers the database path it was constructed with."""
    backend = SQLitePromptBackend(":memory:")
    assert backend.db_path == ":memory:"
16 |
17 |
def test_store_prompt(prompt_sqlite_backend):
    """Test that a prompt is stored correctly."""
    prompt = Prompt(
        name="test",
        system="test",
        user="test",
        version=1,
        template_vars=["test"],
        active=False,
    )

    prompt_sqlite_backend.store_prompt(prompt)

    # Inspect the raw table directly. Close the connection afterwards: the
    # fixture deletes the database file in teardown, which fails on Windows
    # while a handle is still open (the original never closed it).
    conn = sqlite3.connect(prompt_sqlite_backend.db_path)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM prompts WHERE id=?", (prompt.id,))
        row = cursor.fetchone()
    finally:
        conn.close()

    # template_vars are persisted as a comma-joined string.
    assert row == (
        prompt.id,
        prompt.name,
        prompt.system,
        prompt.user,
        prompt.version,
        ",".join(prompt.template_vars),
        prompt.active,
    )
45 |
46 |
def test_get_prompt(prompt_sqlite_backend):
    """A stored prompt round-trips through get_prompt unchanged."""
    stored = Prompt(
        name="test",
        system="test",
        user="test",
        version=1,
        template_vars=["test"],
    )
    prompt_sqlite_backend.store_prompt(stored)

    fetched = prompt_sqlite_backend.get_prompt(stored.name)

    for field in ("id", "name", "system", "user", "version", "template_vars"):
        assert getattr(fetched, field) == getattr(stored, field)
66 |
67 |
def test_get_prompts(prompt_sqlite_backend):
    """get_prompts returns every stored prompt with all fields intact."""
    originals = [
        Prompt(
            name="test1",
            system="test1",
            user="test1",
            version=1,
            template_vars=["test1"],
        ),
        Prompt(
            name="test2",
            system="test2",
            user="test2",
            version=2,
            template_vars=["test2"],
        ),
    ]
    for original in originals:
        prompt_sqlite_backend.store_prompt(original)

    retrieved = prompt_sqlite_backend.get_prompts()

    assert len(retrieved) == 2
    for fetched, expected in zip(retrieved, originals):
        for field in ("id", "name", "system", "user", "version", "template_vars"):
            assert getattr(fetched, field) == getattr(expected, field)
102 |
103 |
def test_store_run_data(data_sqlite_backend):
    """Test that run data is stored correctly."""
    run_data = RunData(
        run_id="test",
        prompt=Prompt(
            name="test",
            system="test",
            user="test",
            version=1,
            template_vars=["test"],
        ),
        step_name="test",
        input_data={"test": "test"},
        output_data={"test": "test"},
        status="test",
        model="test",
        execution_time=1.0,
        run_time="test",
    )

    data_sqlite_backend.store_data(run_data)

    # Inspect the raw table directly. Close the connection afterwards: the
    # fixture deletes the database file in teardown, which fails on Windows
    # while a handle is still open (the original never closed it).
    conn = sqlite3.connect(data_sqlite_backend.db_path)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM data WHERE run_id=?", (run_data.run_id,))
        row = cursor.fetchone()
    finally:
        conn.close()

    # Column order: step_run_id, run_time, execution_time, ...
    assert row[0] == run_data.step_run_id
    assert row[1] == run_data.run_time
    assert row[2] == run_data.execution_time
135 |
136 |
def test_get_run_data(data_sqlite_backend):
    """Test that run data is retrieved correctly."""
    # TODO: implement — store a RunData record, then assert that the backend's
    # get_data returns an equal record.
139 |
140 |
def test_get_run_data_by_prompt(data_sqlite_backend):
    """Test that run data is retrieved correctly by prompt."""
    # TODO: implement — store runs for two different prompts, then assert that
    # only runs for the queried prompt are returned.
143 |
--------------------------------------------------------------------------------
/tests/tmp/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tsterbak/promptmage/9845325bf55c990a23ca2995f77cb40462941f63/tests/tmp/.gitkeep
--------------------------------------------------------------------------------