├── .github └── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── assets ├── DemoGPT_banner.png ├── DemoGPT_banner_new.jpeg ├── architecture_transparent.png ├── banner_small.png ├── banner_smaller.png ├── demogpt_banner_white.png ├── demogpt_banner_white.svg ├── demogpt_banner_white_square.png ├── demogpt_new_banner.jpeg ├── demogpt_new_pipeline.jpeg ├── demogpt_new_pipeline1.jpeg ├── demogpt_new_version.gif ├── demogpt_pipeline.png ├── humor_machine.gif ├── plan_based_pipeline.png ├── puzzle.png ├── puzzle_24.png └── web_blogger.gif ├── demogpt ├── __init__.py ├── app.py ├── chains │ ├── __init__.py │ ├── chains.py │ ├── prompts │ │ ├── __init__.py │ │ ├── about.py │ │ ├── app_type.py │ │ ├── combine.py │ │ ├── combine_v2.py │ │ ├── draft.py │ │ ├── feedback.py │ │ ├── final.py │ │ ├── how_to_use.py │ │ ├── imports.py │ │ ├── old_tasks.py │ │ ├── plan.py │ │ ├── plan1.py │ │ ├── plan_feedback.py │ │ ├── plan_refiner.py │ │ ├── plan_with_inputs.py │ │ ├── prompt_chat_refiner.py │ │ ├── refine.py │ │ ├── self_refinement │ │ │ ├── __init__.py │ │ │ └── final_refiner.py │ │ ├── system_inputs.py │ │ ├── task_controller.py │ │ ├── task_list │ │ │ ├── __init__.py │ │ │ ├── chat.py │ │ │ ├── detailed_description.py │ │ │ ├── doc_load.py │ │ │ ├── doc_to_string.py │ │ │ ├── hub_bash.py │ │ │ ├── hub_llm_math.py │ │ │ ├── hub_meteo.py │ │ │ ├── hub_question_answering.py │ │ │ ├── pal_chain.py │ │ │ ├── path_to_file.py │ │ │ ├── prompt_list_parser.py │ │ │ ├── prompt_template.py │ │ │ ├── python_coder.py │ │ │ ├── react.py │ │ │ ├── router.py │ │ │ ├── search.py │ │ │ ├── search_chat.py │ │ │ ├── string_to_doc.py │ │ │ ├── summarize.py │ │ │ ├── ui_input_chat.py │ │ │ ├── ui_input_file.py │ │ │ ├── ui_input_text.py │ │ │ ├── ui_output_chat.py │ │ │ └── ui_output_text.py │ │ ├── task_refiner.py │ │ ├── tasks.py │ │ └── title.py │ ├── self_refiner.py │ ├── task_chains.py │ ├── 
task_chains_seperate.py │ └── task_definitions.py ├── cli.py ├── controllers.py ├── model.py ├── prompt.py ├── test.py ├── test_cases.py └── utils.py ├── demogpt_agenthub ├── README.md ├── __init__.py ├── agents │ ├── __init__.py │ ├── base.py │ ├── react.py │ └── tool_calling.py ├── apis │ └── __init__.py ├── llms │ ├── __init__.py │ ├── base.py │ └── openai.py ├── prompts │ ├── __init__.py │ ├── agents │ │ ├── react │ │ │ └── success_decider.py │ │ └── tool_calling │ │ │ ├── final_answer.py │ │ │ └── tool_decider.py │ └── rag │ │ └── base.py ├── rag │ ├── __init__.py │ └── base.py ├── tools │ ├── __init__.py │ ├── base.py │ ├── bash.py │ ├── duckduckgo.py │ ├── pubmed.py │ ├── repl.py │ ├── req.py │ ├── research.py │ ├── stack_exchange.py │ ├── weather.py │ ├── wikidata.py │ ├── wikiped.py │ ├── yolo.py │ └── youtube.py └── utils │ └── parsers.py ├── docs ├── README_CN.md ├── ROADMAP.md └── ROADMAP_CN.md ├── pyproject.toml ├── rag_chroma ├── 12c93a31-bb79-46fa-a985-7ab34c742bb7 │ ├── data_level0.bin │ ├── header.bin │ ├── length.bin │ └── link_lists.bin └── chroma.sqlite3 ├── test.ipynb └── tests ├── __init__.py ├── test_llms.py └── test_rag.py /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. 
chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | src/test_runs/* 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | 12 | # db folders 13 | src/beta/goals_db/ 14 | db_tutorials/ 15 | src/beta/langchain_db_python/ 16 | src/beta/langchain_code/ 17 | 18 | quick_start.txt 19 | test_final_4.py 20 | test_final_5.py 21 | test_plan_refiner.py 22 | 23 | # docs 24 | src/docs/* 25 | src/docs_explanation/* 26 | src/docs_explanation_some/* 27 | src/docs_summary/* 28 | 29 | .pypirc 30 | 31 | # Distribution / packaging 32 | .Python 33 | build/ 34 | develop-eggs/ 35 | dist/ 36 | downloads/ 37 | eggs/ 38 | .eggs/ 39 | lib/ 40 | lib64/ 41 | parts/ 42 | sdist/ 43 | var/ 44 | wheels/ 45 | share/python-wheels/ 46 | *.egg-info/ 47 | .installed.cfg 48 | *.egg 49 | src/demogpt.engg-info/ 50 | MANIFEST 51 | 52 | src/test_app.py 53 | 54 | # PyInstaller 55 | # Usually these files are written by a python script from a template 56 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
57 | *.manifest 58 | *.spec 59 | 60 | # Installer logs 61 | pip-log.txt 62 | pip-delete-this-directory.txt 63 | 64 | # Unit test / coverage reports 65 | htmlcov/ 66 | .tox/ 67 | .nox/ 68 | .coverage 69 | .coverage.* 70 | .cache 71 | nosetests.xml 72 | coverage.xml 73 | *.cover 74 | *.py,cover 75 | .hypothesis/ 76 | .pytest_cache/ 77 | cover/ 78 | 79 | # Translations 80 | *.mo 81 | *.pot 82 | 83 | # Django stuff: 84 | *.log 85 | local_settings.py 86 | db.sqlite3 87 | db.sqlite3-journal 88 | 89 | LangChain.ipynb 90 | src/.chroma/**/* 91 | 92 | # Flask stuff: 93 | instance/ 94 | .webassets-cache 95 | 96 | # Scrapy stuff: 97 | .scrapy 98 | 99 | # Sphinx documentation 100 | docs/_build/ 101 | 102 | # PyBuilder 103 | .pybuilder/ 104 | target/ 105 | 106 | src/alpha/**/* 107 | src/beta/**/* 108 | src/data_beta/**/* 109 | 110 | # Jupyter Notebook 111 | .ipynb_checkpoints 112 | 113 | # IPython 114 | profile_default/ 115 | ipython_config.py 116 | 117 | # pyenv 118 | # For a library or package, you might want to ignore these files since the code is 119 | # intended to run in multiple environments; otherwise, check them in: 120 | # .python-version 121 | 122 | # pipenv 123 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 124 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 125 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 126 | # install all needed dependencies. 127 | #Pipfile.lock 128 | 129 | # poetry 130 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 131 | # This is especially recommended for binary packages to ensure reproducibility, and is more 132 | # commonly ignored for libraries. 
133 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 134 | #poetry.lock 135 | 136 | # pdm 137 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 138 | #pdm.lock 139 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 140 | # in version control. 141 | # https://pdm.fming.dev/#use-with-ide 142 | .pdm.toml 143 | 144 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 145 | __pypackages__/ 146 | 147 | # Celery stuff 148 | celerybeat-schedule 149 | celerybeat.pid 150 | 151 | documents/**/* 152 | 153 | # SageMath parsed files 154 | *.sage.py 155 | 156 | # custom files 157 | pipeline.png 158 | demogpt.mp4 159 | 160 | # Environments 161 | .env 162 | .venv 163 | env/ 164 | venv/ 165 | ENV/ 166 | env.bak/ 167 | venv.bak/ 168 | src/.env 169 | src/db/**/* 170 | 171 | # Spyder project settings 172 | .spyderproject 173 | .spyproject 174 | 175 | # Rope project settings 176 | .ropeproject 177 | 178 | # mkdocs documentation 179 | /site 180 | 181 | # mypy 182 | .mypy_cache/ 183 | .dmypy.json 184 | dmypy.json 185 | 186 | # Pyre type checker 187 | .pyre/ 188 | 189 | # pytype static type analyzer 190 | .pytype/ 191 | 192 | # Cython debug symbols 193 | cython_debug/ 194 | 195 | # PyCharm 196 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 197 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 198 | # and can be added to the global gitignore or merged into this file. For a more nuclear 199 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
200 | #.idea/ -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing 
our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | melihunsal.ai@gmail.com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. 
Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 
123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to DemoGPT 2 | 3 | Thank you for your interest in contributing to DemoGPT! This document provides guidelines and instructions for contributing to the project. Whether you're adding new features, fixing bugs, or improving documentation, your contributions are welcome. 4 | 5 | ## System Overview 6 | 7 | DemoGPT works through the following steps: 8 | 1. A plan is generated based on the incoming instruction from the user. 9 | 2. Tasks are generated based on the plan. 10 | 3. Code generation is made for each task. 11 | 4. Each generated code is combined, and the final code is generated as a Streamlit app. 12 | 5. The generated Streamlit code is executed with the Streamlit command. 13 | 14 | ## Project Structure 15 | 16 | ### Root Folder 17 | 18 | - `demogpt`: Contains the source code of the project. 19 | - `app.py`: Initial application where users can write down the demo idea (instruction) and the demo title (app title). 20 | - `cli.py`: For starting Streamlit at the beginning. 21 | - `model.py`: Includes the model corresponding to each step. 22 | - `utils.py`: Helper functions for the system. 23 | - `test.py`: Important to test the system components. 24 | - `test_cases.py`: Source includes test cases for `test.py`. 25 | - `chains`: Contains module definitions and task implementations. 26 | - `chains.py`: Module definitions. 27 | - `__init__.py`: Includes the modules. 28 | - `task_chains.py`: Implementations of all available tasks. 
29 | - `task_definitions.py`: Definitions of all available tasks. 30 | - `prompts`: Folder containing task files. 31 | 32 | ### Task List Folder (`demogpt/chains/prompts`) 33 | 34 | Contains task files. Only `prompt_template.py`, `ui_input_file.py`, `ui_input_text.py`, `ui_output_text.py` are filled. Others need to be filled according to their needs. 35 | 36 | ## Adding a New Task 37 | 38 | To add a new task, follow these steps: 39 | 40 | 1. **Fill the Corresponding File:** Fill the corresponding file in `demogpt/chains/prompts` with the implementation of the new task. 41 | 2. **Update Task Definitions:** Change the "TASKS" variable in `demogpt/chains/task_definitions.py` to include the new task. 42 | 3. **Add the New Task to Task Chains:** Add the new task in `demogpt/chains/task_chains.py`. 43 | 4. **Modify `__init__.py`:** Modify `demogpt/chains/prompts/__init__.py` in a way that the new task becomes available. 44 | 5. **Add the New Task Call to `demogpt/`:** Add new task to getCodeSnippet function like in the following: 45 | ```python 46 | elif task_type == $task_name: 47 | code = TaskChains.$task_name(task=task,code_snippets=code_snippets) 48 | ``` 49 | 50 | 6. **Update Test Cases:** Update the `TOOL_EXAMPLES` variable in `demogpt/test_cases.py` and add at least one test case to test the new tool. 51 | 7. 
**Add Test Script:** Add the corresponding test script in `demogpt/test.py` like the following: 52 | 53 | ```python 54 | def test_$new_task_name(self): 55 | for example in TOOL_EXAMPLES[$new_task_name]: 56 | instruction = example["instruction"] 57 | inputs = example["inputs"] 58 | res = TaskChains.$new_task_name(instruction=instruction, inputs=inputs) 59 | self.writeToFile($APPROPRIATE_TASK_NAME, res, instruction) 60 | ``` 61 | **Test the New Task**: To test the new task, in the root, run the corresponding module like in the below: 62 | ```bash 63 | python -m unittest src.test.TestDemoGPT.$function_name 64 | ``` 65 | Then, the test result will be available in the **test.log**. 66 | 67 | ## Modifying The Main Prompts. 68 | 69 | Main prompts are inside of `demogpt/chains/prompts` folder whose names are `combine.py`, `feedback.py`, `plan.py`, `refine.py`, `tasks.py` and `final.py` 70 | 71 | You can also modify those prompts according to their goal which you can check their usage in `demogpt/model.py` 72 | 73 | ## Upcoming Tasks 74 | We are planning to integrate 🦍 Gorilla, a model specifically designed for API calls, as a task. Stay tuned for more details on this exciting addition. 75 | 76 | ## Conclusion 77 | Your contributions are vital to the success and growth of DemoGPT. Whether you're a seasoned developer or just starting, your insights, creativity, and hard work are appreciated. If you have any questions or need further assistance, please don't hesitate to reach out. 78 | 79 | Thank you for being a part of the DemoGPT community! 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Melih Ünsal 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /assets/DemoGPT_banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/DemoGPT_banner.png -------------------------------------------------------------------------------- /assets/DemoGPT_banner_new.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/DemoGPT_banner_new.jpeg -------------------------------------------------------------------------------- /assets/architecture_transparent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/architecture_transparent.png -------------------------------------------------------------------------------- /assets/banner_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/banner_small.png -------------------------------------------------------------------------------- /assets/banner_smaller.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/banner_smaller.png -------------------------------------------------------------------------------- /assets/demogpt_banner_white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/demogpt_banner_white.png -------------------------------------------------------------------------------- 
/assets/demogpt_banner_white_square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/demogpt_banner_white_square.png -------------------------------------------------------------------------------- /assets/demogpt_new_banner.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/demogpt_new_banner.jpeg -------------------------------------------------------------------------------- /assets/demogpt_new_pipeline.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/demogpt_new_pipeline.jpeg -------------------------------------------------------------------------------- /assets/demogpt_new_pipeline1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/demogpt_new_pipeline1.jpeg -------------------------------------------------------------------------------- /assets/demogpt_new_version.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/demogpt_new_version.gif -------------------------------------------------------------------------------- /assets/demogpt_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/demogpt_pipeline.png -------------------------------------------------------------------------------- /assets/humor_machine.gif: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/humor_machine.gif -------------------------------------------------------------------------------- /assets/plan_based_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/plan_based_pipeline.png -------------------------------------------------------------------------------- /assets/puzzle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/puzzle.png -------------------------------------------------------------------------------- /assets/puzzle_24.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/puzzle_24.png -------------------------------------------------------------------------------- /assets/web_blogger.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/assets/web_blogger.gif -------------------------------------------------------------------------------- /demogpt/__init__.py: -------------------------------------------------------------------------------- 1 | from demogpt.model import DemoGPT -------------------------------------------------------------------------------- /demogpt/app.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import signal 4 | import sys 5 | 6 | import streamlit as st 7 | import streamlit.components.v1 as components 8 | 9 | current_file_path = 
os.path.abspath(__file__) 10 | current_directory = os.path.dirname(current_file_path) 11 | parent_directory = os.path.dirname(current_directory) 12 | grandparent_directory = os.path.dirname(parent_directory) 13 | sys.path.append(grandparent_directory) 14 | 15 | from model import DemoGPT 16 | from utils import runStreamlit 17 | 18 | try: 19 | from dotenv import load_dotenv 20 | 21 | load_dotenv() 22 | except Exception as e: 23 | logging.error("dotenv import error but no needed") 24 | 25 | 26 | def generate_response(txt): 27 | """ 28 | Generate response using the LangChainCoder. 29 | 30 | Args: 31 | txt (str): The input text. 32 | 33 | Yields: 34 | dict: A dictionary containing response information. 35 | """ 36 | for data in agent(txt): 37 | yield data 38 | 39 | 40 | def initCode(): 41 | if "code" not in st.session_state: 42 | st.session_state["code"] = "" 43 | st.session_state.edit_mode = False 44 | 45 | 46 | # Page title 47 | title = "🧩 DemoGPT" 48 | 49 | st.set_page_config(page_title=title) 50 | 51 | st.title(title) 52 | 53 | 54 | initCode() 55 | 56 | # Text input 57 | 58 | openai_api_key = st.sidebar.text_input( 59 | "OpenAI API Key", 60 | placeholder="sk-...", 61 | value=os.getenv("OPENAI_API_KEY", ""), 62 | type="password", 63 | ) 64 | 65 | openai_api_base = st.sidebar.text_input( 66 | "Open AI base URL", 67 | placeholder="https://api.openai.com/v1", 68 | ) 69 | 70 | # models = ( 71 | # "gpt-3.5-turbo-0613", 72 | # "gpt-3.5-turbo-0301", 73 | # "gpt-3.5-turbo", 74 | # "gpt-3.5-turbo-16k", 75 | # "gpt-3.5-turbo-16k-0613", 76 | # "gpt-4", 77 | # "gpt-4-0314", 78 | # "gpt-4-0613", 79 | # ) 80 | 81 | models = DemoGPT.get_available_models(openai_api_key) 82 | 83 | 84 | model_name = st.sidebar.selectbox("Model", models) 85 | 86 | overview = st.text_area( 87 | "Explain your LLM-based application idea *", 88 | placeholder="Type your application idea here", 89 | height=100, 90 | help="""## Example prompts 91 | * Character Clone: Want an app that converses like Jeff 
Bezos? Prompt - "A chat-based application that talks like Jeff Bezos." 92 | * Language Mastery: Need help in learning French? Prompt - "An application that translates English sentences to French and provides pronunciation guidance for learners. 93 | * Content Generation: Looking to generate content? Prompt - "A system that can write ready to share Medium article from website. The resulting Medium article should be creative and interesting and written in a markdown format." 94 | """, 95 | ) 96 | 97 | features = st.text_input( 98 | "List all specific features desired for your app (comma seperated)", 99 | placeholder="Document interpretation, question answering, ...", 100 | help="Please provide a comprehensive list of specific features and functionalities you envision in your application, ensuring each element supports your overall objectives and user needs.(comma seperated)" 101 | ) 102 | 103 | if overview and features: 104 | demo_idea = f"Overview:{overview}\nFeatures:{features}" 105 | elif overview: 106 | demo_idea = overview 107 | else: 108 | demo_idea = "" 109 | 110 | def progressBar(percentage, bar=None): 111 | if bar: 112 | bar.progress(percentage) 113 | else: 114 | return st.progress(percentage) 115 | 116 | 117 | if "pid" not in st.session_state: 118 | st.session_state["pid"] = -1 119 | 120 | if "done" not in st.session_state: 121 | st.session_state["done"] = False 122 | 123 | with st.form("a", clear_on_submit=True): 124 | submitted = st.form_submit_button("Submit") 125 | 126 | 127 | def kill(): 128 | if st.session_state["pid"] != -1: 129 | logging.info(f"Terminating the previous applicaton ...") 130 | try: 131 | os.kill(st.session_state["pid"], signal.SIGTERM) 132 | except Exception as e: 133 | pass 134 | st.session_state["pid"] = -1 135 | 136 | 137 | if submitted: 138 | if not demo_idea: 139 | st.warning("Please enter your demo idea", icon="⚠️") 140 | st.stop() 141 | 142 | st.session_state.messages = [] 143 | if not openai_api_key: 144 | st.warning("Please 
enter your OpenAI API Key!", icon="⚠️") 145 | elif demo_idea: 146 | bar = progressBar(0) 147 | st.session_state.container = st.container() 148 | try: 149 | agent = DemoGPT(openai_api_key=openai_api_key, openai_api_base=openai_api_base) 150 | agent.setModel(model_name) 151 | except Exception as e: 152 | st.warning(e) 153 | else: 154 | kill() 155 | code_empty = st.empty() 156 | st.session_state.container = st.container() 157 | for data in generate_response(demo_idea): 158 | done = data.get("done", False) 159 | failed = data.get("failed", False) 160 | message = data.get("message", "") 161 | st.session_state["message"] = message 162 | stage = data.get("stage", "stage") 163 | code = data.get("code", "") 164 | progressBar(data["percentage"], bar) 165 | 166 | st.session_state["done"] = done 167 | st.session_state["failed"] = failed 168 | st.session_state["message"] = message 169 | 170 | if done or failed: 171 | st.session_state.code = code 172 | break 173 | 174 | st.info(message, icon="🧩") 175 | st.session_state.messages.append(message) 176 | 177 | elif "messages" in st.session_state: 178 | for message in st.session_state.messages: 179 | st.info(message, icon="🧩") 180 | 181 | if st.session_state.done: 182 | st.success(st.session_state.message) 183 | with st.expander("Code", expanded=True): 184 | code_empty = st.empty() 185 | if st.session_state.edit_mode: 186 | new_code = code_empty.text_area("", st.session_state.code, height=500) 187 | if st.button("Save & Rerun"): 188 | st.session_state.code = ( 189 | new_code # Save the edited code to session state 190 | ) 191 | st.session_state.edit_mode = False # Exit edit mode 192 | code_empty.code(new_code) 193 | kill() 194 | st.session_state["pid"] = runStreamlit( 195 | new_code, openai_api_key, openai_api_base 196 | ) 197 | st.rerun() 198 | 199 | else: 200 | code_empty.code(st.session_state.code) 201 | if st.button("Edit"): 202 | st.session_state.edit_mode = True # Enter edit mode 203 | st.rerun() 204 | example_submitted = False 
205 | if submitted: 206 | st.session_state["pid"] = runStreamlit(code, openai_api_key, openai_api_base) 207 | 208 | if st.session_state.get("failed", False): 209 | with st.form("fail"): 210 | st.warning(st.session_state["message"]) 211 | email = st.text_input("Email", placeholder="example@example.com") 212 | email_submit = st.form_submit_button("Send") 213 | if email_submit: 214 | st.success( 215 | "🌟 Thank you for entrusting us with your vision! We're on it and will ping you the moment your app is ready to launch. Stay tuned for a stellar update soon!" 216 | ) -------------------------------------------------------------------------------- /demogpt/chains/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt/chains/__init__.py -------------------------------------------------------------------------------- /demogpt/chains/chains.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import re 4 | from time import sleep 5 | 6 | import autopep8 7 | from langchain_community.chat_models import ChatOpenAI 8 | from langchain.prompts.chat import (ChatPromptTemplate, 9 | HumanMessagePromptTemplate, 10 | SystemMessagePromptTemplate) 11 | from langchain_core.output_parsers import StrOutputParser, JsonOutputParser 12 | 13 | from demogpt import utils 14 | from demogpt.chains.task_definitions import getPlanGenHelper, getTasks 15 | from demogpt.controllers import validate 16 | 17 | from . 
class Chains:
    """Namespace of LLM-powered generation steps for the DemoGPT pipeline.

    The class is never instantiated: configuration (model name, credentials,
    shared ChatOpenAI client) lives in class attributes set by ``setLlm``, and
    every pipeline step is a classmethod that builds and invokes a
    ``prompt | model | parser`` chain through ``getChain``.
    """

    @classmethod
    def setLlm(
        cls,
        model,
        openai_api_key=None,
        temperature=0.0,
        openai_api_base=None,
        has_gpt4=False,
    ):
        """Configure the shared LLM client used by every chain.

        Args:
            model: chat model name, e.g. "gpt-3.5-turbo".
            openai_api_key: API key. When omitted, the OPENAI_API_KEY
                environment variable is read at *call* time. (The previous
                default argument ``os.getenv(...)`` was evaluated once at
                import time, freezing the value before the app could set it.)
            temperature: sampling temperature of the default client.
            openai_api_base: optional alternative API endpoint.
            has_gpt4: whether ``getModel(change=True)`` may upgrade to GPT-4.
        """
        if openai_api_key is None:
            # Resolve lazily so environment changes made after import are seen.
            openai_api_key = os.getenv("OPENAI_API_KEY", "")
        cls.openai_api_key = openai_api_key
        cls.temperature = temperature
        cls.openai_api_base = openai_api_base
        cls.has_gpt4 = has_gpt4
        cls.llm = ChatOpenAI(
            model=model,
            openai_api_key=openai_api_key,
            temperature=temperature,
            openai_api_base=openai_api_base,
        )
        cls.model = model

    @classmethod
    def getModel(cls, change=False, temperature=0, change_model="gpt-4-0613"):
        """Return the ChatOpenAI client to use for a single call.

        Priority: an upgraded model when ``change`` is requested and GPT-4 is
        available, then a fresh client when a non-zero temperature is needed,
        otherwise the shared default client created by ``setLlm``.
        """
        if change and cls.has_gpt4:
            return ChatOpenAI(
                model=change_model,
                openai_api_key=cls.openai_api_key,
                temperature=temperature,
                openai_api_base=cls.openai_api_base,
            )

        if temperature > 0:
            return ChatOpenAI(
                model=cls.model,
                openai_api_key=cls.openai_api_key,
                temperature=temperature,
                openai_api_base=cls.openai_api_base,
            )

        return cls.llm

    @classmethod
    def setModel(cls, model):
        """Switch the model name used for subsequently created clients."""
        cls.model = model

    @classmethod
    def getChain(
        cls,
        system_template="",
        human_template="",
        change=False,
        change_model="gpt-4-0613",
        temperature=0,
        return_type="text",
        **kwargs,
    ):
        """Build a ``prompt | model | parser`` chain and invoke it with kwargs.

        ``return_type`` selects the parser: "json" pipes the output through
        ``utils.refine`` then a JSON parser, "code" through ``utils.refine``
        then a plain string parser, anything else returns the raw string.
        Remaining keyword arguments fill the prompt template variables.
        """
        # Renamed from `prompts`: the old local name shadowed the
        # module-level `prompts` package used by the other classmethods.
        message_templates = []
        if system_template:
            message_templates.append(
                SystemMessagePromptTemplate.from_template(system_template)
            )
        if human_template:
            message_templates.append(
                HumanMessagePromptTemplate.from_template(human_template)
            )
        chat_prompt = ChatPromptTemplate.from_messages(message_templates)

        if return_type == "json":
            parser = utils.refine | JsonOutputParser()
        elif return_type == "code":
            parser = utils.refine | StrOutputParser()
        else:
            parser = StrOutputParser()

        chain = (
            chat_prompt
            | cls.getModel(
                change=change, temperature=temperature, change_model=change_model
            )
            | parser
        )

        return chain.invoke(kwargs)

    @classmethod
    def title(cls, instruction):
        """Generate an app title; quotes are stripped from the model output."""
        return cls.getChain(
            system_template=prompts.title.system_template,
            human_template=prompts.title.human_template,
            change=False,
            temperature=0.8,
            instruction=instruction,
        ).replace('"', "").replace("'", "")

    @classmethod
    def appType(cls, instruction):
        """Classify the app idea (is_ai / is_chat / is_search / is_summary) as JSON."""
        return cls.getChain(
            system_template=prompts.app_type.system_template,
            human_template=prompts.app_type.human_template,
            change=True,
            instruction=instruction,
            return_type="json",
        )

    @classmethod
    def systemInputs(cls, instruction):
        """Extract the system inputs implied by the instruction."""
        return cls.getChain(
            system_template=prompts.system_inputs.system_template,
            human_template=prompts.system_inputs.human_template,
            change=False,
            instruction=instruction,
        )

    @classmethod
    def planWithInputs(cls, instruction, system_inputs, app_type):
        """Generate a task plan constrained to the tasks valid for ``app_type``,
        then normalize it with ``refinePlan``."""
        TASK_DESCRIPTIONS, TASK_NAMES, TASK_DTYPES = getTasks(app_type)[:3]
        helper = getPlanGenHelper(app_type)
        plan = cls.getChain(
            system_template=prompts.plan_with_inputs.system_template,
            human_template=prompts.plan_with_inputs.human_template,
            change=False,
            instruction=instruction,
            system_inputs=system_inputs,
            helper=helper,
            TASK_DESCRIPTIONS=TASK_DESCRIPTIONS,
            TASK_NAMES=TASK_NAMES,
            TASK_DTYPES=TASK_DTYPES,
        )
        return cls.refinePlan(plan)

    @classmethod
    def planFeedback(cls, instruction, plan):
        """Ask the LLM for structured (JSON) feedback on a plan."""
        return cls.getChain(
            system_template=prompts.plan_feedback.system_template,
            human_template=prompts.plan_feedback.human_template,
            change=False,
            instruction=instruction,
            plan=plan,
            return_type="json",
        )

    @classmethod
    def planRefiner(cls, instruction, plan, feedback, app_type):
        """Rewrite the plan according to feedback, using the GPT-4 upgrade when available."""
        _, TASK_NAMES, _, TASK_PURPOSES = getTasks(app_type)[:4]
        return cls.getChain(
            system_template=prompts.plan_refiner.system_template,
            human_template=prompts.plan_refiner.human_template,
            change=True,
            instruction=instruction,
            plan=plan,
            feedback=feedback,
            TASK_NAMES=TASK_NAMES,
            TASK_PURPOSES=TASK_PURPOSES,
        )

    @classmethod
    def tasks(cls, instruction, plan, app_type):
        """Turn the plan into a JSON task list and reformat it via utils."""
        TASK_DESCRIPTIONS, TASK_NAMES = getTasks(app_type)[:2]

        task_list = cls.getChain(
            system_template=prompts.tasks.system_template,
            human_template=prompts.tasks.human_template,
            instruction=instruction,
            plan=plan,
            TASK_DESCRIPTIONS=TASK_DESCRIPTIONS,
            TASK_NAMES=TASK_NAMES,
            return_type="json",
        )

        return utils.reformatTasks(task_list)

    @classmethod
    def taskController(cls, tasks, app_type):
        """Validate a task list against the rules for ``app_type``."""
        return validate(tasks, app_type)

    @classmethod
    def planController(cls, plan, app_type):
        """Validate a plan against the rules for ``app_type``."""
        return validate(plan, app_type)

    @classmethod
    def refineTasks(cls, instruction, tasks, feedback, app_type):
        """Regenerate the task list from feedback, then reformat it via utils."""
        _, TASK_NAMES, _, TASK_PURPOSES = getTasks(app_type)[:4]

        task_list = cls.getChain(
            system_template=prompts.task_refiner.system_template,
            human_template=prompts.task_refiner.human_template,
            instruction=instruction,
            tasks=tasks,
            feedback=feedback,
            TASK_NAMES=TASK_NAMES,
            TASK_PURPOSES=TASK_PURPOSES,
            return_type="json",
        )

        return utils.reformatTasks(task_list)

    @classmethod
    def combine(cls, instruction, code_snippets, plan):
        """Merge generated snippets into a single Streamlit app (v1 prompt)."""
        return cls.getChain(
            system_template=prompts.combine.system_template,
            human_template=prompts.combine.human_template,
            instruction=instruction,
            code_snippets=code_snippets,
            plan=plan,
            return_type="code",
        )

    @classmethod
    def howToUse(cls, plan):
        """Generate a 'How to use' markdown and wrap it in a sidebar call."""
        steps = cls.getChain(
            system_template=prompts.how_to_use.system_template,
            human_template=prompts.how_to_use.human_template,
            plan=plan,
        )

        total_code = f'st.sidebar.markdown("""{steps}""")\n'
        return total_code

    @classmethod
    def about(cls, instruction, title):
        """Generate an 'About' markdown and wrap it in sidebar calls."""
        markdown = cls.getChain(
            system_template=prompts.about.system_template,
            human_template=prompts.about.human_template,
            instruction=instruction,
            title=title,
        )

        code = f'\nst.sidebar.markdown("# About")\nst.sidebar.markdown("""{markdown}""")'
        return code

    @classmethod
    def imports(cls, code_snippets):
        """Collect every import statement found anywhere in the snippets."""
        return cls.getChain(
            system_template=prompts.imports.system_template,
            human_template=prompts.imports.human_template,
            code_snippets=code_snippets,
            return_type="code",
        )

    @classmethod
    def combine_v2(cls, code_snippets, function_names):
        """Merge snippets into one st.form-based app (v2 prompt).

        Falls back to the 16k-context model when the first attempt produces
        code that fails ``utils.catchErrors``.
        """
        code = cls.getChain(
            system_template=prompts.combine_v2.system_template,
            human_template=prompts.combine_v2.human_template,
            change=True,
            change_model="gpt-3.5-turbo",
            code_snippets=code_snippets,
            function_names=function_names,
            return_type="code",
        )

        code = autopep8.fix_code(code)

        has_problem = utils.catchErrors(code)

        if has_problem:
            print("Switching to the 16k...")
            code = cls.getChain(
                system_template=prompts.combine_v2.system_template,
                human_template=prompts.combine_v2.human_template,
                change=True,
                change_model="gpt-3.5-turbo-16k-0613",
                code_snippets=code_snippets,
                function_names=function_names,
                return_type="code",
            )
            # Fix: the retry result was previously returned without the
            # autopep8 normalization applied to the first attempt.
            code = autopep8.fix_code(code)

        return code

    @classmethod
    def feedback(cls, instruction, code):
        """Ask the LLM for free-form feedback on generated code."""
        return cls.getChain(
            system_template=prompts.feedback.system_template,
            human_template=prompts.feedback.human_template,
            instruction=instruction,
            code=code,
        )

    @classmethod
    def refinePlan(cls, plan):
        """Keep only plan steps that reference a concrete task call such as
        "[ui_input_text(...)]", and renumber the survivors from 1."""
        pattern = r"\[[a-zA-Z0-9_]+\(.*\)"
        steps = plan.strip().split("\n")
        refined_plan = []
        index = 1
        for step in steps:
            if re.search(pattern, step):
                # Strip any pre-existing "N." numbering before renumbering.
                current_step = re.sub(r"^\d+\.", "", step).strip()
                refined_plan.append(f"{index}. {current_step}")
                index += 1
        return "\n".join(refined_plan)

    @classmethod
    def addAboutAndHTU(cls, instruction, title, code_snippets, plan):
        """Inject the 'How to use' and 'About' sidebar blocks around the
        ``openai_api_key = st.sidebar.text_input(...)`` statement.

        The sleeps space out consecutive API calls (presumably rate-limit
        related — confirm against the provider limits).
        """
        sleep(1)
        how_to_markdown = cls.howToUse(plan=plan)
        sleep(2)
        about = cls.about(instruction=instruction, title=title)
        # Matches the whole text_input call, tolerating one level of nested parens.
        pattern = r'(openai_api_key\s*=\s*st\.sidebar\.text_input\((?:[^()]*|\([^)]*\))*\))'
        # replacement string with additional code
        replacement = how_to_markdown + r'\1' + about
        # substitute using regex
        final_code = re.sub(pattern, replacement, code_snippets, flags=re.DOTALL)
        return final_code

    @classmethod
    def getAboutAndHTU(cls, instruction, title, plan):
        """Return the 'How to use' and 'About' blocks without injecting them."""
        sleep(1)
        how_to = cls.howToUse(plan=plan)
        sleep(2)
        about = cls.about(instruction=instruction, title=title)
        return how_to, about

    @classmethod
    def refine(cls, instruction, code, feedback):
        """Regenerate code after applying textual feedback."""
        return cls.getChain(
            system_template=prompts.refine.system_template,
            human_template=prompts.refine.human_template,
            instruction=instruction,
            code=code,
            feedback=feedback,
            return_type="code",
        )

    @classmethod
    def final(cls, draft_code):
        """Produce the final cleaned-up version of the draft code."""
        return cls.getChain(
            system_template=prompts.final.system_template,
            human_template=prompts.final.human_template,
            draft_code=draft_code,
            return_type="code",
        )
# Prompt templates for generating the sidebar "About" section.
# Consumed by Chains.about(): {instruction} and {title} are filled at runtime,
# and the model's markdown output is wrapped in st.sidebar.markdown(...).

system_template = """
You are a helpful assistant that can generate a concise "about" markdown for an application.
The application is generated based on the given instruction. You will also see the title of the application.
The about markdown should be short and explanatory.
"""

# One-shot example followed by the live slots; the trailing "About:" cues the
# model to complete the pattern.
human_template = """
Instruction:Create an app that can answer question related to the uploaded documents
Title:📖KnowledgeGPT
About:📖KnowledgeGPT allows you to ask questions about your documents and get accurate answers with instant citations.


Instruction:{instruction}
Title:{title}
About:
"""
# Prompt templates for classifying an app idea into boolean capability flags.
# Consumed by Chains.appType() with return_type="json"; the doubled braces
# ({{ }}) are literal braces escaped for LangChain's template formatter.
# NOTE(review): the numbering repeats "4." for both is_search and is_summary —
# cosmetic defect in the prompt text, left as-is here.

system_template = """
Please classify the App Idea based on the following criterias and generate the appropriate JSON:

1. explanation:
Give detailed explanation about the decision for each one is_ai, is_chat, is_search, is_summary respectively.

2. is_ai:
- True if:
a. The app requires natural language understanding or generation.
b. It requires complex computations or algorithms beyond standard Python libraries like 'numpy', 'pandas', or 'requests'.
- False otherwise.

3. is_chat:
- True if the primary function of the app is to engage in conversation.
- False otherwise.

4. is_search:
- True if:
a. The app idea strictly requires information after cut off date and this information should be fetched in the whole web instead of specific websites.
- False otherwise.

4. is_summary:
- True if:
a. The app idea explicitly requires and mentions summarization.
- False otherwise.

Example JSON Format:

{{
"explanation":"string",
"is_ai":"true/false",
"is_chat":"true/false",
"is_search":"true/false",
"is_summary":"true/false"
}}
"""

# Six few-shot classification examples, then the live {instruction} slot.
human_template = """
App Idea: an agent that can get analysis of CSV file then summarize it.
JSON:{{
"explanation":"Making analysis requires ai. It does not include conversation. No up to date information is needed. Summarization is not mentioned anywhere in the app idea",
"is_ai":"true",
"is_chat":"false",
"is_search":"false",
"is_summary":"false"
}}

App Idea: an application that can get the word count of txt file.
JSON:{{
"explanation":"Word count is a simple python task so no ai is required. No conversation is expected. No up to date information is needed. Summarization is not mentioned anywhere in the app idea",
"is_ai":"false",
"is_chat":"false",
"is_search":"false",
"is_summary":"false"
}}

App Idea: create an application that can talk like Jeff Bezos
JSON:{{
"explanation":"To talk like Jeff Bezos, need generative text model so I need ai. Talking app includes conversation. No up to date information is needed. Summarization is not mentioned anywhere in the app idea",
"is_ai":"true",
"is_chat":"true",
"is_search":"false",
"is_summary":"false"
}}

App Idea: create an application that can find and list all the male names
JSON:{{
"explanation":"I need ai to filter the male names because it is not a simple Python task. It does not include conversation. No up to date information is needed. Summarization is not mentioned anywhere in the app idea",
"is_ai":"true",
"is_chat":"false",
"is_search":"false",
"is_summary":"false"
}}

App Idea: generate an agent that can give suggestions to the uploaded CV
JSON:{{
"explanation":"Giving suggestions is not a simple Python task so i need ai. Giving suggestions/advice does not require a conversation. No up to date information is needed. Summarization is not mentioned anywhere in the app idea",
"is_ai":"true",
"is_chat":"false",
"is_search":"false",
"is_summary":"false"
}}

App Idea: a system that can transform given one currency to another
JSON:{{
"explanation": "To transform from one currency to another, i need flexible analysis so i need ai. Currency transformation does not require conversation. I need to search the up to date currency information from web. Summarization is not mentioned anywhere in the app idea",
"is_ai":"true",
"is_chat":"false",
"is_search":"true",
"is_summary":"false"
}}

App Idea:{instruction}
JSON:
"""
# Prompt templates for the v1 snippet-combining step.
# Consumed by Chains.combine() with return_type="code": {code_snippets} is the
# concatenation of per-task snippets, and the model must return a single
# runnable Streamlit app.

system_template = """
You are a coding assistant specialized in working with Streamlit applications and error-free code generation.
Your task is, paying special attention to user input handling, state management and not getting any "not defined" error because of if statements without else.
Generate nothing else but only the code so that it can be directly used.
"""

human_template = """
Directly repeat import statements and function definitions
When you define a function with if statement, put else and initialize it otherwise, you will get "not defined" error

Create a button to trigger the functionality of the app.

Don't touch imports, and function definitions but only manage the state of the application.
Call functions if all user inputs(if any) are taken and the button is pressed.

Original Code:
{code_snippets}
################################
Error-free Code:
"""
# Prompt templates for the v2 snippet-combining step (st.form based).
# Consumed by Chains.combine_v2() with return_type="code". {function_names} is
# interpolated twice in the system prompt; the human prompt is a single worked
# draft->final example followed by the live {code_snippets} slot.

system_template = """
Regenerate the code by combining all the user input parts into st.form.
It is really important not to change other parts and the final code should be error-free and cannot include placeholders, so it should be a full code.
Copy all the function definitions and library imports as is and don't modify or replace them.
Show the result when the form is submitted under the if submit_button: statement.
Keep in mind that don't miss any function definition
Don't forget to add those functions with their original definitions as is

{function_names}

The final code content should be in the following format.
1. Get openai_api_key
2. Copy and paste all the functions as is
3. Create only a single global form
4. Under the global form, take all the user inputs
5. If form is submitted by st.form_submit_button, call the needed functions
6. Under the st.form_submit_button, show the results.

When you need to call any of the function here {function_names}, don't call any of them before st.form_submit_button
"""

human_template = """
=============================================================
DRAFT CODE 1:

openai_api_key = st.sidebar.text_input(
    "OpenAI API Key",
    placeholder="sk-...",
    value=os.getenv("OPENAI_API_KEY", ""),
    type="password",
)

def foo1():
    result = "res"
    return result

half_story = foo1()

if half_story:
    st.write(half_story)

user_choice = st.selectbox("What would you like to do next?", ["Choice1", "Choice2"])

def foo2(half_story,user_choice):
    result = half_story + user_choice
    return result

if half_story and user_choice:
    continued_story = foo2(half_story,user_choice)
else:
    continued_story = ""

if continued_story:
    st.markdown(continued_story)
#############################################################
FINAL CODE 1:

# Get openai_api_key
openai_api_key = st.sidebar.text_input(
    "OpenAI API Key",
    placeholder="sk-...",
    value=os.getenv("OPENAI_API_KEY", ""),
    type="password",
)

### Copy and paste all the functions as is

def foo1():
    result = "res"
    return result

def foo2(half_story,user_choice):
    result = half_story + user_choice
    return result


### Create a form

with st.form(key='story_game'):
    # Under the form, take all the user inputs
    text_input = st.text_input(label='Enter some text')
    user_choice = st.selectbox("What would you like to do next?", ["Choice1", "Choice2"])
    submit_button = st.form_submit_button(label='Submit Story')
    # If form is submitted by st.form_submit_button run the logic
    if submit_button:
        ######## Call the functions
        half_story = foo1()
        if text_input and user_choice :
            continued_story = foo2(text_input,user_choice)
        else:
            continued_story = ""

        ######## Show the results
        if half_story:
            #Under the st.form_submit_button, show the results.
            st.write(half_story)

        if continued_story:
            #Under the st.form_submit_button, show the results.
            st.markdown(continued_story)
############################################################# END OF THE CODE


=============================================================
DRAFT CODE 2:
{code_snippets}
#############################################################
FINAL CODE 2:

"""
# Prompt templates for reviewing a generated Streamlit app.
# Consumed by Chains.feedback(): {instruction} and {code} are filled at
# runtime and the model returns free-form review text.
# Fix: corrected typos and grammar in the prompt text sent to the model
# ("You are helpful assistant", "Are each if statement has else?",
# "functioanlity") — garbled instructions degrade response quality.

system_template = """
You are a helpful assistant that can read code and then give feedback.
This code is a streamlit implementation of a given instruction.
The key points that you need to consider are as follows:
1 - Are all the external functions imported correctly?
2 - Does each if statement have an else?
3 - Is there any state management issue in the code?
4 - Is there any button to trigger the functionality of the code, which is expected?
"""

human_template = """
Instruction:{instruction}
Code:{code}
Feedback:
"""
# Prompt templates for generating the sidebar "How to use" guide.
# Consumed by Chains.howToUse(): {plan} is filled at runtime and the
# resulting markdown is wrapped in st.sidebar.markdown(...). The prompt
# forces the output to start with a fixed header and a fixed step 1
# (the OpenAI API key step), so the guide always matches the generated UI.

system_template = """
You are an AI agent that is good at writing how to use markdown which includes the steps of applications that the user needs to know.
Your task is by looking at the provided plan, generating concise "how to use" markdown.
This how to use, will be an informative guide for the user about how to use the application.
That's why, don't mention the methods but only the parts that the user needs to know.

Aware that you continue on this below. This lines are mandatory:
'''
# How to use

1. Enter your [OpenAI API key](https://platform.openai.com/account/api-keys) above🔑

'''

Since OpenAI API Key is mentioned once, don't mention again, try to be as concise as possible.
Don't generate redundant steps.
Start with # How to use
Then 1. Enter your [OpenAI API key](https://platform.openai.com/account/api-keys) above🔑
Then continue 2....
"""

human_template = """
Plan:{plan}

"How to" Markdown:

"""
# Prompt templates that convert a plan into a JSON list of task objects.
# NOTE(review): this module is not imported by prompts/__init__.py in this
# view and its name suggests it was superseded by tasks.py — confirm before
# relying on or removing it. The doubled braces ({{ }}) are literal braces
# escaped for LangChain's template formatter.
# from .task_definitions import TASK_DESCRIPTIONS, TASK_NAMES

system_template = """
Create a Python list of task objects that align with the provided instruction and plan. Task objects must be Python dictionaries, and the output should strictly conform to a Python list of JSON objects.

You must use only the tasks provided in the description:

{TASK_DESCRIPTIONS}

task_name could be only one of the task names below:
{TASK_NAMES}
"""

# Three few-shot examples (instruction + plan -> task-object list), then the
# live {instruction}/{plan} slots.
human_template = """
Create a Python list of task objects that align with the provided instruction and all steps of the plan.

Task objects must be Python dictionaries, and the output should strictly conform to a Python list of JSON objects.

Follow these detailed guidelines:

Task Objects: Create a Python dictionary for each task using the following keys:

step: It represents the step number corresponding to which plan step it matches
task_type: Should match one of the task names provided in task descriptions.
task_name: Define a specific name for the task that aligns with the corresponding plan step.
input_key: List the "output_key" values from parent tasks used as input or "none" if there's no input or if it comes from the user.
input_data_type: The list of data types of the inputs
output_key: Designate a unique key for the task's output. It is compatible with the output type if not none
output_data_type: The data type of the output
description: Provide a brief description of the task's goal, mirroring the plan step.

Ensure that each task corresponds to each step in the plan, and that no step in the plan is omitted.
Ensure that output_key is unique for each task.
Ensure that each task corresponds to each step in the plan
Ensure that an output type of task does not change.

##########################
Instruction: Create a system that can analyze the user
Plan:
Let’s think step by step.
1. Generate question to understand the personality of the user by 'prompt_template'
2. Show the question to the user with 'ui_output_text'
3. Get answer from the user for the asked question with 'ui_input_text'
4. Analyze user's answer by 'prompt_template'.
5. Show the analyze to the user with 'ui_output_text'
List of Task Objects (Python List of JSON):
[
{{
"step": 1,
"task_type": "prompt_template",
"task_name": "generate_question",
"input_key": "none",
"input_data_type": "none",
"output_key": "question",
"output_data_type": "string",
"description": "Generate question to understand the personality of the user"
}},
{{
"step": 2,
"task_type": "ui_output_text",
"task_name": "show_question",
"input_key": "question",
"input_data_type": "string",
"output_key": "none",
"output_data_type": "none",
"description": "Display the AI-generated question to the user."
}},
{{
"step": 3,
"task_type": "ui_input_text",
"task_name": "get_answer",
"input_key": "none",
"input_data_type": "none",
"output_key": "answer",
"output_data_type": "string",
"description": "Ask the user to input the answer for the generated question"
}},
{{
"step": 4,
"task_type": "prompt_template",
"task_name": "analyze_answer",
"input_key": ["question", "answer"],
"input_data_type": ["string","string"],
"output_key": "prediction",
"output_data_type": "string",
"description": "Predict horoscope of the user given the question and user's answer to that question"
}},
{{
"step": 5,
"task_type": "ui_output_text",
"task_name": "show_analyze",
"input_key": "prediction",
"input_data_type": "string",
"output_key": "none",
"output_data_type": "none",
"description": "Display the AI's horoscope prediction"
}}
]
##########################
Instruction: Create a system that can generate blog post related to a website
Plan:
1. Get website URL from the user with 'ui_input_text'
2. Use 'doc_loader' to load the page as Document
3. Use 'doc_to_string' to convert Document to string
4. Use 'prompt_template' to generate a blog post using the result of doc_to_string
5. If blog post is generated, show it to the user with 'ui_output_text'.
List of Task Objects (Python List of JSON):
[
{{
"step": 1,
"task_type": "ui_input_text",
"task_name": "get_url",
"input_key": "none",
"input_data_type": "none",
"output_key": "url",
"output_data_type": "string",
"description": "Get website url from the user"
}},
{{
"step": 2,
"task_type": "doc_loader",
"task_name": "doc_loader",
"input_key": "url",
"input_data_type": "string",
"output_key": "docs",
"output_data_type": "Document",
"description": "Load the document from the website url"
}},
{{
"step": 3,
"task_type": "doc_to_string",
"task_name": "convertDocToString",
"input_key": "docs",
"input_data_type": "Document",
"output_key": "docs_string",
"output_data_type": "string",
"description": "Convert docs to string"
}},
{{
"step": 4,
"task_type": "prompt_template",
"task_name": "writeBlogPost",
"input_key": ["docs_string"],
"input_data_type": ["string"],
"output_key": "blog",
"output_data_type": "string",
"description": "Write blog post related to the context of docs_string"
}},
{{
"step": 5,
"task_type": "ui_output_text",
"task_name": "show_blog",
"input_key": "blog",
"input_data_type": "string",
"output_key": "none",
"output_data_type": "none",
"description": "Display the generated blog post to the user"
}}
]
##########################
Instruction: Summarize uploaded file and convert it to language that user gave.
Plan:
1. Get file path using 'ui_input_file'
2. Use 'ui_input_text' to get the output language from the user
3. Use 'doc_loader' to load the file as Document from file path
4. Use 'summarize' to summarize the Document
5. Use 'prompt_template' to translate the summarization
6. If translation is ready, show it to the user with 'ui_output_text'.
List of Task Objects (Python List of JSON):
[
{{
"step": 1,
"task_type": "ui_input_file",
"task_name": "get_path",
"input_key": "none",
"input_data_type": "none",
"output_key": "file_path",
"output_data_type": "string",
"description": "Get path of the file that the user upload"
}},
{{
"step": 2,
"task_type": "ui_input_text",
"task_name": "get_language",
"input_key": "none",
"input_data_type": "none",
"output_key": "language",
"output_data_type": "string",
"description": "Get output language for translation"
}},
{{
"step": 3,
"task_type": "doc_loader",
"task_name": "doc_loader",
"input_key": "file_path",
"input_data_type": "string",
"output_key": "docs",
"output_data_type": "Document",
"description": "Load the document from the given path"
}},
{{
"step": 4,
"task_type": "summarize",
"task_name": "summarizeDoc",
"input_key": "docs",
"input_data_type": "Document",
"output_key": "summarization_result",
"output_data_type": "string",
"description": "Summarize the document"
}},
{{
"step": 5,
"task_type": "prompt_template",
"task_name": "translate",
"input_key": ["summarization_result","language"],
"input_data_type": ["string","string"],
"output_key": "translation",
"output_data_type": "string",
"description": "Translate the document into the given language"
}},
{{
"step": 6,
"task_type": "ui_output_text",
"task_name": "show_translation",
"input_key": "translation",
"input_data_type": "string",
"output_key": "none",
"output_data_type": "none",
"description": "Display the file summary translation to the user"
}}
]
##########################
Instruction:{instruction}
Plan : {plan}
List of Task Objects (Python List of JSON):
"""
Show the question to the user by 'ui_output_text' 36 | 3. Get answer from the user for the asked question by 'ui_input_text' 37 | 4. Analyze user's answer by 'prompt_template'. 38 | 5. Show the result to the user by 'ui_output_text'. 39 | 40 | Instruction: Create a system that can summarize a powerpoint file 41 | System Inputs:[powerpoint_file] 42 | Let's work this out in a step by step way to be sure we have the right answer. 43 | 1. Get file path from the user by 'ui_input_file' for the powerpoint file 44 | 2. Use 'doc_loader' to load the powerpoint file as Document from the file path. 45 | 3. Use 'doc_summarizer' to generate summarization from the Document. 46 | 4. If summarization is ready, display it to the user by 'ui_output_text'. 47 | 48 | Instruction: Create a translator which translates to any language 49 | System Inputs:[output_language, source_text] 50 | Let's work this out in a step by step way to be sure we have the right answer. 51 | 1. Get output language from the user by 'ui_input_text' 52 | 2. Get source text which will be translated from the user by 'ui_input_text' 53 | 3. If all the inputs are filled, use 'prompt_template' to translate text to output language 54 | 4. If translated text is ready, show it to the user by 'ui_output_text' 55 | 56 | Instruction: Generate a system that can generate tweet from hashtags and give a score for the tweet. 57 | System Inputs:[hashtags] 58 | Let's work this out in a step by step way to be sure we have the right answer. 59 | 1. Get hashtags from the user by 'ui_input_text' 60 | 2. If hashtags are filled, use 'prompt_template' to create tweet. 61 | 3. If tweet is created, use 'prompt_template' to generate a score from the tweet. 62 | 4. If score is created, display tweet and score to the user by 'ui_output_text'. 63 | 64 | Instruction: Summarize a text taken from the user 65 | System Inputs:[text] 66 | Let's work this out in a step by step way to be sure we have the right answer. 67 | 1. 
Get text from the user by 'ui_input_text' 68 | 2. Use 'prompt_template' to summarize the given text. 69 | 3. If summarization is ready, display it to the user by 'ui_output_text'. 70 | 71 | Instruction: Create a platform which lets the user select a lecture and then show topics for that lecture 72 | then give a question to the user. After user gives his/her answer, it gives a score for the answer and give explanation. 73 | System Inputs:[lecture, topic, user_answer] 74 | Let's work this out in a step by step way to be sure we have the right answer. 75 | 1. Use 'prompt_template' to generate lectures 76 | 2. Among those generated by prompt_template, get lecture from the user by 'ui_input_text'. 77 | 3. After user selects a lecture, generate topics related to that lecture by 'prompt_template'. 78 | 4. Among those generated by prompt_template, get topic from the user by 'ui_input_text'. 79 | 5. After user selects the topic, use 'prompt_template' to generate a question related to that topic and lecture 80 | 6. Get answer from the user by 'ui_input_text'. 81 | 7. Use 'prompt_template' to generate the real answer and score for the user's answer. 82 | 8. Display the real answer and score for the user's answer by 'ui_output_text'. 83 | 84 | Instruction: Create a system that can generate blog post related to a website 85 | System Inputs: [url] 86 | Let's work this out in a step by step way to be sure we have the right answer. 87 | 1. Get website URL from the user by 'ui_input_text' 88 | 2. Use 'doc_loader' to load the website as Document from URL 89 | 3. Use 'doc_to_string' to convert Document to string content 90 | 4. If string content is generated, use 'prompt_template' to generate a blog post related to that string content. 91 | 5. If blog post is generated, display it to the user by 'ui_output_text'. 92 | 93 | Instruction: {instruction} 94 | Let's work this out in a step by step way to be sure we have the right answer. 
95 | """ 96 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/plan1.py: -------------------------------------------------------------------------------- 1 | from .task_definitions import TASK_DESCRIPTIONS, TASK_DTYPES, TASK_NAMES 2 | 3 | system_template = f""" 4 | Create a plan to fulfill the given instruction. 5 | The plan should be broken down into clear, logical steps that detail how to accomplish the task. 6 | Consider all necessary user interactions, system processes, and validations, 7 | and ensure that the steps are in a logical sequence that corresponds to the given instruction. 8 | Don't generate impossible steps in the plan because only those tasks are available: 9 | {TASK_DESCRIPTIONS} 10 | 11 | Pay attention to the input_data_type and the output_data_type. 12 | If one of the task's output is input of another, then output_data_type of previous one 13 | should be the same as input_data_type of successor. 14 | 15 | Only those task types are allowed to be used: 16 | {TASK_NAMES} 17 | 18 | Highly pay attention to the input data type and the output data type of the tasks while creating the plan. These are the data types: 19 | 20 | {TASK_DTYPES} 21 | 22 | When you create a step in the plan, its input data type 23 | either should be none or the output data type of the caller step. 24 | 25 | If you use a task in a step, highly pay attention to the input data type and the output data type of the task because it should be compatible with the step. 26 | 27 | """ 28 | 29 | human_template = """ 30 | Don't generate redundant steps which is not meant in the instruction. 31 | 32 | 33 | Instruction: Application that can analyze the user 34 | System Inputs: [] 35 | Let’s think step by step. 36 | 1. Generate question to understand the personality of the user by [prompt_template() ---> question] 37 | 2. Show the question to the user [ui_output_text(question)] 38 | 3. 
Get answer from the user for the asked question by [ui_input_text(question) ---> answer] 39 | 4. Analyze user's answer by [prompt_template(question,answer) ---> analyze] 40 | 5. Show the result to the user by [ui_output_text(analyze)]. 41 | 42 | Instruction: Create a system that can summarize a powerpoint file 43 | System Inputs:[powerpoint_file] 44 | Let’s think step by step. 45 | 1. Get file path from the user for the powerpoint file [ui_input_file() ---> file_path] 46 | 2. Load the powerpoint file as Document from the file path [doc_loader(file_path) ---> file_doc] 47 | 3. Generate summarization from the Document [doc_summarizer(file_doc) ---> summarized_text] 48 | 5. If summarization is ready, display it to the user [ui_output_text(summarized_text)] 49 | 50 | Instruction: Create a translator which translates to any language 51 | System Inputs:[output_language, source_text] 52 | Let’s think step by step. 53 | 1. Get output language from the user [ui_input_text() ---> output_language] 54 | 2. Get source text which will be translated from the user [ui_input_text() ---> source_text] 55 | 3. If all the inputs are filled, use translate text to output language [prompt_template(output_language, source_text) ---> translated_text] 56 | 4. If translated text is ready, show it to the user [ui_output_text(translated_text)] 57 | 58 | Instruction: Generate a system that can generate tweet from hashtags and give a score for the tweet. 59 | System Inputs:[hashtags] 60 | Let’s think step by step. 61 | 1. Get hashtags from the user [ui_input_text() ---> hashtags] 62 | 2. If hashtags are filled, create the tweet [prompt_template(hashtags) ---> tweet] 63 | 3. If tweet is created, generate a score from the tweet [prompt_template(tweet) ---> score] 64 | 4. If score is created, display tweet and score to the user [ui_output_text(score)] 65 | 66 | Instruction: Summarize a text taken from the user 67 | System Inputs:[text] 68 | Let’s think step by step. 69 | 1. 
Get text from the user [ui_input_text() ---> text] 70 | 2. Summarize the given text [prompt_template(text) ---> summarized_text] 71 | 3. If summarization is ready, display it to the user [ui_output_text(summarized_text)] 72 | 73 | Instruction: Create a system that can generate blog post related to a website 74 | System Inputs: [url] 75 | Let’s think step by step. 76 | 1. Get website URL from the user [ui_input_text() ---> url] 77 | 2. Load the website as Document from URL [doc_loader(url) ---> web_doc] 78 | 3. Convert Document to string content [doc_to_string(web_doc) ---> web_str ] 79 | 4. If string content is generated, generate a blog post related to that string content [prompt_template(web_str) ---> blog_post] 80 | 5. If blog post is generated, display it to the user [ui_output_text(blog_post)] 81 | 82 | Instruction: {instruction} 83 | System Inputs:{system_inputs} 84 | Let’s think step by step. 85 | """ 86 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/plan_feedback.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | Generate a feedback JSON to the given plan which is prepared for the given instruction. 3 | The JSON includes 2 keys which are "success" and "feedback". 4 | "feedback" corresponds to the feedback to the plan. 5 | "success" corresponds to the success of the plan. If the plan is good then "success" should be True. Otherwise, it should be False. 6 | 7 | In each step, there are tasks in the below format: 8 | [$task_name($args) ---> $output] 9 | 10 | You should check 2 things. 11 | 12 | 1.Only those task names are allowed to be used: 13 | {TASK_NAMES} 14 | 15 | 2. If one of the task's output is input of another, then output_data_type of previous one 16 | should be the same as input_data_type of successor. 
17 | These are the data types: 18 | {TASK_DTYPES} 19 | """ 20 | 21 | human_template = """ 22 | Instruction: {instruction} 23 | Plan:{plan} 24 | Feedback JSON: 25 | """ 26 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/plan_refiner.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | Create a new plan which does the given instruction that does not have the problems of the old plan 3 | 4 | Each step can only use one of the functions below: 5 | 6 | {TASK_NAMES} 7 | 8 | These are the explanations of those functions: 9 | 10 | {TASK_PURPOSES} 11 | 12 | The plan should be in the same format as the Problematic Plan but it cannot include the problem in the "Problems" section. 13 | 14 | It is extremely important that all reasoning steps should be in the following format: 15 | $step_num. $description [$task_type($arguments) ---> $result] 16 | 17 | $step_num: is positive integer showing the order of the reasoning step. 18 | $description : says the responsibility of the task. 19 | $task_type : one of the available functions in {TASK_NAMES} 20 | $arguments: input variable(s) for the function (it should be output of the one of the previous functions) 21 | $result : output variable for the function 22 | """ 23 | 24 | human_template = """ 25 | Instruction:{instruction} 26 | 27 | Problematic Plan: 28 | {plan} 29 | 30 | Problems: {feedback} 31 | 32 | New Refined Plan: 33 | """ 34 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/plan_with_inputs.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are a head of engineering team that gives plan to the developer to write application code. 3 | You will see the Client's Message. The developer only does what you say and he doesn't know Client's Message. 
4 | The plan should be broken down into clear, logical steps that detail how to develop the application. 5 | Consider all necessary user interactions, system processes, and validations, 6 | and ensure that the steps are in a logical sequence that corresponds to the given Client's Message. 7 | Don't generate impossible steps in the plan because only those tasks are available: 8 | {TASK_DESCRIPTIONS} 9 | 10 | Pay attention to the input_data_type and the output_data_type. 11 | If one of the task's output is input of another, then output_data_type of previous one 12 | should be the same as input_data_type of successor. 13 | 14 | Only those task types are allowed to be used: 15 | {TASK_NAMES} 16 | 17 | Highly pay attention to the input data type and the output data type of the tasks while creating the plan. These are the data types: 18 | 19 | {TASK_DTYPES} 20 | 21 | When you create a step in the plan, its input data type 22 | either should be none or the output data type of the caller step. 23 | 24 | If you use a task in a step, highly pay attention to the input data type and the output data type of the task because it should be compatible with the step. 25 | 26 | {helper} 27 | """ 28 | 29 | human_template = """ 30 | Don't generate redundant steps which is not meant in the instruction. 31 | For chat-based inputs, use "ui_input_chat" and chat-based outputs use "ui_output_chat" 32 | Keep in mind that you cannot use python task just after plan_and_execute task. 33 | 34 | {helper} 35 | 36 | Client's Message: Application that can analyze the user 37 | System Inputs: [] 38 | Let’s think step by step. 39 | 1. Generate question to understand the personality of the user by [prompt_template() ---> question] 40 | 2. Show the question to the user [ui_output_text(question)] 41 | 3. Get answer from the user for the asked question by [ui_input_text(question) ---> answer] 42 | 4. Analyze user's answer by [prompt_template(question,answer) ---> analyze] 43 | 5. 
Show the result to the user by [ui_output_text(analyze)]. 44 | 45 | Client's Message: Create a system that can summarize a powerpoint file 46 | System Inputs:[powerpoint_file] 47 | Let’s think step by step. 48 | 1. Get file path from the user for the powerpoint file [ui_input_file() ---> file_path] 49 | 2. Load the powerpoint file as Document from the file path [doc_loader(file_path) ---> file_doc] 50 | 3. Generate summarization from the Document [doc_summarizer(file_doc) ---> summarized_text] 51 | 5. If summarization is ready, display it to the user [ui_output_text(summarized_text)] 52 | 53 | Client's Message: Create a translator app which translates to any language 54 | System Inputs:[output_language, source_text] 55 | Let’s think step by step. 56 | 1. Get output language from the user [ui_input_text() ---> output_language] 57 | 2. Get source text which will be translated from the user [ui_input_text() ---> source_text] 58 | 3. If all the inputs are filled, translate text to output language [prompt_template(output_language, source_text) ---> translated_text] 59 | 4. If translated text is ready, show it to the user [ui_output_text(translated_text)] 60 | 61 | Client's Message: Generate a system that can generate tweet from hashtags and give a score for the tweet. 62 | System Inputs:[hashtags] 63 | Let’s think step by step. 64 | 1. Get hashtags from the user [ui_input_text() ---> hashtags] 65 | 2. If hashtags are filled, create the tweet [prompt_template(hashtags) ---> tweet] 66 | 3. If tweet is created, generate a score from the tweet [prompt_template(tweet) ---> score] 67 | 4. If score is created, display tweet and score to the user [ui_output_text(score)] 68 | 69 | Client's Message: Create an app that enable me to make conversation with a mathematician 70 | System Inputs:[text] 71 | Let’s think step by step. 72 | 1. Get message from the user [ui_input_chat() ---> text] 73 | 2. 
Generate the response coming from the mathematician [chat(text) ---> mathematician_response] 74 | 3. If response is ready, display it to the user with chat interface [ui_output_chat(mathematician_response)] 75 | 76 | Client's Message: Summarize a text taken from the user 77 | System Inputs:[text] 78 | Let’s think step by step. 79 | 1. Get text from the user [ui_input_text() ---> text] 80 | 2. Summarize the given text [prompt_template(text) ---> summarized_text] 81 | 3. If summarization is ready, display it to the user [ui_output_text(summarized_text)] 82 | 83 | Client's Message: Create a system that can generate blog post related to a website 84 | System Inputs: [url] 85 | Let’s think step by step. 86 | 1. Get website URL from the user [ui_input_text() ---> url] 87 | 2. Load the website as Document from URL [doc_loader(url) ---> web_doc] 88 | 3. Convert Document to string content [doc_to_string(web_doc) ---> web_str ] 89 | 4. If string content is generated, generate a blog post related to that string content [prompt_template(web_str) ---> blog_post] 90 | 5. If blog post is generated, display it to the user [ui_output_text(blog_post)] 91 | 92 | Client's Message: {instruction} 93 | System Inputs:{system_inputs} 94 | Let’s think step by step. 95 | """ 96 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/prompt_chat_refiner.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You will refine the given JSON and return exactly with the same keys according to the given feedback. 3 | You will only change "system_template" and/or "template" depending on the feedback. 
4 | In the templates, you are supposed to put strings in curly braces only in the "Inputs" list 5 | """ 6 | 7 | human_template = """ 8 | Original JSON: 9 | {templates} 10 | 11 | Inputs:{inputs} 12 | 13 | Feedback: {feedback} 14 | 15 | Refined JSON: 16 | """ 17 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/refine.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are a helpful assistant that can read code, feedback and instruction to generate the refined version of the code 3 | You are supposed to fix all the problems given to you in the feedback 4 | """ 5 | 6 | human_template = """ 7 | Instruction:{instruction} 8 | Code:{code} 9 | Feedback:{feedback} 10 | Refined Code: 11 | """ 12 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/self_refinement/__init__.py: -------------------------------------------------------------------------------- 1 | from . import * 2 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/self_refinement/final_refiner.py: -------------------------------------------------------------------------------- 1 | FEEDBACK_PROMPT = """ 2 | You are a Streamlit and Python expert that can detect the problems of the given Streamlit code designed to do the given instruction. 3 | You are supposed to check 5 types of problems in the code: 4 | 1. All variables and functions should be used after they are defined. 5 | 2. If any variable initialized with "if" statement then it should have "else" to get rid of NameError. 6 | 3. Since Streamlit is a stateless library, all langchain functions should be called after all the parameters are taken. 7 | 4. All buttons must be form buttons. 8 | It is important because form button is preserving the state of the textareas under it. 
You can find the classical example below: 9 | ``` 10 | with st.form("my_form"): 11 | st.write("Inside the form") 12 | slider_val = st.slider("Form slider") 13 | checkbox_val = st.checkbox("Form checkbox") 14 | 15 | # Every form must have a submit button. 16 | submitted = st.form_submit_button("Submit") 17 | if submitted: 18 | st.write("slider", slider_val, "checkbox", checkbox_val) 19 | 20 | st.write("Outside the form") 21 | ``` 22 | 5. All the display related parts must be under form button. 23 | ################################################################ 24 | You cannot generate code, you can only analyze and give feedback for these points. 25 | 26 | If you can find any problem related to those 5 points list them. 27 | If you cannot find any problem in the code, only say 28 | 29 | Instruction:{instruction} 30 | Code:{result} 31 | """ 32 | 33 | REFINEMENT_PROMPT = """ 34 | You are Streamlit and Python expert that can refine the given code according to the taken feedback. 35 | The draft code is written for the given instruction but there are somme problem in the code. 36 | According to the given feedback, refine the code without creating a new problem or deleting parts. 37 | 38 | Refined Code: 39 | """ 40 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/system_inputs.py: -------------------------------------------------------------------------------- 1 | system_template = f""" 2 | You are a system architect that can determine system inputs of the architecture that the project needs even if it is not explicity mentioned in the Project Decription. 3 | You will get Project Decription from client then generate list of system inputs. 4 | This list includes both the initial and intermediate inputs that the system should take to work properly 5 | If the system is chat-based then, it should have an input corresponding to that as one of the system inputs such as message. 
6 | It should be a valid Python list 7 | """ 8 | 9 | human_template = """ 10 | Project Description: Application that can analyze the user 11 | System Inputs: ["answer"] 12 | 13 | Project Description: Create a system that can summarize a powerpoint file 14 | System Inputs:["powerpoint_file"] 15 | 16 | Project Description: Create a Bill Gates clone 17 | System Inputs:["message"] 18 | 19 | Project Description: Generate a system that enable me to give the teacher field then make a chat with the teacher. 20 | System Inputs:["message", "teacher_field"] 21 | 22 | Project Description: Create a translator which translates to any language 23 | System Inputs:["output_language", "source_text"] 24 | 25 | Project Description: Create an app that I can chat with 26 | System Inputs:["message"] 27 | 28 | Project Description: Generate a system that can generate tweet from hashtags and give a score for the tweet. 29 | System Inputs:["hashtags"] 30 | 31 | Project Description: Generate a chat-based system that can analyze the given csv file. 32 | System Inputs:["message", "csv_file"] 33 | 34 | Project Description: Summarize a text taken from the user 35 | System Inputs:["text"] 36 | 37 | Project Description: Create a platform which lets the user select a lecture and then show topics for that lecture 38 | then give a question to the user. After user gives his/her answer, it gives a score for the answer and give explanation. 
39 | System Inputs:["lecture", "topic", "user_answer"] 40 | 41 | Project Description: Create a system that can generate blog post related to a website 42 | System Inputs: ["url"] 43 | 44 | Project Description: {instruction} 45 | System Inputs: 46 | """ 47 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_controller.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are a helpful assistant for checking if the "Generated Tasks" are correct in terms of input_data_type, output_data_type and their connections. 3 | These are the important keys of task objects that you will analyze: 4 | 5 | task_type: Should match one of the task names provided in task descriptions. 6 | task_name: Define a specific name for the task that aligns with the corresponding plan step. 7 | input_key: List the "output_key" values from parent tasks used as input or "none" if there's no input or if it comes from the user. 8 | input_data_type: The list of data types of the inputs 9 | output_key: Designate a unique key for the task's output. It is compatible with the output type if not none 10 | output_data_type: The data type of the output 11 | 12 | You will check if all the generated tasks' input_data_type and output_data_type are compatible with the original tasks. 13 | 14 | These are the original tasks that you will compare with: 15 | 16 | "Original Tasks": {TASK_DESCRIPTIONS} 17 | 18 | You will create a JSON object with the following 2 keys: 19 | 20 | feedback: List of feedbacks for each task by comparing it with the task in the "Original Tasks" 21 | valid: It is a boolean value that indicates whether these tasks are valid (no problem) or not. 
22 | """ 23 | 24 | human_template = """ 25 | "Generated Tasks" : {tasks} 26 | JSON: 27 | """ 28 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/__init__.py: -------------------------------------------------------------------------------- 1 | from . import * 2 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/chat.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | Generate a prompt to guide the model in executing a specific role. It acts as directives, providing the context and structure needed for the model to respond appropriately. 3 | 4 | Components: 5 | 1. "system_template": Describes the model's role and task for a given instruction. This string will be used with system_template.format(...) so only use curly braces for inputs 6 | 2. "human_input": It is one of the input keys from the "Inputs" list. It should be the most appropriate one that you think it is coming from chat input. 7 | 3. "variety": Indicates how creative or deterministic the model's response should be. 8 | 4. "function_name": A unique identifier for the specific task or instruction. 9 | 10 | IMPORTANT NOTE: 11 | - Write "system_template" in a way that, system_template.format(input=something for input in inputs) work. 12 | It should also have {{chat_history}} 13 | What I mean is that, put all the elements of Inputs inside of system_template with curly braces so that I can format it with predefined parameters. 14 | Always put the most similar variable name which should be coming from chat input in curly braces at the end. 15 | It should be strictly a JSON format so that it can be directly used by json.loads function in Python. 
16 | """ 17 | 18 | human_template = """ 19 | IMPORTANT NOTE: 20 | - ONLY the variables listed under "Inputs" MUST be included in either the "system_template" section within curly braces (e.g., '{{variable_name}}'). Do NOT include any other parameters within curly braces. 21 | - Ensure that the exact variable names listed in "Inputs" are used without any modifications. 22 | - If a variable is listed in "Inputs," it must appear within curly braces in the "system_template". 23 | - It should be strictly a JSON format so that it can be directly used by json.loads function in Python. 24 | ========================================= 25 | Instruction: Generate a blog post from a title. 26 | Inputs: ["human_input","title"] 27 | Args: {{ 28 | "system_template":" 29 | You are a chatbot having a conversation with a human. You are supposed to write a blog post from given title. Human want you to generate a blog post but you are also open to feedback and according to the given feedback, you can refine the blog \n\nTitle:{{title}}\n\n{{chat_history}}\nHuman: {{human_input}}\nBlogger:", 30 | "human_input":"human_input", 31 | "variety": "True", 32 | "function_name": "chat_blogger" 33 | }} 34 | ########################################## 35 | Instruction: Generate a response in the style of a psychologist with a given tone. 36 | Inputs: ["talk_input","tone"] 37 | Args: {{ 38 | "system_template": "You are a psychologist. Reply to your patience with the given tone\n\nTone:{{tone}}\n\n{{chat_history}}\nPatience: {{talk_input}}\nPsychologist:", 39 | "human_input":"talk_input", 40 | "variety": "False", 41 | "function_name": "talk_like_a_psychologist" 42 | }} 43 | ########################################## 44 | Instruction: Answer question related to the uploaded powerpoint file. 
45 | Inputs: ["question","powerpoint_doc"] 46 | Args: {{ 47 | "system_template": "You are a chatbot having a conversation with a human.\n\nGiven the following extracted parts of a long document, chat history and a question, create a final answer.\n\n{{powerpoint_doc}}\n\n{{chat_history}}\nHuman: {{question}}\nChatbot:", 48 | "human_input":"question", 49 | "variety": "False", 50 | "function_name": "answer_file_question" 51 | }} 52 | ########################################## 53 | Instruction: Generate answer similar to a mathematician 54 | Inputs: ["human_input"] 55 | Args: {{ 56 | "system_template": "You are a mathematician. Solve the human's mathematics problem as efficient as possible.\n\n{{chat_history}}\nHuman: {{human_input}}\nMathematician:", 57 | "human_input":"human_input", 58 | "variety": "True", 59 | "function_name": "solveMathProblem" 60 | }} 61 | ########################################## 62 | Instruction:{instruction} 63 | Inputs:{inputs} 64 | Args: 65 | """ 66 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/detailed_description.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are a helpful assistant that can transform the given directive. 3 | This directive is given to the ai-based question answering system. However, it is possible that the directive misses the functionality of the instruction. 4 | Your task is to refine the directive in a way that it tells the functionality of the instruction so that the system knows how to behave. 5 | You can see the real functionality of the model by looking at the Instruction. 6 | Please generate a 1 sentence long directive. Use the Original Directive's style while generating the Refined Directive. 
7 | """ 8 | 9 | human_template = """ 10 | Instruction:{app_idea} 11 | 12 | Original Directive:{instruction} 13 | 14 | Refined Directive: 15 | """ -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/doc_load.py: -------------------------------------------------------------------------------- 1 | loaders = """ 2 | For Local TXT file: 3 | TextLoader 4 | ################################ 5 | For Web Page: 6 | WebBaseLoader 7 | ################################ 8 | For Online PDF: 9 | OnlinePDFLoader 10 | ################################ 11 | For Local PDF: 12 | UnstructuredPDFLoader 13 | ################################ 14 | For Power Point: 15 | UnstructuredPowerPointLoader 16 | ################################ 17 | For CSV: 18 | CSVLoader 19 | ################################ 20 | For Excel: 21 | UnstructuredExcelLoader 22 | ################################ 23 | For Docx: 24 | UnstructuredWordDocumentLoader 25 | ################################ 26 | For Youtube: 27 | YoutubeLoader 28 | ################################ 29 | For Notion Zip File: 30 | NotionDirectoryLoader 31 | """ 32 | 33 | system_template = f""" 34 | Based on the provided context in 'Previous Code', choose the most appropriate loader. 35 | 36 | These are your loader options: 37 | 38 | {loaders} 39 | """ 40 | 41 | human_template = """ 42 | Use the information from 'Previous Code' to determine the loader from one of the loader options. 
43 | Don't write any explanation but directly say the loader option 44 | 45 | Instruction: {instruction} 46 | Previous Code: {code_snippets} 47 | Loader Option: 48 | """ 49 | 50 | imports = """ 51 | import shutil 52 | from langchain.document_loaders import * 53 | 54 | """ 55 | 56 | functions = """ 57 | 58 | def {function_name}({argument}): 59 | {loader_line} 60 | docs = loader.load() 61 | return docs 62 | """ 63 | 64 | outputs = """ 65 | if {argument}: 66 | {variable} = {function_name}({argument}) 67 | else: 68 | {variable} = '' 69 | """ -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/doc_to_string.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are a good Python developer and your task is creating and calling a function called {function_name}, 3 | which converts Document object to a string. 4 | You can easily convert Document object docs to string like in the following: 5 | 6 | {variable} = "\".join([doc.page_content for doc in {argument}]) 7 | 8 | Use that function according to the given argument and variable name. 9 | You will assign {variable} to the string version of {argument}. 
10 | 11 | Here is the part of the code that you are supposed to continue: 12 | {code_snippets} 13 | """ 14 | 15 | human_template = """ 16 | Argument:{argument} 17 | Variable:{variable} 18 | Python Code: 19 | """ 20 | 21 | outputs = '{variable} = "".join([doc.page_content for doc in {argument}])' 22 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/hub_bash.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt/chains/prompts/task_list/hub_bash.py -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/hub_llm_math.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt/chains/prompts/task_list/hub_llm_math.py -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/hub_meteo.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt/chains/prompts/task_list/hub_meteo.py -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/hub_question_answering.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt/chains/prompts/task_list/hub_question_answering.py -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/pal_chain.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt/chains/prompts/task_list/pal_chain.py -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/path_to_file.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are good at writing Python code. 3 | You are supposed to create a function and call that function which does 4 | the given instruction. 5 | Here is the part of the code that you are supposed to continue: 6 | {code_snippets} 7 | """ 8 | 9 | human_template = """ 10 | Write a function to load the file from the path for the argument name, variable and instruction below and also check if the path is not empty: 11 | Instruction:{instruction} 12 | Argument Name : {argument} 13 | Variable Name : {variable} 14 | Python Code: 15 | """ 16 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/prompt_list_parser.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt/chains/prompts/task_list/prompt_list_parser.py -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/prompt_template.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | Use the templates to guide the model in executing specific tasks or instructions. They act as directives, providing the context and structure needed for the model to respond appropriately. 3 | 4 | Components: 5 | 1. "system_template": Describes the model's role and task for a given instruction. This string will be used with system_template.format(...) so only used curly braces for inputs 6 | 2. "template": Specifies the format for the model's response. 
This string will be used with template.format(...) so only used curly braces for inputs 7 | 3. "variety": Indicates how creative or deterministic the model's response should be. 8 | 4. "function_name": A unique identifier for the specific task or instruction. 9 | 10 | IMPORTANT NOTE: 11 | - Write "system_template" and "template" in a way that, (system_template+template).format(input=something for input in inputs) work. 12 | What I mean is that, put all the elements of Inputs inside of either template or system_template with curly braces so that I can format it with predefined parameters. 13 | """ 14 | 15 | human_template = """ 16 | IMPORTANT NOTE: 17 | - ONLY the variables listed under "Inputs" MUST be included in either the "system_template" or "template" section within curly braces (e.g., '{{variable_name}}'). Do NOT include any other parameters within curly braces. 18 | - Ensure that the exact variable names listed in "Inputs" are used without any modifications. 19 | - If a variable is listed in "Inputs," it must appear within curly braces in at least one of the "system_template" or "template" sections. 20 | ========================================= 21 | Instruction: Generate a blog post from a title. 22 | Inputs: ["title"] 23 | Args: {{ 24 | "system_template": "You are an assistant designed to write a blog post from the given title: '{{title}}'.", 25 | "template": "Title: {{title}}. Please compose a blog post based on this title.", 26 | "variety": "True", 27 | "function_name": "blogger" 28 | }} 29 | ########################################## 30 | Instruction: Implement a language translation app from one language to another. 31 | Inputs: ["source_language","output_language", "text"] 32 | Args: {{ 33 | "system_template": "You are a language translator. 
Your task is to translate text from {{source_language}} to {{output_language}}.", 34 | "template": "Please translate the following text to {{output_language}}: '{{text}}'.", 35 | "variety": "False", 36 | "function_name": "translator" 37 | }} 38 | ########################################## 39 | Instruction: Generate an appropriate name for an animal. 40 | Inputs: ["animal"] 41 | Args: {{ 42 | "system_template": "You are tasked with creating a name for an animal. You generate concise and fitting names.", 43 | "template": "The animal is a {{animal}}. Please create a good name for it.", 44 | "variety": "True", 45 | "function_name": "animalNameGenerator" 46 | }} 47 | ########################################## 48 | Instruction: Create a programming-related humor machine. 49 | Inputs: [] 50 | Args: {{ 51 | "system_template": "You are designed to generate humor related to programming. Be creative and entertaining.", 52 | "template": "Please generate a programming-related joke or humorous statement.", 53 | "variety": "True", 54 | "function_name": "humorGenerator" 55 | }} 56 | ########################################## 57 | Instruction: Act as a math teacher to solve a problem. 58 | Inputs: ["math_problem"] 59 | Args: {{ 60 | "system_template": "You are a virtual math teacher, capable of solving any given math problem.", 61 | "template": "The problem is: {{math_problem}}. Please solve it and show the steps.", 62 | "variety": "False", 63 | "function_name": "mathSolver" 64 | }} 65 | ########################################## 66 | Instruction: Compose a piece of classical music. 
67 | Inputs: ["instrumentation", "theme"] 68 | Args: {{ 69 | "system_template": "You are a composer creating a piece of classical music with specified instrumentation and theme.", 70 | "template": "Compose a piece using the following instrumentation: {{instrumentation}}, based on the theme: '{{theme}}'.", 71 | "variety": "True", 72 | "function_name": "musicComposer" 73 | }} 74 | ########################################## 75 | Instruction:{instruction} 76 | Inputs:{inputs} 77 | Args: 78 | """ 79 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/python_coder.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | As a proficient Python developer, follow the guidelines below: 3 | 1. Build upon the given "Previous Code". 4 | 2. Ensure to import all required libraries. Do NOT use pandas.compat.StringIO as it's deprecated. 5 | 3. Implement the specified function. 6 | 4. Check if the arguments are valid (not None and/or non empty) 7 | 5. If the arguments are valid, invoke the function using the provided arguments, and store the result in the indicated variable. Otherwise, assign an empty string to the indicatd variable 8 | 5. Ensure the resultant code is error-free and fits naturally as a continuation of the "Previous Code". 9 | 10 | 11 | It should be the complete version of this: 12 | ================================================================ 13 | # all library imports 14 | def {function_name}({argument}): 15 | # complete the function and return the result 16 | 17 | if {argument} is not None and len({argument}) > 0: 18 | {variable} = {function_name}({argument}) 19 | else: 20 | {variable} = '' 21 | ================================================================ 22 | Import all the Python libraries you use in the result. 23 | Your generated function cannot be empty and should be functional. 
So, you cannot generate a function having only comments. 24 | 25 | Let's get coding! 26 | """ 27 | 28 | human_template = """ 29 | Instruction: {instruction} 30 | Function Name: {function_name} 31 | Arguments: {argument} 32 | Assigned Variable: {variable} 33 | Previous Code: 34 | {code_snippets} 35 | Python Code Continuation: 36 | """ 37 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/react.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt/chains/prompts/task_list/react.py -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/router.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt/chains/prompts/task_list/router.py -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/search.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are an AI assistant that write a concise prompt to direct an assistant to make web search for the given instruction. 3 | You will have inputs and instruction. The prompt should be formattable with the inputs which means it should include inputs with curly braces. 
4 | """ 5 | 6 | human_template = """ 7 | Instruction: Search the given input 8 | Inputs:input 9 | Prompt: Find the answer of it: {{input}} 10 | 11 | Instruction: Find the list of song releated to the title 12 | Inputs:title 13 | Prompt: Find the list of songs releated to the title: {{title}} 14 | 15 | Instruction:{instruction} 16 | Inputs:{inputs} 17 | Prompt: 18 | """ 19 | 20 | imports = """ 21 | from langchain_community.chat_models import ChatOpenAI 22 | from langchain.llms import OpenAI 23 | from langchain.tools import DuckDuckGoSearchRun 24 | from langchain.agents.tools import Tool 25 | from langchain.agents import initialize_agent, AgentType 26 | from langchain.chains import LLMMathChain 27 | from langchain.callbacks import StreamlitCallbackHandler 28 | """ 29 | 30 | functions = """ 31 | def {function_name}({argument}): 32 | search_input = "{res}".format({argument}={argument}) 33 | llm = OpenAI(openai_api_key=openai_api_key, temperature=0) 34 | llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True) 35 | tools = [ 36 | DuckDuckGoSearchRun(name="Search"), 37 | Tool( 38 | name="Calculator", 39 | func=llm_math_chain.run, 40 | description="useful for when you need to answer questions about math" 41 | ), 42 | ] 43 | model = ChatOpenAI(openai_api_key=openai_api_key, temperature=0) 44 | agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) 45 | st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False) 46 | return agent.run(search_input, callbacks=[st_cb]) 47 | """ 48 | 49 | outputs = """ 50 | if not openai_api_key.startswith('sk-'): 51 | st.warning('Please enter your OpenAI API key!', icon='⚠') 52 | {variable} = "" 53 | elif {argument}: 54 | {variable} = {function_name}({argument}) 55 | else: 56 | {variable} = '' 57 | """ -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/search_chat.py: 
-------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are an AI assistant that writes a concise prompt to direct an assistant to make web search for the given instruction. 3 | You will have inputs and instruction. The prompt should be formattable with the inputs which means it should include inputs with curly braces. 4 | """ 5 | 6 | human_template = """ 7 | Instruction: Search the given input 8 | Inputs:input 9 | Prompt: Find the answer to it: {{input}} 10 | 11 | Instruction: Find the list of songs related to the title 12 | Inputs:title 13 | Prompt: Find the list of songs related to the title: {{title}} 14 | 15 | Instruction:{instruction} 16 | Inputs:{inputs} 17 | Prompt: 18 | """ 19 | 20 | imports = """ 21 | from langchain.agents import ConversationalChatAgent, AgentExecutor 22 | from langchain.tools import DuckDuckGoSearchRun 23 | from langchain.memory.chat_message_histories import StreamlitChatMessageHistory 24 | from langchain.memory import ConversationBufferMemory 25 | from langchain.agents.tools import Tool 26 | from langchain.chains import LLMMathChain 27 | from langchain_community.chat_models import ChatOpenAI 28 | from langchain.callbacks import StreamlitCallbackHandler 29 | 30 | msgs = StreamlitChatMessageHistory() 31 | memory = ConversationBufferMemory( 32 | chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output" 33 | ) 34 | """ 35 | 36 | functions = """ 37 | def {function_name}({argument}): 38 | llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", openai_api_key=openai_api_key) 39 | llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True) 40 | tools = [ 41 | DuckDuckGoSearchRun(name="Search"), 42 | Tool( 43 | name="Calculator", 44 | func=llm_math_chain.run, 45 | description="useful for when you need to answer questions about math" 46 | )] 47 | chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools) 48 | executor = 
AgentExecutor.from_agent_and_tools( 49 | agent=chat_agent, 50 | tools=tools, 51 | memory=memory, 52 | return_intermediate_steps=True, 53 | handle_parsing_errors=True, 54 | ) 55 | st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False) 56 | return executor({argument}, callbacks=[st_cb])["output"] 57 | """ 58 | 59 | outputs = """ 60 | if not openai_api_key.startswith('sk-'): 61 | st.warning('Please enter your OpenAI API key!', icon='⚠') 62 | {variable} = "" 63 | elif {argument}: 64 | {variable} = {function_name}({argument}) 65 | else: 66 | {variable} = '' 67 | """ -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/string_to_doc.py: -------------------------------------------------------------------------------- 1 | imports = """ 2 | from langchain.docstore.document import Document 3 | """ 4 | outputs = """ 5 | {variable} = [Document(page_content={argument}, metadata={{'source': 'local'}})] 6 | """ 7 | 8 | outputs = """ 9 | if {argument}: 10 | {variable} = [Document(page_content={argument}, metadata={{'source': 'local'}})] 11 | else: 12 | {variable} = '' 13 | """ -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/summarize.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You will write summarization code with a strict structure like in the below but 3 | loader will change depending on the input 4 | ### 5 | from langchain_community.chat_models import ChatOpenAI 6 | from langchain.chains.summarize import load_summarize_chain 7 | def {function_name}(docs): 8 | llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k", openai_api_key=openai_api_key) 9 | chain = load_summarize_chain(llm, chain_type="stuff") 10 | return chain.run(docs) 11 | if {argument}: 12 | {variable} = 
{function_name}({argument}) 13 | else: 14 | {variable} = "" 15 | ### 16 | """ 17 | 18 | human_template = """ 19 | Here is the part of the code that you are supposed to continue: 20 | {code_snippets} 21 | 22 | Write a summarize function for the argument name and variable below: 23 | Argument Name : {argument} 24 | Variable Name : {variable} 25 | Summarization Code: 26 | """ 27 | 28 | imports = """ 29 | from langchain_community.chat_models import ChatOpenAI 30 | from langchain.chains.summarize import load_summarize_chain 31 | """ 32 | 33 | functions = """ 34 | def {function_name}({argument}): 35 | llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k", openai_api_key=openai_api_key) 36 | chain = load_summarize_chain(llm, chain_type="stuff") 37 | with st.spinner('DemoGPT is working on it. It might take 5-10 seconds...'): 38 | return chain.run({argument}) 39 | """ -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/ui_input_chat.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are an AI agent that can generate single-line placeholder text depending on the instruction and the variable name. 
3 | """ 4 | 5 | human_template = """ 6 | ############################# 7 | variable: url 8 | instruction: Get website URL from the user 9 | placeholder:Enter website URL 10 | ############################# 11 | variable: source_text 12 | instruction: Get source text from the user 13 | placeholder:Type the source text 14 | ############################# 15 | variable: input_language 16 | instruction: Get the input language from the user 17 | placeholder:Enter the input language 18 | ############################# 19 | variable: {variable} 20 | instruction: {instruction} 21 | placeholder: 22 | """ 23 | 24 | code = """ 25 | for message in st.session_state.messages: 26 | with st.chat_message(message["role"]): 27 | st.markdown(message["content"]) 28 | 29 | if {variable} := st.chat_input("{placeholder}"): 30 | with st.chat_message("user"): 31 | st.markdown({variable}) 32 | st.session_state.messages.append({{"role": "user", "content": {variable}}}) 33 | """ 34 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/ui_input_file.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are an AI assistant that can generate JSON with 2 keys which are title and data_type 3 | The values correspond to those keys will be used in the streamlit's file_uploader function like in the below: 4 | st.file_uploader($title,type=$data_type) 5 | So title should be string and data_type should be array of data types such as "txt", "csv" ... 6 | You will decide title and data_type by only considering the instruction given to you. 7 | You need to know that if instruction include Notion file, it means the data type should be zip because Notion export is directly a zip file. 
8 | """ 9 | 10 | human_template = """ 11 | Instruction:{instruction} 12 | JSON: 13 | """ 14 | 15 | code = """ 16 | uploaded_file = st.file_uploader("{title}", type={data_type}, key='{variable}') 17 | if uploaded_file is not None: 18 | # Create a temporary file to store the uploaded content 19 | extension = uploaded_file.name.split(".")[-1] 20 | with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{{extension}}') as temp_file: 21 | temp_file.write(uploaded_file.read()) 22 | {variable} = temp_file.name # it shows the file path 23 | else: 24 | {variable} = '' 25 | """ 26 | 27 | inputs = """ 28 | uploaded_file = st.file_uploader("{title}", type={data_type}, key='{variable}') 29 | """ 30 | 31 | outputs = """ 32 | if uploaded_file is not None: 33 | # Create a temporary file to store the uploaded content 34 | extension = uploaded_file.name.split(".")[-1] 35 | with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{{extension}}') as temp_file: 36 | temp_file.write(uploaded_file.read()) 37 | {variable} = temp_file.name # it shows the file path 38 | else: 39 | {variable} = '' 40 | """ 41 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/ui_input_text.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You will write a single line streamlit input code such as st.text_input, st.selectbox... 3 | You will do it to accomplish the given instruction. You will see examples. You will also see the previous code 4 | segment that you will continue on. 
You will only write a single streamlit code by looking both the instruction and the previous code 5 | """ 6 | 7 | human_template = """ 8 | Previous Code Segment:{code_snippets} 9 | 10 | variable: url 11 | instruction: Get website URL from the user 12 | code: 13 | url = st.text_input("Enter website URL") 14 | 15 | variable: source_text 16 | instruction: Get source text from the user 17 | code: 18 | source_text = st.text_area("Enter source text") 19 | 20 | variable: input_language 21 | instruction: Get the input language from the user 22 | code: 23 | input_language = st.text_input("Enter the input language") 24 | 25 | variable: color 26 | instruction: Select the color 27 | code: 28 | color = st.selectbox("Select the color", ["Red", "Blue", "Green", "Yellow", "purple"]) 29 | 30 | variable: {variable} 31 | instruction: {instruction} 32 | code: 33 | """ 34 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/ui_output_chat.py: -------------------------------------------------------------------------------- 1 | code = """ 2 | with st.chat_message("assistant"): 3 | message_placeholder = st.empty() 4 | full_response = "" 5 | # Simulate stream of response with milliseconds delay 6 | for chunk in {res}.split(): 7 | full_response += chunk + " " 8 | time.sleep(0.05) 9 | # Add a blinking cursor to simulate typing 10 | message_placeholder.markdown(full_response + "▌") 11 | message_placeholder.markdown(full_response) 12 | # Add assistant response to chat history 13 | if full_response: 14 | st.session_state.messages.append({{"role": "assistant", "content": full_response}}) 15 | """ -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_list/ui_output_text.py: -------------------------------------------------------------------------------- 1 | # human_template = """ 2 | # Write and call a function which shows text by streamlit text element code depending on the 
instruction below: 3 | # Suppose that, streamlit has been imported by "import streamlit as st" so you don't need to import it. 4 | # 5 | # The function arguments are "{args}". 6 | # You can assume that the variables with the same names are already defined. 7 | # So you don't need to give dummy variables to the function while calling. 8 | # You should use "{args}" to call the function 9 | # In the function, also add a descriptive part next to the "{args}" 10 | # After defining the function, call it with "{args}". 11 | # Assume that, they have been already defined but only call the function by checking if the is not None and len(str(the)) > 0 input is not an empty string. 12 | # use st.markdown to show the main text 13 | # 14 | # Here is the part of the code that you are supposed to continue: 15 | # {code_snippets} 16 | # 17 | # Instruction:{instruction} 18 | # Streamlit Code: 19 | # """ 20 | 21 | system_template = """ 22 | You cannot use other st.func_name even if it is image or table or any kind. 
You are supposed to select the most appropriate one from the following: 23 | [st.markdown, st.header, st.subheader, st.caption, st.code, st.text, st.latex, st.write] 24 | """ 25 | 26 | human_template = """ 27 | args: next_segment 28 | data type: string 29 | instruction: Display the generated next narrative segment to the user 30 | code: 31 | if next_segment is not None and len(str(next_segment)) > 0: 32 | st.success(next_segment) 33 | 34 | args:{args} 35 | data type: {data_type} 36 | instruction:{instruction} 37 | code: 38 | 39 | """ 40 | 41 | code = "st.markdown({args})" -------------------------------------------------------------------------------- /demogpt/chains/prompts/task_refiner.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | Refine the Generated Task List by fixing the problem mentioned in the Feedback 3 | 4 | Task objects must be Python dictionaries, and the output should strictly conform to a Python list of JSON objects. 5 | So use double quotes because this output will be converted to json with json.loads function. 6 | 7 | It is extremely important to include these keys in each task object: 8 | 9 | -step 10 | -task_type 11 | -task_name 12 | -input_key 13 | -input_data_type 14 | -output_key 15 | -output_data_type 16 | -description 17 | 18 | ################################ 19 | 20 | You are only allowed to use those tasks below: 21 | 22 | {TASK_NAMES} 23 | 24 | These are the explanations of those tasks: 25 | 26 | {TASK_PURPOSES} 27 | 28 | Your main job is to generate a new task list by considering the "Problems" 29 | Please ensure that the New Refined Task List does not contain any problem mentioned in the "Problems". 
30 | If needed, you can change the number of tasks, remove/replace/add tasks as long as you use only the allowed tasks 31 | """ 32 | 33 | human_template = """ 34 | Instruction:{instruction} 35 | ################################ 36 | Problematic Task List: 37 | {tasks} 38 | ################################ 39 | Problems: {feedback} 40 | ################################ 41 | New Refined Task List: 42 | """ 43 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/tasks.py: -------------------------------------------------------------------------------- 1 | # from .task_definitions import TASK_DESCRIPTIONS, TASK_NAMES 2 | 3 | system_template = """ 4 | Create a Python list of task objects that align with the provided instruction and plan. Task objects must be Python dictionaries, and the output should strictly conform to a Python list of JSON objects. 5 | 6 | You must use only the tasks provided in the description: 7 | 8 | {TASK_DESCRIPTIONS} 9 | 10 | task_name could be only one of the task names below: 11 | {TASK_NAMES} 12 | """ 13 | 14 | human_template = """ 15 | Create a Python list of task objects that align with the provided instruction and all steps of the plan. 16 | 17 | Task objects must be Python dictionaries, and the output should strictly conform to a Python list of JSON objects. 18 | 19 | Follow these detailed guidelines: 20 | 21 | Task Objects: Create a Python dictionary for each task using the following keys: 22 | 23 | step: It represents the step number corresponding to which plan step it matches 24 | task_type: Should match one of the task names provided in task descriptions. 25 | task_name: Define a specific name for the task that aligns with the corresponding plan step. 26 | input_key: List the "output_key" values from parent tasks used as input or "none" if there's no input or if it comes from the user. 
27 | input_data_type: The list of data types of the inputs 28 | output_key: Designate a unique key for the task's output. It is compatible with the output type if not none 29 | output_data_type: The data type of the output 30 | description: Provide a brief description of the task's goal, mirroring the plan step. 31 | 32 | Ensure that each task corresponds to each step in the plan, and that no step in the plan is omitted. 33 | Ensure that output_key is unique for each task. 34 | Ensure that each task corresponds to each step in the plan 35 | Ensure that an output type of task does not change. 36 | 37 | ########################## 38 | Instruction: Create a system that can generate blog post related to a website 39 | Plan: 40 | 1. Get website URL from the user with 'ui_input_text' 41 | 2. Use 'doc_loader' to load the page as Document 42 | 3. Use 'doc_to_string' to convert Document to string 43 | 4. Use 'prompt_template' to generate a blog post using the result of doc_to_string 44 | 5. If blog post is generated, show it to the user with 'ui_output_text'. 
45 | List of Task Objects (Python List of JSON): 46 | [ 47 | {{ 48 | "step": 1, 49 | "task_type": "ui_input_text", 50 | "task_name": "get_url", 51 | "input_key": "none", 52 | "input_data_type": "none", 53 | "output_key": "url", 54 | "output_data_type": "string", 55 | "description": "Get website url from the user" 56 | }}, 57 | {{ 58 | "step": 2, 59 | "task_type": "doc_loader", 60 | "task_name": "doc_loader", 61 | "input_key": "url", 62 | "input_data_type": "string", 63 | "output_key": "docs", 64 | "output_data_type": "Document", 65 | "description": "Load the document from the website url" 66 | }}, 67 | {{ 68 | "step": 3, 69 | "task_type": "doc_to_string", 70 | "task_name": "convertDocToString", 71 | "input_key": "docs", 72 | "input_data_type": "Document", 73 | "output_key": "docs_string", 74 | "output_data_type": "string", 75 | "description": "Convert docs to string" 76 | }}, 77 | {{ 78 | "step": 4, 79 | "task_type": "prompt_template", 80 | "task_name": "writeBlogPost", 81 | "input_key": ["docs_string"], 82 | "input_data_type": ["string"], 83 | "output_key": "blog", 84 | "output_data_type": "string", 85 | "description": "Write blog post related to the context of docs_string" 86 | }}, 87 | {{ 88 | "step": 5, 89 | "task_type": "ui_output_text", 90 | "task_name": "show_blog", 91 | "input_key": "blog", 92 | "input_data_type": "string", 93 | "output_key": "none", 94 | "output_data_type": "none", 95 | "description": "Display the generated blog post to the user" 96 | }} 97 | ] 98 | ########################## 99 | Instruction:{instruction} 100 | Plan : {plan} 101 | List of Task Objects (Python List of JSON): 102 | """ 103 | -------------------------------------------------------------------------------- /demogpt/chains/prompts/title.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are an AI agent that can generate appealing. 3 | The app has been generated from the description. 
Your task is generating a good title for it.
HumanMessagePromptTemplate.from_template(prompt) 57 | else: 58 | template = AIMessagePromptTemplate.from_template(prompt) 59 | 60 | self.conversation_history.append(template) 61 | 62 | def feedback(self, **kwargs): 63 | return LLMChain( 64 | llm=self.llm, 65 | prompt=ChatPromptTemplate.from_template(self.prompts["feedback"]), 66 | ).run(**kwargs) 67 | 68 | def refine(self): 69 | prompt = ChatPromptTemplate.from_messages(self.conversation_history) 70 | return LLMChain(llm=self.llm, prompt=prompt).run({}) 71 | 72 | def isCompleted(self, res): 73 | return self.stop_kw in res 74 | 75 | def run(self, instruction, plan, result): 76 | self.addToHistory("instruction:" + instruction) 77 | self.addToHistory("plan:" + plan) 78 | self.addToHistory("code:" + result) 79 | for _ in trange(self.max_iter): 80 | feedback = self.feedback(instruction=instruction, result=result) 81 | self.addToHistory("feedback:" + feedback) 82 | if self.log_intermediate_steps: 83 | print(colored("feedback:\n" + feedback, "blue")) 84 | if self.isCompleted(feedback): 85 | return result 86 | if self.log_intermediate_steps: 87 | print(colored("refined result:\n" + result, "green")) 88 | result = self.refine().replace("{", "{{").replace("}", "}}") 89 | self.addToHistory("code:" + result) 90 | return result 91 | 92 | 93 | if __name__ == "__main__": 94 | plan_refiner = SelfRefiner() 95 | instruction = """create a game where the system creates a story and stops at the exciting point and asks to the user 96 | to make a selection then after user makes his selection, system continues to the story depending on the user's selection""" 97 | with open("~/Desktop/test/test.py") as f: 98 | result = f.read().replace("{", "{{").replace("}", "}}") 99 | plan_refiner.run(instruction=instruction, result=result) 100 | -------------------------------------------------------------------------------- /demogpt/chains/task_chains.py: -------------------------------------------------------------------------------- 1 | import json 
2 | import os 3 | import re 4 | from difflib import SequenceMatcher 5 | 6 | from langchain.chains import LLMChain 7 | from langchain_community.chat_models import ChatOpenAI 8 | from langchain.prompts.chat import (ChatPromptTemplate, 9 | HumanMessagePromptTemplate, 10 | SystemMessagePromptTemplate) 11 | 12 | from demogpt import utils 13 | from demogpt.chains import prompts 14 | 15 | 16 | class TaskChains: 17 | llm = None 18 | 19 | @classmethod 20 | def setLlm( 21 | cls, 22 | model, 23 | openai_api_key=os.getenv("OPENAI_API_KEY", ""), 24 | temperature=0.0, 25 | openai_api_base=None, 26 | ): 27 | cls.llm = ChatOpenAI( 28 | model=model, 29 | openai_api_key=openai_api_key, 30 | temperature=temperature, 31 | openai_api_base=openai_api_base 32 | ) 33 | 34 | @classmethod 35 | def getChain(cls, system_template="", human_template="", **kwargs): 36 | prompts = [] 37 | if system_template: 38 | prompts.append(SystemMessagePromptTemplate.from_template(system_template)) 39 | if human_template: 40 | prompts.append(HumanMessagePromptTemplate.from_template(human_template)) 41 | chat_prompt = ChatPromptTemplate.from_messages(prompts) 42 | return LLMChain(llm=cls.llm, prompt=chat_prompt).run(**kwargs) 43 | 44 | @classmethod 45 | def uiInputText(cls, task): 46 | variable = ", ".join(task["output_key"]) 47 | instruction = task["description"] 48 | code = cls.getChain( 49 | human_template=prompts.ui_input_text.human_template, 50 | instruction=instruction, 51 | variable=variable, 52 | ) 53 | return utils.refine(code) 54 | 55 | @classmethod 56 | def uiOutputText(cls, task): 57 | args = ", ".join(task["input_key"]) 58 | data_type = task["input_data_type"] 59 | if isinstance(args, list): 60 | args = ",".join(args) 61 | instruction = task["description"] 62 | code = cls.getChain( 63 | system_template=prompts.ui_output_text.system_template, 64 | human_template=prompts.ui_output_text.human_template, 65 | instruction=instruction, 66 | args=args, 67 | data_type=data_type, 68 | ) 69 | return 
utils.refine(code) 70 | 71 | @classmethod 72 | def uiInputFile(cls, task): 73 | variable = ", ".join(task["output_key"]) 74 | instruction = task["description"] 75 | res = cls.getChain( 76 | system_template=prompts.ui_input_file.system_template, 77 | human_template=prompts.ui_input_file.human_template, 78 | instruction=instruction 79 | ) 80 | res = res[res.find("{") : res.rfind("}") + 1] 81 | res = json.loads(res) 82 | title = res.get("title") 83 | data_type = res.get("data_type") 84 | code = prompts.ui_input_file.code.format(instruction=instruction, 85 | title=title, 86 | data_type=data_type, 87 | variable=variable 88 | ) 89 | return code 90 | 91 | @classmethod 92 | def pathToContent(cls, task, code_snippets): 93 | instruction = task["description"] 94 | argument = ", ".join(task["input_key"]) 95 | variable = ", ".join(task["output_key"]) 96 | 97 | code = cls.getChain( 98 | system_template=prompts.path_to_file.system_template, 99 | human_template=prompts.path_to_file.human_template, 100 | instruction=instruction, 101 | argument=argument, 102 | variable=variable, 103 | code_snippets=code_snippets, 104 | ) 105 | return utils.refine(code) 106 | 107 | @classmethod 108 | def getDetailedDescription(cls, app_idea, instruction): 109 | return cls.getChain( 110 | system_template=prompts.detailed_description.system_template, 111 | human_template=prompts.detailed_description.human_template, 112 | app_idea=app_idea, 113 | instruction=instruction 114 | ) 115 | 116 | @classmethod 117 | def promptTemplate(cls, app_idea, task): 118 | inputs = ", ".join(task["input_key"]) 119 | instruction = task["description"] 120 | 121 | res = cls.getChain( 122 | system_template=prompts.prompt_template.system_template, 123 | human_template=prompts.prompt_template.human_template, 124 | instruction=instruction, 125 | inputs=inputs, 126 | ) 127 | res = res[res.find("{") : res.rfind("}") + 1] 128 | return json.loads(res) 129 | 130 | @classmethod 131 | def uiInputChat(cls, task): 132 | variable = ", 
".join(task["output_key"]) 133 | instruction = task["description"] 134 | 135 | placeholder = cls.getChain( 136 | human_template=prompts.ui_input_chat.human_template, 137 | instruction=instruction, 138 | variable=variable, 139 | ) 140 | 141 | code = prompts.ui_input_chat.code.format(variable=variable, placeholder=placeholder) 142 | 143 | return code 144 | 145 | @classmethod 146 | def uiOutputChat(cls, task): 147 | res = ", ".join(task["input_key"]) 148 | 149 | code = prompts.ui_output_chat.code.format(res=res) 150 | 151 | return code 152 | 153 | @classmethod 154 | def chat(cls, app_idea, task): 155 | inputs = ", ".join(task["input_key"]) 156 | instruction = task["description"] 157 | 158 | new_instruction = cls.getDetailedDescription(app_idea=app_idea,instruction=instruction) 159 | 160 | res = cls.getChain( 161 | system_template=prompts.chat.system_template, 162 | human_template=prompts.chat.human_template, 163 | instruction=new_instruction, 164 | inputs=inputs, 165 | ) 166 | res = res.replace("'''", '"""') 167 | res = res[res.find("{") : res.rfind("}") + 1] 168 | return json.loads(res, strict=False) 169 | 170 | @classmethod 171 | def promptTemplateRefiner(cls, templates, inputs, feedback): 172 | res = cls.getChain( 173 | system_template=prompts.prompt_chat_refiner.system_template, 174 | human_template=prompts.prompt_chat_refiner.human_template, 175 | templates=templates, 176 | feedback=feedback, 177 | inputs=inputs, 178 | ) 179 | res = res[res.find("{") : res.rfind("}") + 1] 180 | return json.loads(res) 181 | 182 | @classmethod 183 | def search_chat(cls, task): 184 | argument = ", ".join(task["input_key"]) 185 | variable = ", ".join(task["output_key"]) 186 | function_name = task["task_name"] 187 | instruction = task["description"] 188 | 189 | res = cls.getChain( 190 | system_template=prompts.search_chat.system_template, 191 | human_template=prompts.search_chat.human_template, 192 | instruction=instruction, 193 | inputs=argument, 194 | ) 195 | 196 | res = 
res.replace('"',"'") 197 | 198 | imports = prompts.search_chat.imports 199 | functions = prompts.search_chat.functions.format(function_name=function_name, argument=argument) 200 | outputs = prompts.search_chat.outputs.format(function_name=function_name, argument=argument,variable=variable) 201 | 202 | code = imports + "\n" + functions + "\n" + outputs + "\n" 203 | 204 | return code 205 | 206 | @classmethod 207 | def search(cls, task): 208 | argument = ", ".join(task["input_key"]) 209 | variable = ", ".join(task["output_key"]) 210 | function_name = task["task_name"] 211 | instruction = task["description"] 212 | 213 | res = cls.getChain( 214 | system_template=prompts.search.system_template, 215 | human_template=prompts.search.human_template, 216 | instruction=instruction, 217 | inputs=argument, 218 | ) 219 | 220 | res = res.replace('"',"'") 221 | 222 | imports = prompts.search.imports 223 | functions = prompts.search.functions.format(function_name=function_name, argument=argument,res=res) 224 | outputs = prompts.search_chat.outputs.format(function_name=function_name, argument=argument,variable=variable) 225 | code = imports + "\n" + functions + "\n" + outputs + "\n" 226 | 227 | return code 228 | 229 | @classmethod 230 | def docLoad(cls, task, code_snippets): 231 | 232 | def get_most_similar_key(input_key, available_keys): 233 | # This function returns the most similar key from available_keys to the input_key. 
234 | best_match = None 235 | best_ratio = 0 236 | for key in available_keys: 237 | ratio = SequenceMatcher(None, input_key, key).ratio() 238 | if ratio > best_ratio: 239 | best_ratio = ratio 240 | best_match = key 241 | return best_match 242 | 243 | type2loader = { 244 | "txt": "TextLoader", 245 | "docx":"UnstructuredWordDocumentLoader", 246 | "pdf":"UnstructuredPDFLoader", 247 | "pptx":"UnstructuredPowerPointLoader", 248 | "csv":"CSVLoader", 249 | "xlsx":"UnstructuredExcelLoader", 250 | "zip":"NotionDirectoryLoader", 251 | "online_pdf":"OnlinePDFLoader", 252 | "web":"WebBaseLoader", 253 | "xlsx":"UnstructuredExcelLoader", 254 | "youtube":"YoutubeLoader", 255 | } 256 | 257 | loader2type = {type2loader[dtype]:dtype for dtype in type2loader} 258 | 259 | def getLoaderCall(data_type): 260 | if data_type in loader2type: 261 | data_type = loader2type[data_type] 262 | 263 | loader = type2loader.get(data_type) # First, try to get the exact match 264 | if loader is None: 265 | # If there's no exact match, get the most similar key and retrieve the value 266 | similar_key = get_most_similar_key(data_type, type2loader.keys()) 267 | loader = type2loader[similar_key] 268 | 269 | loader = type2loader[data_type] 270 | 271 | if data_type in [ 272 | "txt", 273 | "online_pdf", 274 | "docx", 275 | "csv" 276 | ]: 277 | loader_line = f"loader = {loader}({argument})" 278 | elif data_type == "web": 279 | loader_line = f"loader = {loader}([{argument}])" 280 | elif data_type in ["pdf", "pptx"]: 281 | loader_line = ( 282 | f'loader = {loader}({argument}, mode="elements", strategy="fast")' 283 | ) 284 | elif data_type == "xlsx": 285 | loader_line = f'loader = {loader}({argument}, mode="elements")' 286 | elif data_type == "youtube": 287 | loader_line = ( 288 | f"loader = {loader}.from_youtube_url({argument}, add_video_info=False)" 289 | ) 290 | elif data_type == "zip": 291 | loader_line = f"""if os.path.exists('Notion_DB') and os.path.isdir('Notion_DB'): 292 | shutil.rmtree('Notion_DB') 293 | 
os.system(f"unzip {{{argument}}} -d Notion_DB") 294 | loader = {loader}("Notion_DB")""" 295 | else: 296 | loader_line = f"loader = TextLoader({argument})" 297 | 298 | return loader_line 299 | 300 | instruction = task["description"] 301 | argument = task["input_key"][0] 302 | variable = ", ".join(task["output_key"]) 303 | function_name = task["task_name"] 304 | 305 | variable_match = re.search(r"(\w+)\s*=\s*temp_file\.name", code_snippets, re.MULTILINE) 306 | 307 | if variable_match: 308 | variable_name = variable_match.group(1).strip() 309 | else: 310 | variable_name = '' 311 | 312 | match = re.search(r"st\.file_uploader\(\s*?.*?type=\s*\[(.*?)\]\s*?.*?\)", code_snippets, re.DOTALL) 313 | 314 | loader_line = "" 315 | 316 | if variable_name == argument and match: 317 | types = match.group(1).replace("'", "").split(", ") 318 | types = [type.strip() for type in types] 319 | if len(types) == 1: 320 | loader_line = getLoaderCall(types[0]) 321 | else: 322 | loader_line = "\n ".join([f"if {argument}.endswith('.{data_type}'):\n\t{getLoaderCall(data_type)}" for data_type in types]) 323 | else: 324 | print("No match found. 
Using the chain...") 325 | types = [] 326 | loader = cls.getChain( 327 | system_template=prompts.doc_load.system_template, 328 | human_template=prompts.doc_load.human_template, 329 | instruction=instruction, 330 | code_snippets=code_snippets 331 | ) 332 | 333 | loader_line = getLoaderCall(loader) 334 | 335 | imports = prompts.doc_load.imports 336 | functions = prompts.doc_load.functions.format(function_name=function_name, argument=argument, loader_line=loader_line) 337 | outputs = prompts.doc_load.outputs.format(argument=argument, function_name=function_name, variable=variable) 338 | 339 | code = imports + "\n" + functions + "\n" + outputs + "\n" 340 | 341 | return code 342 | 343 | @classmethod 344 | def stringToDoc(cls, task): 345 | argument = ", ".join(task["input_key"]) 346 | variable = ", ".join(task["output_key"]) 347 | imports = prompts.string_to_doc.imports 348 | outputs = prompts.string_to_doc.outputs.format(variable=variable, argument=argument) 349 | code = imports + "\n" + outputs + "\n" 350 | 351 | return code 352 | 353 | @classmethod 354 | def docToString(cls, task): 355 | argument = ", ".join(task["input_key"]) 356 | variable = ", ".join(task["output_key"]) 357 | code = f'{variable} = "".join([doc.page_content for doc in {argument}])' 358 | return code 359 | 360 | @classmethod 361 | def summarize(cls, task): 362 | argument = ", ".join(task["input_key"]) 363 | variable = ", ".join(task["output_key"]) 364 | function_name = task["task_name"] 365 | 366 | imports = prompts.summarize.imports 367 | functions = functions = prompts.summarize.functions.format(function_name=function_name, argument=argument) 368 | outputs = prompts.string_to_doc.outputs.format(function_name=function_name, variable=variable, argument=argument) 369 | 370 | code = imports + "\n" + functions + "\n" + outputs + "\n" 371 | 372 | return code 373 | 374 | @classmethod 375 | def pythonCoder(cls, task, code_snippets): 376 | instruction = task["description"] 377 | argument = ", 
".join(task["input_key"]) 378 | variable = ", ".join(task["output_key"]) 379 | function_name = task["task_name"] 380 | 381 | code = cls.getChain( 382 | system_template=prompts.python_coder.system_template, 383 | human_template=prompts.python_coder.human_template, 384 | instruction=instruction, 385 | argument=argument, 386 | variable=variable, 387 | function_name=function_name, 388 | code_snippets=code_snippets, 389 | ) 390 | return utils.refine(code) -------------------------------------------------------------------------------- /demogpt/chains/task_definitions.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | AVAILABLE_TASKS_COUNT = 14 4 | 5 | ################################ 6 | 7 | ALL_TASKS = [ 8 | { 9 | "name": "ui_input_text", 10 | "description": "Gets input from the user via a text field.", 11 | "good_at": "Retrieving text input from the user.", 12 | "input_data_type": "none", 13 | "output_data_type": "string", 14 | "purpose": "Collect user-entered text for further processing.", 15 | }, 16 | { 17 | "name": "ui_input_file", 18 | "description": "Provide a mechanism for users to upload a file and return its path. 
The task involves creating a file upload widget and returning its file path", 19 | "good_at": "Enabling file uploads and making the file path available for doc_load", 20 | "input_data_type": "none", 21 | "output_data_type": "string", 22 | "purpose": "Getting local file path with upload file widget so that doc_load can use this path", 23 | }, 24 | { 25 | "name": "ui_output_text", 26 | "description": "Shows text output to the user.", 27 | "good_at": "Showing text to the user.", 28 | "input_data_type": "*", 29 | "output_data_type": "none", 30 | "purpose": "Displaying textual information to the user.", 31 | }, 32 | { 33 | "name": "prompt_template", 34 | "description": "Generate any string output according to the given instruction by AI", 35 | "good_at": "Creating context-aware string, responses, role play, instructions, that can be generated by AI", 36 | "input_data_type": "*string", 37 | "output_data_type": "string", 38 | "purpose": "Using AI to generate smart text output from given context or instruction", 39 | }, 40 | { 41 | "name": "doc_loader", 42 | "description": "Load file content from path (notion zip file path or txt or url or pdf file path or csv file path or powerpoint file path or docx path or youtube url or excel file path) and generate docs", 43 | "good_at": "Loading from external sources only from url or path not the content", 44 | "input_data_type": "string", 45 | "output_data_type": "Document", 46 | "purpose": "Loading external files", 47 | }, 48 | { 49 | "name": "doc_to_string", 50 | "description": "Convert Document object to string", 51 | "good_at": "Converting Document object to string where the next task is expecting string instead of Document object", 52 | "input_data_type": "Document", 53 | "output_data_type": "string", 54 | "purpose": "Converting Document object to string", 55 | }, 56 | { 57 | "name": "string_to_doc", 58 | "description": "Convert string to Document object", 59 | "good_at": "Converting string to Document object where the next 
task is expecting Document object instead of string", 60 | "input_data_type": "string", 61 | "output_data_type": "Document", 62 | "purpose": "Converting string to Document object", 63 | }, 64 | { 65 | "name": "ui_input_chat", 66 | "description": "Get user message/text input for conversation-based application", 67 | "good_at": "Getting text input from the user for chat-based application", 68 | "input_data_type": "none", 69 | "output_data_type": "string", 70 | "purpose": "For chat interface, get user text input. It does not need to be included multiple times", 71 | }, 72 | { 73 | "name": "ui_output_chat", 74 | "description": "Display the conversation history in a chat-based application. It is the only thing that you can use for displaying the chat", 75 | "good_at": "Displaying chat history", 76 | "input_data_type": "string", 77 | "output_data_type": "none", 78 | "purpose": "For conversation-based apps, it displays the chat conversation with history.", 79 | }, 80 | { 81 | "name": "chat", 82 | "description": "Chat version of prompt_template that can remember the conversation history while responding", 83 | "good_at": "Chatbot like applications and any application requiring chat property.", 84 | "input_data_type": "*string", 85 | "output_data_type": "string", 86 | "purpose": "For conversation-based apps, it generates the responses while remembering the conversation history", 87 | }, 88 | { 89 | "name": "python", 90 | "description": """Implement and call generic python function from given description which can be done using the libraries: 91 | [NumPy, Matplotlib, Seaborn, Scikit-Learn, NLTK, SciPy, OpenCV, Pandas]""", 92 | "good_at": "Writing generic python code.", 93 | "input_data_type": "*", 94 | "output_data_type": "*", 95 | "purpose": "It generates python code from general purpose instructions", 96 | }, 97 | { 98 | "name": "plan_and_execute", 99 | "description": "It is intelligent AI agent that can answer any specific question on internet.", 100 | "good_at": 
"Applications requiring up to date knowledge on the internet.", 101 | "input_data_type": "string", 102 | "output_data_type": "string", 103 | "purpose": "By using internet, it autonomously give answer for any question available in the web. It can answer questions as specific as possible so you don't need to iterate over the answer.", 104 | }, 105 | { 106 | "name": "search_chat", 107 | "description": "It is intelligent chat-based AI agent that can answer any specific question on internet.", 108 | "good_at": "Applications requiring up to date knowledge on the internet. It can also be used in chat app", 109 | "input_data_type": "string", 110 | "output_data_type": "string", 111 | "purpose": "By using internet, it autonomously give answer for any question available in the web. It can answer questions as specific as possible so you don't need to iterate over the answer. It also remembers the chat history while responsing", 112 | }, 113 | { 114 | "name": "doc_summarizer", 115 | "description": "Summarize Document Objects", 116 | "good_at": "Summarizing long Document Objects", 117 | "input_data_type": "Document", 118 | "output_data_type": "string", 119 | "purpose": "Summarize long Document Objects", 120 | }, 121 | { 122 | "name": "prompt_list_parser", 123 | "description": "Transform the input text into a list.", 124 | "good_at": "Transforming text into a list.", 125 | "input_data_type": "string", 126 | "output_data_type": "*", 127 | "purpose": "Converts textual data into structured list format.", 128 | }, 129 | { 130 | "name": "router", 131 | "description": "When there are multiple prompt_template objects, it uses the appropriate one to answer the question.", 132 | "good_at": "Handling different types of questions that require different abilities.", 133 | "input_data_type": "*prompt_template", 134 | "output_data_type": "string", 135 | "purpose": "Routes queries to the appropriate handler based on context or type.", 136 | }, 137 | { 138 | "name": "react", 139 | "description": 
"Answer questions that require external search on the web.", 140 | "good_at": "Answering questions that require Google search or other web searches.", 141 | "input_data_type": "string", 142 | "output_data_type": "string", 143 | "purpose": "Finds information online to answer user queries.", 144 | }, 145 | { 146 | "name": "cpal_chain", 147 | "description": "Solve math problems end to end", 148 | "good_at": "Directly solving any math problems", 149 | "input_data_type": "string", 150 | "output_data_type": "string", 151 | "purpose": "Performing mathematical calculations and solving problems based on the input question", 152 | }, 153 | { 154 | "name": "hub_bash", 155 | "description": "Do operations on the bash by running needed scripts on the terminal to apply the command.", 156 | "good_at": "Executing bash commands and providing results.", 157 | "input_data_type": "string", 158 | "output_data_type": "string", 159 | "purpose": "Running scripts or commands on the terminal and returning the output.", 160 | }, 161 | { 162 | "name": "hub_meteo", 163 | "description": "Gives weather-related information from the question.", 164 | "good_at": "Answering weather-related questions.", 165 | "input_data_type": "string", 166 | "output_data_type": "string", 167 | "purpose": "Providing weather forecasts, conditions, and related information.", 168 | }, 169 | ] 170 | 171 | 172 | def jsonFixer(data): 173 | data = json.dumps(data, indent=4) 174 | return data.replace("{", "{{").replace("}", "}}") 175 | 176 | def isTaskAvailable(task, app_chat, app_prompt_template, app_search, app_summary): 177 | if not app_chat: 178 | if "chat" in task["name"]: 179 | return False 180 | elif task["name"] == "python": 181 | return False 182 | elif task["name"] == "plan_and_execute": 183 | return False 184 | elif task["name"] == "prompt_template": 185 | return False 186 | elif app_search: 187 | if task["name"] == "chat": 188 | return False 189 | 190 | if not app_prompt_template: 191 | if task["name"] in [ 192 | 
"prompt_template", 193 | "doc_loader", 194 | "doc_to_string", 195 | "string_to_doc" 196 | ]: 197 | return False 198 | 199 | if not app_summary: 200 | if task["name"] == "doc_summarizer": 201 | return False 202 | 203 | elif task["name"] == "python": 204 | return False 205 | 206 | if not app_search: 207 | if task["name"] == "plan_and_execute": 208 | return False 209 | if task["name"] == "search_chat": 210 | return False 211 | 212 | elif task["name"] == "python": 213 | return False 214 | 215 | return True 216 | 217 | 218 | def getAvailableTasks(app_type): 219 | app_prompt_template = True # neutral 220 | 221 | app_chat = app_type["is_chat"] == "true" 222 | app_search = app_type["is_search"] == "true" 223 | app_summary = app_type["is_summary"] == "true" 224 | app_prompt_template = app_type["is_ai"] == "true" 225 | 226 | tasks = [] 227 | for task in ALL_TASKS[:AVAILABLE_TASKS_COUNT]: 228 | if isTaskAvailable(task, app_chat, app_prompt_template, app_search, app_summary): 229 | tasks.append(task) 230 | 231 | return tasks 232 | 233 | 234 | def getTasks(app_type): 235 | TASKS = getAvailableTasks(app_type) 236 | TASK_TYPE2_TASK = {task["name"]: task for task in TASKS} 237 | 238 | TASK_NAMES = [task["name"] for task in TASKS] 239 | 240 | TASK_PURPOSES = {task["name"]: task["purpose"] for task in TASKS} 241 | TASK_PURPOSES = jsonFixer(TASK_PURPOSES) 242 | 243 | TASK_DESCRIPTIONS = jsonFixer(TASKS) 244 | 245 | TASK_DTYPES = { 246 | task["name"]: { 247 | "input_data_type": task["input_data_type"], 248 | "output_data_type": task["output_data_type"], 249 | } 250 | for task in TASKS 251 | } 252 | 253 | TASK_DTYPES = jsonFixer(TASK_DTYPES) 254 | 255 | return TASK_DESCRIPTIONS, TASK_NAMES, TASK_DTYPES, TASK_PURPOSES, TASK_TYPE2_TASK 256 | 257 | 258 | def getPlanGenHelper(app_type): 259 | prompt_template_must = False 260 | app_chat_must = app_type["is_chat"] == "true" 261 | app_summarize_must = app_type["is_summary"] == "true" 262 | app_search_must = app_type["is_search"] == "true" 263 
| if not (app_chat_must or app_summarize_must or app_search_must): 264 | if app_type["is_ai"] == "true": 265 | prompt_template_must = True 266 | 267 | helper = "" 268 | if prompt_template_must: 269 | helper += "Since the application is AI-based, you must use 'prompt_template' task in the steps.\n" 270 | if app_summarize_must: 271 | helper += "Since the application requires summarization, you must use 'doc_summarizer' task in the steps.\n" 272 | if app_search_must: 273 | if app_chat_must: 274 | helper += "Since the application requires up to date knowledge in the web, you must use 'search_chat' task in the steps.\n" 275 | else: 276 | helper += "Since the application requires up to date knowledge in the web, you must use 'plan_and_execute' task in the steps.\n" 277 | 278 | if app_chat_must: 279 | if app_search_must: 280 | helper += "Since the application is chat-based, you must use 'ui_input_chat' and 'ui_output_chat' and 'search_chat' tasks in the steps.\n" 281 | else: 282 | helper += "Since the application is chat-based, you must use 'ui_input_chat' and 'ui_output_chat' and 'chat' tasks in the steps.\n" 283 | 284 | return helper 285 | -------------------------------------------------------------------------------- /demogpt/cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import subprocess 5 | 6 | 7 | def main(): 8 | current_dir = os.path.dirname(os.path.realpath(__file__)) 9 | subprocess.run(["streamlit", "run", os.path.join(current_dir, "app.py")]) 10 | 11 | 12 | if __name__ == "__main__": 13 | main() 14 | -------------------------------------------------------------------------------- /demogpt/prompt.py: -------------------------------------------------------------------------------- 1 | from langchain_openai import ChatOpenAI 2 | from langchain.prompts.chat import (ChatPromptTemplate, 3 | HumanMessagePromptTemplate, 4 | SystemMessagePromptTemplate) 5 | 6 | def 
import os
import sys

sys.path.append(os.path.abspath("demogpt/"))
import json
import unittest

from tqdm import tqdm

from . import utils
from .chains.chains import Chains
from .chains.task_chains import TaskChains
from .test_cases import CODE_SNIPPETS, INSTRUCTIONS, TEST_CASES, TOOL_EXAMPLES


class TestDemoGPT(unittest.TestCase):
    """End-to-end and per-stage tests for the DemoGPT pipeline.

    Each test logs its intermediate artifacts (plan, task list, feedback,
    generated code) to ``test_<TEST_INDEX>.log`` so failures can be inspected
    offline. The final generated app is written to ``test_final_<TEST_INDEX>.py``.
    """

    TEST_INDEX = 5
    INSTRUCTION = INSTRUCTIONS[TEST_INDEX]
    REFINE_ITERATIONS = 10
    # "Create a system that can summarize a content taken from url then create a blog post on the summarization"
    # "Create a system that can solve any math problem"
    TITLE = "My App"

    @classmethod
    def setUpClass(cls):
        # One shared log file for the whole class; closed in tearDownClass.
        cls.f = open(f"test_{TestDemoGPT.TEST_INDEX}.log", "w")

        # it sets the model name
        model_name = "gpt-3.5-turbo-0613"

        Chains.setLlm(model_name)
        TaskChains.setLlm(model_name)

    @classmethod
    def tearDownClass(cls):
        # Fixed: the log file was previously only closed inside test_all,
        # leaking the handle when any other test ran alone.
        cls.f.close()

    @classmethod
    def writeToFile(cls, title, res, instruction):
        """Append a titled section (title, instruction, result) to the log."""
        cls.f.write(title)
        cls.f.write("\n")
        cls.f.write(instruction)
        cls.f.write("\n")
        cls.f.write(res)
        cls.f.write("\n")
        cls.f.flush()

    @classmethod
    def writeFinalToFile(cls, res, instruction):
        """Write the final generated app code, headed by its instruction."""
        with open(f"test_final_{TestDemoGPT.TEST_INDEX}.py", "w") as f:
            f.write("#" + instruction + "\n")
            f.write(res)
            f.flush()

    def test_plan(self):
        # Deliberately overrides the case instruction with a fixed one and
        # stops after the first iteration: a single-case smoke test.
        for test_case in tqdm(TEST_CASES):
            instruction = test_case["instruction"]
            instruction = (
                "Create a system that can summarize a website from the given URL."
            )
            plan = Chains.plan(instruction)
            self.writeToFile("PLAN", plan, instruction)
            break

    def test_tasks(self):
        for test_case in tqdm(TEST_CASES):
            instruction = test_case["instruction"]
            plan = test_case["plan"]
            task_list = Chains.tasks(instruction=instruction, plan=plan)
            self.writeToFile("TASK LIST", json.dumps(task_list, indent=4), instruction)

    def test_feedback(self):
        for test_id in range(4, 5):
            instruction = INSTRUCTIONS[test_id]
            with open(f"test_final_{test_id}.py") as f:
                code = f.read()
            feedback = Chains.feedback(instruction=instruction, code=code)
            self.writeToFile("FEEDBACK", feedback, instruction)

    def test_refine(self):
        for test_id in range(4, 5):
            instruction = INSTRUCTIONS[test_id]
            with open(f"test_final_{test_id}.py") as f:
                code = f.read()
            feedback = Chains.feedback(instruction=instruction, code=code)
            refined_code = Chains.refine(
                instruction=instruction, code=code, feedback=feedback
            )
            self.writeToFile("REFINED CODE", refined_code, instruction)

    def test_final(self):
        for test_case in tqdm(CODE_SNIPPETS):
            instruction = test_case["instruction"]
            code_snippets = test_case["code_snippets"]
            code_snippets = utils.IMPORTS_CODE_SNIPPET + code_snippets
            final_code = Chains.final(draft_code=code_snippets)
            self.writeToFile("FINAL CODE", final_code, instruction)

    def test_task_ui_input_text(self):
        for example in TOOL_EXAMPLES["ui_input_text"]:
            instruction = example["instruction"]
            variable = example["variable"]
            res = TaskChains.uiInputText(instruction=instruction, variable=variable)
            self.writeToFile("UI INPUT TEXT", res, instruction)

    def test_task_ui_output_text(self):
        for example in TOOL_EXAMPLES["ui_input_text"]:
            instruction = example["instruction"]
            args = example["args"]
            res = TaskChains.uiOutputText(instruction=instruction, args=args)
            self.writeToFile("UI OUTPUT TEXT", res, instruction)

    def test_task_prompt_template(self):
        for example in TOOL_EXAMPLES["ui_input_text"]:
            instruction = example["instruction"]
            inputs = example["inputs"]
            res = TaskChains.promptChatTemplate(instruction=instruction, inputs=inputs)
            self.writeToFile("PROMPT CHAT TEMPLATE", res, instruction)

    def test(self):
        """Full pipeline: plan -> tasks -> controller/refine loop -> code -> final."""
        title = TestDemoGPT.TITLE

        instruction = TestDemoGPT.INSTRUCTION

        plan = Chains.plan(instruction)

        self.writeToFile("PLAN", plan, instruction)

        task_list = Chains.tasks(instruction=instruction, plan=plan)

        self.writeToFile("TASK LIST", json.dumps(task_list, indent=4), instruction)

        task_controller_result = Chains.taskController(tasks=task_list)

        self.writeToFile(
            "TASK CONTROLLER RESULT",
            json.dumps(task_controller_result, indent=4),
            instruction,
        )
        # Refine the task list until the controller accepts it (or we give up).
        for _ in range(TestDemoGPT.REFINE_ITERATIONS):
            if not task_controller_result["valid"]:
                task_list = Chains.refineTasks(
                    instruction=instruction,
                    tasks=task_list,
                    feedback=task_controller_result["feedback"],
                )
                self.writeToFile(
                    "REFINED TASK LIST", json.dumps(task_list, indent=4), instruction
                )
                task_controller_result = Chains.taskController(tasks=task_list)
            else:
                break

        self.writeToFile(
            "FEEDBACK", task_controller_result["feedback"], instruction
        )

        code_snippets = utils.init(title)

        self.writeToFile("CODE SNIPPETS", "", instruction)

        for task in tqdm(task_list):
            code = utils.getCodeSnippet(task, code_snippets)
            code = "#" + task["description"] + "\n" + code
            code_snippets += code
            self.writeToFile("", code, "")

        final_code = Chains.final(draft_code=code_snippets)

        self.writeFinalToFile(final_code, instruction)

    def test_all(self):
        # Fixed: the original called TestDemoGPT.test(instruction), passing the
        # instruction string as `self` (TypeError). Route each case through the
        # class attribute the pipeline test actually reads.
        for test_case in tqdm(TEST_CASES):
            TestDemoGPT.INSTRUCTION = test_case["instruction"]
            self.test()


if __name__ == "__main__":
    TestDemoGPT.INSTRUCTION = os.environ.get("instruction", TestDemoGPT.INSTRUCTION)
    TestDemoGPT.TITLE = os.environ.get("title", TestDemoGPT.TITLE)
    # Fixed: unittest has no `test()` entry point; unittest.main() is the runner.
    unittest.main()
Here's how you can create your own tool: 28 | 29 | 1. Inherit from the `BaseTool` class 30 | 2. Implement the `run` method 31 | 3. Set the `name` and `description` attributes 32 | 33 | Here's an example: 34 | 35 | ```python 36 | from demogpt_agenthub.tools import BaseTool 37 | class MyCustomTool(BaseTool): 38 | def __init__(self): 39 | self.name = "MyCustomTool" 40 | self.description = "This tool does something amazing!" 41 | super().__init__() 42 | def run(self, query): 43 | # Implement your tool's functionality here 44 | return f"Result for: {query}" 45 | ``` 46 | 47 | ## 🧰 Available Tools 48 | 49 | DemoGPT AgentHub comes with several built-in tools: 50 | 51 | - 🔍 DuckDuckGoSearchTool 52 | - 🌦 WeatherTool 53 | - 📚 WikipediaTool 54 | - 🐚 BashTool 55 | - 🐍 PythonTool 56 | - 📄 ArxivTool 57 | - 🎥 YouTubeSearchTool 58 | - 💻 StackOverFlowTool 59 | - 🌐 RequestUrlTool 60 | - 🗃 WikiDataTool 61 | - 🏥 PubmedTool 62 | 63 | ## 🤖 Initializing an Agent 64 | 65 | To create an agent, you'll need to: 66 | 67 | 1. Import the desired agent type 68 | 2. Initialize the tools you want to use 69 | 3. Create an instance of the agent with the tools and LLM 70 | 71 | Here's an example: 72 | 73 | ```python 74 | from demogpt_agenthub.agents import ToolCallingAgent 75 | from demogpt_agenthub.llms import OpenAIChatModel 76 | from demogpt_agenthub.tools import DuckDuckGoSearchTool, WeatherTool 77 | search_tool = DuckDuckGoSearchTool() 78 | weather_tool = WeatherTool() 79 | llm = OpenAIChatModel(model_name="gpt-4o-mini") 80 | agent = ToolCallingAgent(tools=[search_tool, weather_tool], llm=llm, verbose=True) 81 | ``` 82 | 83 | 84 | ## 🎮 Using an Agent 85 | 86 | Once you've created an agent, you can use it to ask questions or perform tasks: 87 | 88 | ```python 89 | query = "What's the weather like in New York today?" 
90 | response = agent.run(query) 91 | print(response) 92 | ``` 93 | 94 | 95 | ## 👥 Available Agent Types 96 | 97 | Currently, DemoGPT AgentHub supports the following agent types: 98 | 99 | 1. 🛠 ToolCallingAgent: An agent that can use multiple tools to answer questions and perform tasks. 100 | 101 | More agent types will be added in future updates! 102 | 103 | ## 🤝 Contributing 104 | 105 | We welcome contributions to DemoGPT AgentHub! If you have ideas for new features, tools, or improvements, please open an issue or submit a pull request. 106 | 107 | ## 📄 License 108 | 109 | DemoGPT AgentHub is released under the MIT License. See the LICENSE file for more details. 110 | -------------------------------------------------------------------------------- /demogpt_agenthub/__init__.py: -------------------------------------------------------------------------------- 1 | from demogpt_agenthub.tools import * 2 | from demogpt_agenthub.prompts import * -------------------------------------------------------------------------------- /demogpt_agenthub/agents/__init__.py: -------------------------------------------------------------------------------- 1 | from demogpt_agenthub.agents.base import BaseAgent 2 | from demogpt_agenthub.agents.tool_calling import ToolCallingAgent 3 | from demogpt_agenthub.agents.react import ReactAgent -------------------------------------------------------------------------------- /demogpt_agenthub/agents/base.py: -------------------------------------------------------------------------------- 1 | from langchain_core.prompts import ChatPromptTemplate 2 | from langchain_core.output_parsers import JsonOutputParser, StrOutputParser 3 | from demogpt_agenthub.utils.parsers import BooleanOutputParser 4 | from demogpt_agenthub.prompts.agents.tool_calling import tool_decider, final_answer 5 | from demogpt_agenthub.prompts.agents.react import success_decider 6 | 7 | from dotenv import load_dotenv 8 | import inspect 9 | 10 | load_dotenv() 11 | 12 | class 
class BaseAgent:
    """Shared plumbing for DemoGPT agents.

    Builds three LCEL chains from the prompt modules:

    * ``tool_decider``    -> JSON ``{reasoning, tool, argument}``: which tool to call next
    * ``final_answer``    -> plain-text answer synthesized from the context
    * ``success_decider`` -> boolean: does the context already contain enough info

    and keeps a ``(speaker, message)`` history that all chains read via
    :pyattr:`context`. Subclasses implement :py:meth:`run`.
    """

    def __init__(self, tools, llm, verbose=False, max_iter=10):
        self.llm = llm
        self.tool_decider_prompt = ChatPromptTemplate.from_messages([
            ("system", tool_decider.system_template),
            ("human", tool_decider.human_template)
        ])
        self.tool_decider = self.tool_decider_prompt | self.llm | JsonOutputParser()
        self.final_answer_prompt = ChatPromptTemplate.from_messages([
            ("system", final_answer.system_template),
            ("human", final_answer.human_template)
        ])
        self.final_answer = self.final_answer_prompt | self.llm | StrOutputParser()

        self.success_decider_prompt = ChatPromptTemplate.from_messages([
            ("system", success_decider.system_template),
            ("human", success_decider.human_template)
        ])

        self.success_decider = self.success_decider_prompt | self.llm | BooleanOutputParser()

        self.history = []
        self.verbose = verbose
        # Index tools by name for direct lookup when a decider names one.
        # (Fixed: the original assigned the raw list to self.tools and then
        # immediately overwrote it with this dict; the dead assignment is gone.)
        self.tools = {tool.name: tool for tool in tools}
        self.max_iter = max_iter

    @property
    def tool_explanations(self):
        """Return ``{tool_name: {description, run_description, parameters}}``
        built by introspecting each tool's ``run`` signature (``self`` excluded)."""
        explanations = {}
        for tool in self.tools.values():
            # Get the run function signature
            sig = inspect.signature(tool.run)
            # Format parameters, excluding 'self'
            params = [f"{name}: {param.annotation.__name__ if param.annotation != inspect._empty else 'any'}"
                      for name, param in sig.parameters.items() if name != 'self']
            param_str = ", ".join(params)

            explanations[tool.name] = {
                "description": tool.description,
                "run_description": tool.run.__doc__ or "No description available",
                "parameters": f"Input parameters: ({param_str})"
            }
        return explanations

    @property
    def context(self):
        """Render the conversation history as one 'name: message' line each."""
        return "\n".join([f"{name}: {message}" for name, message in self.history])

    def add_message(self, name, message):
        """Append a (speaker, message) pair to the shared history."""
        self.history.append((name, message))

    def pretty_print(self, message_type, content):
        """
        Displays the given content with color-coded formatting based on the message type.

        No-op unless the agent was constructed with ``verbose=True``.

        Args:
            message_type (str): The type of message (e.g., "Decision", "Tool call", "Tool result", "Answer").
            content (str): The content to be displayed.
        """

        if not self.verbose:
            return

        color_codes = {
            "Decision": "\033[95m",       # Magenta
            "Reasoning": "\033[94m",      # Blue
            "Tool call": "\033[93m",      # Yellow
            "Tool args": "\033[93m",      # Yellow
            "Tool result": "\033[92m",    # Green
            "Not Completed": "\033[91m",  # Red
            "Answer": "\033[92m"          # Green
        }

        if message_type in color_codes:
            print(f"{message_type}:")
            print(f"{color_codes[message_type]}{content}\033[0m")
        else:
            print(f"{message_type}:")
            print(content)

    def run(self, prompt: str):
        """
        Abstract method to process a user's prompt and return a response.

        This method should be implemented by subclasses to define the specific
        behavior of how the agent processes and responds to user prompts.

        Args:
            prompt (str): The user's input prompt or question.

        Returns:
            str: The agent's response to the user's prompt.

        Raises:
            NotImplementedError: If the subclass does not implement this method.
        """
        # Fixed: the message previously named 'ask', but the abstract method is 'run'.
        raise NotImplementedError("Subclasses must implement the 'run' method.")
from demogpt_agenthub.agents import BaseAgent

class ReactAgent(BaseAgent):
    """ReAct-style agent: loop decide -> call tool -> observe until the
    success decider reports the context suffices (or max_iter is hit),
    then synthesize a final answer from the accumulated context."""

    def __init__(self, tools, llm, verbose=False, max_iter=10):
        super().__init__(tools, llm, verbose, max_iter)

    def run(self, prompt):
        self.add_message("User", prompt)
        result_ready = False
        # Fixed: loop counter renamed from 'iter', which shadowed the builtin.
        iteration = 0
        while not result_ready and iteration < self.max_iter:
            # Ask the LLM whether the context already answers the task.
            result_ready = self.success_decider.invoke({"task": prompt, "context": self.context, "tools": self.tool_explanations})
            self.pretty_print("Decision", result_ready)
            if result_ready:
                break
            # Otherwise pick a tool, run it, and fold the result into history.
            decision = self.tool_decider.invoke({"task": prompt, "context": self.context, "tools": self.tool_explanations})
            self.add_message("Agent", decision["reasoning"])
            self.pretty_print("Reasoning", decision["reasoning"])
            self.pretty_print("Tool call", decision["tool"])
            tool_call = self.tools[decision["tool"]]
            tool_args = decision["argument"]
            self.pretty_print("Tool args", tool_args)
            tool_result = tool_call.run(**tool_args)
            self.add_message(decision["tool"], tool_result)
            self.pretty_print("Tool result", tool_result)
            iteration += 1
        if not result_ready:
            self.pretty_print("Not Completed", """The task was not completed within the maximum number of iterations. The agent will try to answer with the available context.
            If you want to try again, you can increase the maximum number of iterations by setting the max_iter parameter when creating the agent.""")
        answer = self.final_answer.invoke({"query": prompt, "context": self.context})
        self.add_message("Agent", answer)
        self.pretty_print("Answer", answer)
        return answer

if __name__ == "__main__":
    from demogpt_agenthub.tools import DuckDuckGoSearchTool, WeatherTool, PythonTool
    from demogpt_agenthub.llms import OpenAIChatModel
    search_tool = DuckDuckGoSearchTool()
    weather_tool = WeatherTool()
    python_tool = PythonTool()
    agent = ReactAgent(tools=[search_tool, weather_tool, python_tool], llm=OpenAIChatModel(model_name="gpt-4o-mini"), verbose=True)
    query = "What is the weather's temperature's square root in the country where Christiano Ronaldo is currently playing? Please precisely calculate the result."
    print(agent.run(query))
decision["reasoning"]) 19 | self.pretty_print("Tool call", decision["tool"]) 20 | tool_call = self.tools[decision["tool"]] 21 | tool_args = decision["argument"] 22 | tool_result = tool_call.run(**tool_args) 23 | self.add_message(decision["tool"], tool_result) 24 | self.pretty_print("Tool result", tool_result) 25 | answer = self.final_answer.invoke({"query": prompt, "context": self.context}) 26 | self.add_message("Agent", answer) 27 | self.pretty_print("Answer", answer) 28 | return answer 29 | 30 | if __name__ == "__main__": 31 | from demogpt_agenthub.tools import DuckDuckGoSearchTool, WeatherTool, WikipediaTool 32 | from demogpt_agenthub.llms import OpenAIChatModel 33 | search_tool = DuckDuckGoSearchTool() 34 | weather_tool = WeatherTool() 35 | wikipedia_tool = WikipediaTool() 36 | agent = ToolCallingAgent(tools=[search_tool, weather_tool, wikipedia_tool], llm=OpenAIChatModel(model_name="gpt-4o-mini"), verbose=True) 37 | query = "Who is Daron Acemoglu?" 38 | print(agent.run(query)) -------------------------------------------------------------------------------- /demogpt_agenthub/apis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt_agenthub/apis/__init__.py -------------------------------------------------------------------------------- /demogpt_agenthub/llms/__init__.py: -------------------------------------------------------------------------------- 1 | from demogpt_agenthub.llms.openai import OpenAIModel, OpenAIChatModel 2 | -------------------------------------------------------------------------------- /demogpt_agenthub/llms/base.py: -------------------------------------------------------------------------------- 1 | class BaseLLM: 2 | def run(self, prompt: str) -> str: 3 | raise NotImplementedError("Subclasses must implement the 'run' method") 
-------------------------------------------------------------------------------- /demogpt_agenthub/llms/openai.py: -------------------------------------------------------------------------------- 1 | from langchain_openai import OpenAI, ChatOpenAI 2 | from demogpt_agenthub.llms.base import BaseLLM 3 | 4 | class OpenAIModel(OpenAI, BaseLLM): 5 | def run(self, prompt: str) -> str: 6 | return self.invoke(prompt) 7 | 8 | class OpenAIChatModel(ChatOpenAI, BaseLLM): 9 | def run(self, prompt: str) -> str: 10 | return self.invoke(prompt).content -------------------------------------------------------------------------------- /demogpt_agenthub/prompts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/demogpt_agenthub/prompts/__init__.py -------------------------------------------------------------------------------- /demogpt_agenthub/prompts/agents/react/success_decider.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are a helpful assistant that is good at deciding if the context successfully includes the information needed to complete the task. 3 | The context is a list of messages between the user and the agent. 4 | The agent will sometimes call tools to get more information. 5 | The called tools together with their results are also included in the context. 6 | 7 | You will be given a task and the current context. 8 | You need to decide if the context includes all the necessary information needed to complete the task. 9 | You will first reason about the task and the context. 10 | Then, you will give if there is any missing information. 11 | If the task requires a computation, you must use the necessary tool provided to get the missing information. 12 | For the computation heavy tasks, you must use the necessary tool provided to get the missing information. 
13 | If you think the there is no need to use any tool, respond with "". 14 | If you think the you still need to call a tool, respond with "". 15 | """ 16 | 17 | human_template = """ 18 | Task: {task} 19 | 20 | Tools: {tools} 21 | 22 | Context: {context} 23 | 24 | Your reasoning and decision ( or ): 25 | """ -------------------------------------------------------------------------------- /demogpt_agenthub/prompts/agents/tool_calling/final_answer.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are a helpful assistant that can answer the user query based on the provided context. 3 | The context is a list of messages between the user and the assistant. 4 | The assistant tried to find the answer using various tools. 5 | Your job is to answer the user query based on the context. 6 | Asnwer as a kind assistant. 7 | """ 8 | 9 | human_template = """ 10 | User: {query} 11 | ============= 12 | Context: 13 | {context} 14 | ============= 15 | Answer: 16 | """ 17 | -------------------------------------------------------------------------------- /demogpt_agenthub/prompts/agents/tool_calling/tool_decider.py: -------------------------------------------------------------------------------- 1 | system_template = """ 2 | You are a helpful assistant that's good at accomplishing tasks. 3 | The task comes from the user and you have to decide which tool to use to accomplish the task. 4 | You'll be given the current context and the tools that you have. 5 | The tools consist of a name, description, and a function that can be used to call the tool. 
6 | 7 | Your response should be a valid JSON object in the following format: 8 | 9 | {{ 10 | "reasoning": , 11 | "tool": , 12 | "argument": 13 | }} 14 | """ 15 | 16 | human_template = """ 17 | Task: {task} 18 | ============== 19 | Context: {context} 20 | ============== 21 | Tools: {tools} 22 | ============== 23 | Response JSON: 24 | """ -------------------------------------------------------------------------------- /demogpt_agenthub/prompts/rag/base.py: -------------------------------------------------------------------------------- 1 | template = """Answer the question based only on the following context: 2 | {context} 3 | 4 | Question: {question} 5 | """ -------------------------------------------------------------------------------- /demogpt_agenthub/rag/__init__.py: -------------------------------------------------------------------------------- 1 | from demogpt_agenthub.rag.base import BaseRAG -------------------------------------------------------------------------------- /demogpt_agenthub/rag/base.py: -------------------------------------------------------------------------------- 1 | from langchain_core.documents import Document 2 | from langchain_core.output_parsers import StrOutputParser 3 | from langchain_core.runnables import RunnablePassthrough 4 | from langchain_core.prompts import ChatPromptTemplate 5 | from langchain_community.document_loaders import ( 6 | TextLoader, 7 | WebBaseLoader, 8 | PyPDFLoader, 9 | DirectoryLoader, 10 | JSONLoader 11 | ) 12 | from langchain_community.document_loaders.csv_loader import CSVLoader 13 | from typing import List, Union 14 | import os 15 | import shutil 16 | import logging 17 | 18 | from demogpt_agenthub.llms.base import BaseLLM 19 | from demogpt_agenthub.prompts.rag.base import template 20 | 21 | logging.basicConfig(level=logging.INFO) 22 | logger = logging.getLogger(__name__) 23 | 24 | class BaseRAG: 25 | OPENAI_EMBEDDING_MODELS = [ 26 | "text-embedding-3-small", 27 | "text-embedding-3-large", 28 | 
class BaseRAG:
    """Retrieval-augmented generation over a pluggable vector store.

    Wires retriever -> prompt -> llm -> string parser into one LCEL chain.
    Documents can be added as raw texts or from files/URLs (txt/pdf/json/csv/web).
    """

    # Models served by OpenAI's embedding API; any other name is loaded
    # through HuggingFace sentence-transformers.
    OPENAI_EMBEDDING_MODELS = [
        "text-embedding-3-small",
        "text-embedding-3-large",
        "text-embedding-ada-002"
    ]

    def __init__(self, llm: BaseLLM,
                 vectorstore: str,
                 persistent_path: str,
                 index_name: str,
                 reset_vectorstore: bool = False,
                 embedding_model_name: str = "sentence-transformers/all-mpnet-base-v2",
                 filter: dict = None,
                 k: int = 4,
                 verbose: bool = False):
        self.name = "RAG"
        self.description = "A tool that can search for information in vector database by using a query."
        self.llm = llm
        self.persistent_path = persistent_path
        self.verbose = verbose
        self.index_name = index_name
        # Passed to the retriever: number of hits and optional metadata filter.
        self.search_kwargs = {
            'k': k,
            'filter': filter
        }
        self.load_embedding_model(embedding_model_name)
        self.load_vectorstore(vectorstore, reset_vectorstore)
        self.output_parser = StrOutputParser()

        prompt = ChatPromptTemplate.from_template(template)

        self.rag_chain = (
            {"context": self.retriever, "question": RunnablePassthrough()}
            | prompt
            | self.llm
            | self.output_parser
        )

    def load_embedding_model(self, model_name: str):
        """Select the embedding backend from the model name (OpenAI vs HF)."""
        if model_name in self.OPENAI_EMBEDDING_MODELS:
            from langchain_openai import OpenAIEmbeddings
            self.embedding_model = OpenAIEmbeddings(model=model_name)
        else:
            from langchain_huggingface import HuggingFaceEmbeddings
            self.embedding_model = HuggingFaceEmbeddings(model_name=model_name)

    def load_vectorstore(self, vectorstore, reset_vectorstore: bool = False):
        """Open (optionally wiping) the chosen backend and build a retriever.

        Raises:
            ValueError: if `vectorstore` is not one of chroma/pinecone/faiss.
        """
        # First, close any existing connection
        # NOTE(review): this assumes the underlying client exposes close();
        # confirm against the installed langchain_chroma version.
        if hasattr(self, 'vectorstore') and hasattr(self.vectorstore, '_client'):
            self.vectorstore._client.close()

        if reset_vectorstore:
            if os.path.exists(self.persistent_path):
                print(f"Removing existing vectorstore at {self.persistent_path}")
                shutil.rmtree(self.persistent_path)
        if vectorstore == "chroma":
            from langchain_chroma import Chroma
            self.vectorstore = Chroma(embedding_function=self.embedding_model,
                                      persist_directory=self.persistent_path)
        elif vectorstore == "pinecone":
            from langchain_pinecone import PineconeVectorStore
            self.vectorstore = PineconeVectorStore(index_name=self.index_name)
        elif vectorstore == "faiss":
            from langchain_community.vectorstores import FAISS
            # NOTE(review): FAISS's constructor takes (embedding_function,
            # index, docstore, index_to_docstore_id); this positional call
            # looks wrong — likely FAISS.load_local / from_documents was
            # intended. TODO confirm before enabling the faiss backend.
            self.vectorstore = FAISS(self.persistent_path, self.embedding_model)
        else:
            raise ValueError(f"Vectorstore {vectorstore} not supported")

        self.retriever = self.vectorstore.as_retriever()

    def _add_documents(self, documents: List[Document]):
        """Insert pre-built Document objects into the store."""
        self.vectorstore.add_documents(documents)

    def add_texts(self, texts: Union[str, List[str]]):
        """Insert one or many raw strings."""
        if isinstance(texts, str):
            texts = [texts]
        self.vectorstore.add_texts(texts)

    def add_files(self, file_paths: Union[str, List[str]]):
        """Load and index files/URLs, choosing a loader by suffix or scheme.

        Unsupported paths are logged and skipped; loader errors are logged
        but do not abort the batch.
        """
        if isinstance(file_paths, str):
            file_paths = [file_paths]
        docs = []
        for file_path in file_paths:
            if file_path.endswith(".txt"):
                loader = TextLoader(file_path)
            elif file_path.endswith(".pdf"):
                loader = PyPDFLoader(file_path)
            elif file_path.endswith(".json"):
                loader = JSONLoader(file_path)
            elif file_path.endswith(".csv"):
                loader = CSVLoader(file_path)
            elif file_path.startswith("http"):
                loader = WebBaseLoader(file_path)
            else:
                logger.info(f"File {file_path} not supported")
                continue
            try:
                docs.extend(loader.load())
            except Exception as e:
                logger.error(f"Error loading file {file_path}: {e}")
        self._add_documents(docs)

    def run(self, query: str):
        """Answer `query` with the retrieval-augmented chain."""
        return self.rag_chain.invoke(query)

if __name__ == "__main__":
    from demogpt_agenthub.llms.openai import OpenAIChatModel
    rag = BaseRAG(llm=OpenAIChatModel(model="gpt-4o-mini"),
                  vectorstore="chroma",
                  persistent_path="rag_chroma",
                  index_name="rag_index",
                  reset_vectorstore=True,
                  embedding_model_name="sentence-transformers/all-mpnet-base-v2",
                  filter={"search_kwargs": {"score_threshold": 0.5}}
                  )
    rag.add_files(["/home/melih/Downloads/results - 2024-11-04T160209.186.pdf"])
    # Fixed: the class defines run(), not query(); the original demo called
    # rag.query(...) which would raise AttributeError at runtime.
    print(rag.run("How many people joined the interview with Age 40 to 49?"))
langchain_community.tools import ShellTool 2 | from demogpt_agenthub.tools import BaseTool 3 | class BashTool(BaseTool): 4 | def __init__(self): 5 | self.tool = ShellTool() 6 | super().__init__() 7 | 8 | def run(self, commands): 9 | if isinstance(commands, str): 10 | return self.tool.run(({"commands": [commands]})) 11 | return self.tool.run(({"commands": commands})) 12 | 13 | if __name__ == "__main__": 14 | commands = ["ls", "pwd"] 15 | tool = BashTool() 16 | print(tool.run(commands)) 17 | command = "ls" 18 | print(tool.run(command)) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/duckduckgo.py: -------------------------------------------------------------------------------- 1 | from demogpt_agenthub.tools import BaseTool 2 | 3 | class DuckDuckGoSearchTool(BaseTool): 4 | def __init__(self, raw_results=False, max_results=4, backend="text"): 5 | if raw_results: 6 | from langchain_community.tools import DuckDuckGoSearchResults 7 | self.tool = DuckDuckGoSearchResults(max_results=max_results, backend=backend) 8 | else: 9 | from langchain_community.tools import DuckDuckGoSearchRun 10 | self.tool = DuckDuckGoSearchRun() 11 | 12 | super().__init__() 13 | 14 | def run(self, inp: str): 15 | return self.tool.run(inp) 16 | 17 | if __name__ == "__main__": 18 | query = "What is the capital of France?" 
19 | raw_tool = DuckDuckGoSearchTool(raw_results=True, backend="news") 20 | print(raw_tool.run(query)) 21 | tool = DuckDuckGoSearchTool() 22 | print(tool.run(query)) 23 | 24 | -------------------------------------------------------------------------------- /demogpt_agenthub/tools/pubmed.py: -------------------------------------------------------------------------------- 1 | from langchain_community.tools.pubmed.tool import PubmedQueryRun 2 | from demogpt_agenthub.tools import BaseTool 3 | 4 | class PubmedTool(BaseTool): 5 | def __init__(self): 6 | self.tool = PubmedQueryRun() 7 | super().__init__() 8 | 9 | def run(self, query): 10 | return self.tool.invoke(query) 11 | 12 | if __name__ == "__main__": 13 | tool = PubmedTool() 14 | print(tool.run("covid")) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/repl.py: -------------------------------------------------------------------------------- 1 | from langchain_experimental.utilities import PythonREPL 2 | from demogpt_agenthub.tools import BaseTool 3 | 4 | class PythonTool(BaseTool): 5 | def __init__(self): 6 | self.tool = PythonREPL() 7 | super().__init__() 8 | self.name = "Python Interpreter" 9 | self.description = """A tool that can execute Python code to perform precise calculations by using the Python programming language. For any calculations that require a high degree of precision, this tool must be used. 10 | You must add print statement to the code to see the results of the calculations. 
Otherwise, the result will not be displayed.""" 11 | 12 | def run(self, code): 13 | return self.tool.run(code) 14 | 15 | if __name__ == "__main__": 16 | tool = PythonTool() 17 | code = "print('Hello, World!')" 18 | print(tool.run(code)) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/req.py: -------------------------------------------------------------------------------- 1 | from langchain_community.utilities import TextRequestsWrapper 2 | from demogpt_agenthub.tools import BaseTool 3 | 4 | class RequestUrlTool(BaseTool): 5 | def __init__(self): 6 | self.tool = TextRequestsWrapper() 7 | super().__init__() 8 | 9 | def run(self, url): 10 | return self.tool.get(url) 11 | 12 | if __name__ == "__main__": 13 | tool = RequestUrlTool() 14 | print(tool.run("https://www.google.com")) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/research.py: -------------------------------------------------------------------------------- 1 | from langchain_community.utilities import ArxivAPIWrapper 2 | from demogpt_agenthub.tools import BaseTool 3 | 4 | class ArxivTool(BaseTool): 5 | def __init__(self): 6 | self.tool = ArxivAPIWrapper() 7 | super().__init__() 8 | 9 | def run(self, query): 10 | return self.tool.run(query) 11 | 12 | if __name__ == "__main__": 13 | tool = ArxivTool() 14 | print(tool.run("2106.01495")) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/stack_exchange.py: -------------------------------------------------------------------------------- 1 | from langchain_community.utilities import StackExchangeAPIWrapper 2 | from demogpt_agenthub.tools import BaseTool 3 | 4 | class StackOverFlowTool(BaseTool): 5 | def __init__(self): 6 | self.tool = StackExchangeAPIWrapper() 7 | super().__init__() 8 | 9 | def run(self, query): 10 | return self.tool.run(query) 11 | 12 | if __name__ == "__main__": 13 | query = "How 
to create a list in python" 14 | tool = StackOverFlowTool() 15 | print(tool.run(query)) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/weather.py: -------------------------------------------------------------------------------- 1 | import os 2 | from langchain_community.utilities import OpenWeatherMapAPIWrapper 3 | from langchain_community.tools.openweathermap.tool import OpenWeatherMapQueryRun 4 | from demogpt_agenthub.tools import BaseTool 5 | 6 | class WeatherTool(BaseTool): 7 | def __init__(self): 8 | api_key = os.environ.get('OPENWEATHERMAP_API_KEY') 9 | if not api_key: 10 | raise ValueError('OPENWEATHERMAP_API_KEY environment variable is not set') 11 | wrapper = OpenWeatherMapAPIWrapper() 12 | self.tool = OpenWeatherMapQueryRun(api_wrapper=wrapper) 13 | super().__init__() 14 | 15 | def run(self, city): 16 | return self.tool.run(city) 17 | 18 | if __name__ == '__main__': 19 | tool = WeatherTool() 20 | print(tool.run('London')) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/wikidata.py: -------------------------------------------------------------------------------- 1 | from langchain_community.tools.wikidata.tool import WikidataAPIWrapper, WikidataQueryRun 2 | from demogpt_agenthub.tools import BaseTool 3 | 4 | class WikiDataTool(BaseTool): 5 | def __init__(self): 6 | self.tool = WikidataQueryRun(api_wrapper=WikidataAPIWrapper()) 7 | super().__init__() 8 | 9 | def run(self, query): 10 | return self.tool.run(query) 11 | 12 | if __name__ == "__main__": 13 | query = "Alan Turing" 14 | tool = WikiDataTool() 15 | print(tool.run(query)) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/wikiped.py: -------------------------------------------------------------------------------- 1 | from langchain_community.tools import WikipediaQueryRun 2 | from langchain_community.utilities import 
WikipediaAPIWrapper 3 | from demogpt_agenthub.tools import BaseTool 4 | 5 | class WikipediaTool(BaseTool): 6 | def __init__(self): 7 | self.tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()) 8 | super().__init__() 9 | 10 | def run(self, query): 11 | return self.tool.run(query) 12 | 13 | if __name__ == "__main__": 14 | query = "How to create a list in python" 15 | tool = WikipediaTool() 16 | print(tool.run(query)) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/yolo.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from demogpt_agenthub.tools.base import BaseTool 3 | from ultralytics import YOLO 4 | 5 | class YoloTool(BaseTool): 6 | def __init__(self): 7 | self.model = YOLO("yolo11n.pt") 8 | super().__init__() 9 | self.name = "YOLO Object Detection" 10 | self.description = """A tool that can execute YOLO object detection by using the YOLO model. 11 | It returns a detailed description of the detected objects in the image. 
12 | It gets the path of the image as an argument.""" 13 | 14 | def run(self, path): 15 | results = self.model(path)[0] 16 | boxes = results.boxes 17 | names = results.names 18 | classes = results.boxes.cls 19 | resulting_str = "" 20 | object2count = defaultdict(int) 21 | for box, cls in zip(boxes, classes): 22 | 23 | x1, y1, x2, y2 = map(int, box.xyxy[0].tolist()) 24 | _class = names[int(cls)] 25 | object2count[_class] += 1 26 | resulting_str += f"The {_class}[{object2count[_class]}] is from ({x1}, {y1}) to ({x2}, {y2}).\n" 27 | intro = "" 28 | for obj, count in object2count.items(): 29 | intro += f"{count} {obj}, " 30 | intro = intro.strip(", ") 31 | if len(intro) > 0: 32 | intro = "There are " + intro + " in the image.\n" 33 | else: 34 | intro = "There are no objects in the image.\n" 35 | resulting_str = intro + resulting_str 36 | if len(resulting_str) > 0: 37 | resulting_str = "in the image.\n" + resulting_str 38 | return resulting_str 39 | 40 | if __name__ == "__main__": 41 | tool = YoloTool() 42 | path = "/home/melih/Downloads/bus.jpg" 43 | print(tool.run(path)) -------------------------------------------------------------------------------- /demogpt_agenthub/tools/youtube.py: -------------------------------------------------------------------------------- 1 | from langchain_community.tools import YouTubeSearchTool as Youtube 2 | from demogpt_agenthub.tools import BaseTool 3 | 4 | class YouTubeSearchTool(BaseTool): 5 | def __init__(self): 6 | self.tool = Youtube() 7 | super().__init__() 8 | 9 | def run(self, query): 10 | return self.tool.run(query) 11 | 12 | if __name__ == "__main__": 13 | query = "lex friedman" 14 | tool = YouTubeSearchTool() 15 | print(tool.run(query)) -------------------------------------------------------------------------------- /demogpt_agenthub/utils/parsers.py: -------------------------------------------------------------------------------- 1 | from langchain_core.exceptions import OutputParserException 2 | from 
langchain_core.output_parsers import BaseOutputParser 3 | 4 | 5 | # The [bool] desribes a parameterization of a generic. 6 | # It's basically indicating what the return type of parse is 7 | # in this case the return type is either True or False 8 | class BooleanOutputParser(BaseOutputParser[bool]): 9 | """Custom boolean parser.""" 10 | 11 | true_val: str = "" 12 | false_val: str = "" 13 | 14 | def parse(self, text: str) -> bool: 15 | cleaned_text = text.strip().upper() 16 | if self.true_val.upper() in cleaned_text: 17 | return True 18 | elif self.false_val.upper() in cleaned_text: 19 | return False 20 | else: 21 | raise OutputParserException( 22 | f"BooleanOutputParser expected output value to either be " 23 | f"{self.true_val} or {self.false_val} (case-insensitive). " 24 | f"Received {cleaned_text}." 25 | ) 26 | 27 | @property 28 | def _type(self) -> str: 29 | return "boolean_output_parser" -------------------------------------------------------------------------------- /docs/README_CN.md: -------------------------------------------------------------------------------- 1 | # ![favicon](../assets/puzzle.png) DemoGPT:自动 LangChain 管道生成 2 | 3 |

4 | DemoGPT logo:自动生成 LangChain 流程 5 |

6 | 7 |

8 | ⚡ 仅使用提示即可快速创建演示。 ⚡ 9 |

10 | 11 |

12 | 版本 13 | 官方网站 14 | DemoGPT文档 15 |

16 | 17 |

18 | CN doc 19 | EN doc 20 | roadmap 21 | License: MIT 22 |

23 | 24 |

25 | 打开一个问题 26 | 已关闭的问题 27 | DemoGPT  星星 28 |

29 | 30 |

31 | Twitter Follow 32 |

33 | 34 |

35 | Streamlit应用 36 | 37 |

38 | 39 | 40 | ## 🔥 演示 41 | 42 | 要快速演示,您可以访问[我们的网站](https://demogpt.io) 43 | 44 | https://github.com/melih-unsal/DemoGPT/assets/34304254/8991e296-b6fe-4817-bd08-4dab6d13020d 45 | 46 | ## 📚 文档 47 | 48 | 请访问我们的[文档站点](https://melih-unsal.github.io/DemoGPT-Docs/),查看完整的操作文档和指南 49 | 50 | ## 📑 目录 51 | 52 | - [简介](#-简介) 53 | - [管道](#%EF%B8%8F-管道) 54 | - [安装](#-安装) 55 | - [使用](#-使用) 56 | - [贡献](#-贡献) 57 | - [许可证](#-许可证) 58 | 59 | ## 📌 简介 60 | 61 | DemoGPT 是一款创新的开源项目,旨在简化基于语言学习模型(LLM)的应用程序的开发。它利用 GPT-3.5-turbo 的能力,使用'Thought Tree' (ToT) 方法自动生成 LangChain 代码。传统上,LangChain 被用于为基于 LLM 的应用程序创建管道,而通过 DemoGPT,我们正在改变处理这些管道的方式。 62 | 63 | 这个过程是全自动的,DemoGPT 会生成代码,运行测试,并逐步开发项目。每一段代码都会被单独测试和评估。如果它通过了自动生成的测试,开发就会继续,从而实现高效和无误的开发。 64 | 65 | ## ⚙️ 架构 66 | ### DemoGPT 架构 67 | ![DemoGPT 架构](../assets/demogpt_new_pipeline1.jpeg?raw=true "DemoGPT Architecture") 68 | 69 | ## 🔧 安装 70 | 71 | 1. 克隆仓库: 72 | ```sh 73 | git clone https://github.com/melih-unsal/DemoGPT.git 74 | ``` 75 | 2. 导航到项目目录: 76 | ```sh 77 | cd DemoGPT 78 | ``` 79 | 3. 安装必要的依赖项: 80 | ```sh 81 | pip install -r requirements.txt 82 | ``` 83 | 84 | ## 🎮 使用 85 | 86 | ## 🤝 贡献 87 | 88 | 欢迎为DemoGPT项目做出贡献!无论您是修复错误、改进文档还是提出新的功能,我们都非常感谢您的努力。在开始任何工作之前,请检查开放的问题。 89 | 90 | > 请阅读[`CONTRIBUTING`](../CONTRIBUTING.md)以获取我们的[`CODE OF CONDUCT`](../CODE_OF_CONDUCT.md)的详细信息,以及向我们提交拉取请求的过程。 91 | 92 | ## 📜 许可证 93 | 94 | DemoGPT是一个基于[MIT许可证](../LICENSE)的开源项目。 95 | 96 | --- 97 | 98 | 如有任何问题、疑问或评论,请随时与我们联系或提出问题。我们非常欣赏您的反馈,以使DemoGPT变得更好。 -------------------------------------------------------------------------------- /docs/ROADMAP.md: -------------------------------------------------------------------------------- 1 | # DemoGPT Development Roadmap 2 | 3 | Our goal is to enable DemoGPT to accomplish anything that can be done through LangChain. 
In order to realize this goal, we have outlined the following development roadmap: 4 | 5 | ## Phase 1: New DemoGPT Pipeline Implementation 6 | 7 | - Implement a new DemoGPT pipeline including plan generation, task creation, code snippet generation, and final code assembly. 8 | - Define useful LangChain tasks and publish a release with the new pipeline without refinement. 9 | 10 | ## Phase 2: Model Selection and Integration 11 | 12 | - Add a feature to allow users to select models that meet specific performance criteria. 13 | - Integrate Llama 2 to DemoGPT for running everything locally. 14 | 15 | ## Phase 3: Task Implementation and Refinement 16 | 17 | - Implement remaining LangChain tasks. 18 | - Implement a self-refining strategy for model response refinement. 19 | 20 | ## Phase 4: API Integration and Expansion 21 | 22 | - Integrate the Gorilla model for API calls. 23 | - Add Rapid API for expanding available API calls. 24 | 25 | ## Phase 5: Database Implementation 26 | 27 | - Implement a publicly available database to accelerate the generation process by retrieving similar examples during the refining process. 28 | - Add all successfully generated steps to a DB to eliminate redundant refinement. 29 | 30 | ## Phase 6: Creation of React-Based Applications 31 | 32 | - Extend DemoGPT's capabilities to create react-based applications, leveraging a self-refining strategy for continuous improvement of application performance. 33 | 34 | This roadmap will guide our development efforts, and we look forward to sharing our progress with the community as we work towards making DemoGPT an indispensable tool for LangChain development. 
35 | -------------------------------------------------------------------------------- /docs/ROADMAP_CN.md: -------------------------------------------------------------------------------- 1 | # DemoGPT 开发路线图 2 | 3 | 我们的目标是使 DemoGPT 能够完成通过 LangChain 可以完成的任何事情。为了实现这一目标,我们已经概述了以下开发路线图: 4 | 5 | ## 第一阶段:新 DemoGPT 流程实现 6 | 7 | - 实现新的 DemoGPT 流程,包括计划生成、任务创建、代码片段生成和最终代码组装。 8 | - 定义有用的 LangChain 任务,并发布新流程的版本,无需精炼。 9 | 10 | ## 第二阶段:模型选择和集成 11 | 12 | - 添加一个功能,允许用户选择满足特定性能标准的模型。 13 | - 将 Llama 2 集成到 DemoGPT 中,以便在本地运行所有内容。 14 | 15 | ## 第三阶段:任务实现和精炼 16 | 17 | - 实现剩余的 LangChain 任务。 18 | - 实现模型响应精炼的自我精炼策略。 19 | 20 | ## 第四阶段:API 集成和扩展 21 | 22 | - 集成 Gorilla 模型进行 API 调用。 23 | - 添加 Rapid API 以扩展可用的 API 调用。 24 | 25 | ## 第五阶段:数据库实现 26 | 27 | - 实现公共可用的数据库,通过在精炼过程中检索类似的示例来加速生成过程。 28 | - 将所有成功生成的步骤添加到 DB 中,以消除冗余的精炼。 29 | 30 | ## 第六阶段:创建基于 React 的应用程序 31 | 32 | - 扩展 DemoGPT 的功能,以创建基于 React 的应用程序,利用自我精炼策略不断提高应用程序性能。 33 | 34 | 此路线图将指导我们的开发工作,我们期待着与社区分享我们的进展,努力使 DemoGPT 成为 LangChain 开发的不可或缺的工具。 35 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["poetry-core>=1.0.0"] 3 | build-backend = "poetry.core.masonry.api" 4 | 5 | [tool.poetry] 6 | name = "demogpt" 7 | version = "1.2.7" 8 | description = "Autonomous AI Agent for Gen-AI App Generation " 9 | authors = ["Melih Unsal "] 10 | license = "MIT" 11 | readme = "README.md" 12 | homepage = "https://github.com/melih-unsal/DemoGPT" 13 | packages = [ 14 | { include = "demogpt" } 15 | ] 16 | 17 | [tool.poetry.dependencies] 18 | python = "^3.8.1" 19 | pydantic = "<2.0.0" 20 | streamlit = "*" 21 | altair = "<5" 22 | langchain = "*" 23 | langchain_experimental = "*" 24 | langchain_openai = "*" 25 | openai = "*" 26 | tiktoken = "*" 27 | python-dotenv = "*" 28 | unstructured = "*" 29 | pdf2image = "*" 30 | pdfminer-six = "*" 31 | autopep8 = "*" 32 | numexpr = "*" 33 | flake8 = "*" 34 | 
duckduckgo-search = "6.3.2" 35 | wikipedia = "1.4.0" 36 | pyowm = "3.3.0" 37 | 38 | [tool.poetry.dev-dependencies] 39 | 40 | [tool.poetry.scripts] 41 | demogpt = "demogpt.cli:main" 42 | 43 | [tool.poetry.urls] 44 | "Homepage" = "https://github.com/melih-unsal/DemoGPT" 45 | -------------------------------------------------------------------------------- /rag_chroma/12c93a31-bb79-46fa-a985-7ab34c742bb7/header.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/rag_chroma/12c93a31-bb79-46fa-a985-7ab34c742bb7/header.bin -------------------------------------------------------------------------------- /rag_chroma/12c93a31-bb79-46fa-a985-7ab34c742bb7/length.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/rag_chroma/12c93a31-bb79-46fa-a985-7ab34c742bb7/length.bin -------------------------------------------------------------------------------- /rag_chroma/12c93a31-bb79-46fa-a985-7ab34c742bb7/link_lists.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/rag_chroma/12c93a31-bb79-46fa-a985-7ab34c742bb7/link_lists.bin -------------------------------------------------------------------------------- /rag_chroma/chroma.sqlite3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/rag_chroma/chroma.sqlite3 -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/melih-unsal/DemoGPT/d9b1ce998f9a8a70f0e14d6d109361f0620ae433/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_llms.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from demogpt_agenthub.llms.openai import OpenAIModel, OpenAIChatModel 3 | 4 | class TestOpenAIModel(unittest.TestCase): 5 | def setUp(self): 6 | self.model = OpenAIModel(model="gpt-3.5-turbo-instruct") 7 | 8 | def test_correct_response(self): 9 | result = self.model.run("What is the capital of Turkey?") 10 | self.assertIn("Ankara", result) 11 | 12 | class TestOpenAIChatModel(unittest.TestCase): 13 | def setUp(self): 14 | self.model = OpenAIChatModel(model="gpt-4o-mini") 15 | 16 | def test_correct_response(self): 17 | result = self.model.run("What is the capital of Turkey?") 18 | self.assertIn("Ankara", result) -------------------------------------------------------------------------------- /tests/test_rag.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from demogpt_agenthub.rag.base import BaseRAG 4 | from demogpt_agenthub.llms.openai import OpenAIChatModel 5 | 6 | class TestRAG(unittest.TestCase): 7 | def setUp(self): 8 | self.rag = BaseRAG( 9 | llm=OpenAIChatModel(model="gpt-4o-mini"), 10 | vectorstore="chroma", 11 | persistent_path="rag_chroma", 12 | index_name="rag_index", 13 | reset_vectorstore=True, 14 | embedding_model_name="sentence-transformers/all-mpnet-base-v2", 15 | filter={"search_kwargs": {"score_threshold": 0.5}} 16 | ) 17 | 18 | with open("test_rag.txt", "w") as f: 19 | f.write("John Doe is 40 years old and he is a software engineer.") 20 | 21 | def tearDown(self): 22 | if os.path.exists("test_rag.txt"): 23 | os.remove("test_rag.txt") 24 | 25 | def test_add_files(self): 26 | self.rag.add_files(["test_rag.txt"]) 27 | question = "How old is John Doe?" 
28 | self.assertIn("40", self.rag.query(question)) 29 | 30 | def test_add_text(self): 31 | self.rag.add_texts("John Doe is 28 years old and he is a doctor.") 32 | question = "How old is John Doe?" 33 | self.assertIn("28", self.rag.query(question)) --------------------------------------------------------------------------------