├── .env.example ├── .gitignore ├── CONTRIBUTING.md ├── PULL_REQUEST_TEMPLATE.md ├── README.md ├── assets ├── banner.png ├── logo.svg ├── test_command.png └── textbase-deploy.gif ├── docs ├── .gitignore ├── README.md ├── assets │ ├── library_server.gif │ ├── local_server.gif │ ├── mac_zip.png │ ├── select_interpreter.gif │ └── zipping.gif ├── babel.config.js ├── docs │ ├── FAQs.md │ ├── deployment │ │ ├── _category_.json │ │ ├── deploy-from-cli.md │ │ ├── deploy-from-dashboard.md │ │ └── prerequisites.md │ ├── examples │ │ ├── _category_.json │ │ ├── dalle-bot.md │ │ ├── gpt-assistants-bot.md │ │ ├── gpt-vision-bot.md │ │ ├── huggingface-bot.md │ │ ├── mimicking-bot.md │ │ ├── openai-bot.md │ │ └── palmai-bot.md │ ├── get-started │ │ ├── _category_.json │ │ ├── bot-example-with-response-structure.md │ │ ├── create-your-first-bot.md │ │ ├── expected-bot-response.md │ │ ├── installation.md │ │ └── test-locally.md │ ├── intro.md │ └── usage.md ├── docusaurus.config.js ├── package-lock.json ├── package.json ├── sidebars.js ├── src │ ├── css │ │ └── custom.css │ └── pages │ │ └── index.module.css ├── static │ ├── .nojekyll │ └── img │ │ ├── favicon.png │ │ └── logo.svg └── tsconfig.json ├── examples ├── dalle-bot │ └── main.py ├── gpt-assistants-bot │ └── main.py ├── gpt-vision-bot │ └── main.py ├── huggingface-bot │ └── main.py ├── mimic-bot │ └── main.py ├── openai-bot │ └── main.py └── palmai-bot │ └── main.py ├── pyproject.toml ├── tests └── __init__.py └── textbase ├── __init__.py ├── bot.py ├── datatypes.py ├── helpers.py ├── message.py ├── models.py ├── template ├── main.py └── requirements.txt ├── textbase_cli.py └── utils ├── download_build.py ├── logs.py └── server.py /.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= 2 | HUGGINGFACEHUB_API_TOKEN= 3 | PALM_API_KEY= -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | marvin.toml 2 | .marvin-history 3 | marvin.egg-info 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | pip-wheel-metadata/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | dist 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .nox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | *.py,cover 55 | .hypothesis/ 56 | .pytest_cache/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | *.log 64 | local_settings.py 65 | db.sqlite3 66 | db.sqlite3-journal 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | .python-version 90 | 91 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 92 | __pypackages__/ 93 | 94 | # Celery stuff 95 | celerybeat-schedule 96 | celerybeat.pid 97 | 98 | # SageMath parsed files 99 | *.sage.py 100 | 101 | # Environments 102 | .env 103 | .venv 104 | env/ 105 | venv/ 106 | ENV/ 107 | env.bak/ 108 | venv.bak/ 109 | cpcli-env/ 110 | /poetry.lock 111 | .vscode 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | .DS_STORE -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Textbase 2 | 3 | Being part of the core `Textbase` team is accessible to anyone who is motivated and wants to be part of that journey! 4 | 5 | Please see below how to contribute to the project, also refer to the contributing documentation. 6 | 7 | ## How can you help us? 8 | 9 | * Report a bug 10 | * Improve documentation 11 | * Discuss the code implementation 12 | * Submit a bug fix 13 | * Propose new features 14 | * Test Textbase 15 | 16 | ## Code contributions 17 | 18 | 1. Fork the repository to your personal GitHub account. 19 | We call this forked repo as `/textbase` repo. 20 | 21 | 2. Now, clone `/textbase` and add `cofactoryai/textbase` as the upstream: 22 | ```bash 23 | git clone https://github.com//textbase.git 24 | cd textbase 25 | git remote add upstream https://github.com/cofactoryai/textbase.git 26 | git fetch upstream 27 | ``` 28 | 29 | 3. Create a new branch with the name of your feature (eg. `docs`): 30 | ```bash 31 | git pull upstream main 32 | git checkout -b 33 | ``` 34 | 35 | 4. Close the terminal and complete the task. 
You may commit your progress as many times as you like during the process: 36 | ```bash 37 | git add . 38 | git commit -m "" 39 | ``` 40 | 41 | 5. Commit your progress if you haven't already and push it to `:` likewise: 42 | ```bash 43 | git push origin 44 | ``` 45 | 46 | 6. Open your browser and go to `/textbase` repo on GitHub. 47 | 48 | 7. Create a PR 49 | **from `:` to `cofactoryai:main`** (Very important step) 50 | 51 | 8. Wait for the maintainer to review your code. 52 | If you need to make some changes, commit and push to `:`. 53 | 54 | 9. Delete `:` branch **after** the PR is merged or is out of scope. 55 | ```bash 56 | git checkout dev 57 | git push -d origin 58 | git branch -d 59 | ``` 60 | 61 | 10. Repeat from step 3 for a new PR. 62 | 63 | And you're done! 64 | 65 | > NOTE: Be sure to merge the latest from "upstream" before making a pull request! Also, make the PR to the staging branch. 66 | 67 | ## Feature and Bug reports 68 | We use GitHub issues to track bugs and features. Report them by opening a [new issue](https://github.com/cofactoryai/textbase/issues/) 69 | 70 | If you are new to `textbase` and opensource in general we have collected some `good-first-issues` for you to get started. Have a look at it [here](https://github.com/cofactoryai/textbase/labels/good%20first%20issue) 71 | 72 | ## Code review process 73 | 74 | The Pull Request reviews are done on a regular basis. Please, make sure you respond to our feedback/questions. 75 | 76 | 77 | 78 | Join our mission of `building and deploying AI chatbots` with a single command! 
79 | -------------------------------------------------------------------------------- /PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Scope 2 | `[Add context about what this feature is about and explain why of the feature and your technical decisions.]` 3 | 4 | 5 | 6 | - [ ] `[Sub task]` 7 | 8 | 9 | ### Screenshots 10 | --- 11 | 12 | 13 | ## Code improvements 14 | - `[Did you add some generic like utility, component, or anything else useful outside of this PR]` 15 | 16 | 17 | ### Developer checklist 18 | - [ ] I’ve manually tested that code works locally on desktop and mobile browsers. 19 | - [ ] I’ve reviewed my code. 20 | - [ ] I’ve removed all my personal credentials (API keys etc.) from the code. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 | Textbase python library 4 | 5 |
6 |
7 |

8 | 9 |

10 | 11 | Documentation 12 | 13 |

14 | 15 |

16 |

✨ Textbase is a framework for building chatbots using NLP and ML. ✨

17 |

18 | 19 |

20 | 21 |

22 | 23 | Just implement the `on_message` function in `main.py` and Textbase will take care of the rest :) 24 | 25 | Since it is just Python you can use whatever models, libraries, vector databases and APIs you want. 26 | 27 | Coming soon: 28 | - [x] [PyPI package](https://pypi.org/project/textbase-client/) 29 | - [x] Easy web deployment via [textbase-client deploy](docs/docs/deployment/deploy-from-cli.md) 30 | - [ ] SMS integration 31 | - [ ] Native integration of other models (Claude, Llama, ...) 32 | 33 | ![Demo Deploy GIF](assets/textbase-deploy.gif) 34 | 35 | ## Installation 36 | Make sure you have `python version >=3.9.0`, it's always good to follow the [docs](https://docs.textbase.ai/get-started/installation) 👈🏻 37 | ### 1. Through pip 38 | ```bash 39 | pip install textbase-client 40 | ``` 41 | 42 | ### 2. Local installation 43 | Clone the repository and install the dependencies using [Poetry](https://python-poetry.org/) (you might have to [install Poetry](https://python-poetry.org/docs/#installation) first). 44 | 45 | For proper details see [here]() 46 | 47 | ```bash 48 | git clone https://github.com/cofactoryai/textbase 49 | cd textbase 50 | poetry shell 51 | poetry install 52 | ``` 53 | 54 | ## Start development server 55 | 56 | > If you're using the default template, **remember to set the OpenAI API key** in `main.py`. 57 | 58 | Run the following command: 59 | - if installed locally 60 | ```bash 61 | poetry run python textbase/textbase_cli.py test 62 | ``` 63 | - if installed through pip 64 | ```bash 65 | textbase-client test 66 | ``` 67 | Response: 68 | ```bash 69 | Path to the main.py file: examples/openai-bot/main.py # You can create a main.py by yourself and add that path here. NOTE: The path should not be in quotes 70 | ``` 71 | Now go to the link in blue color which is shown on the CLI and you will be able to chat with your bot! 
72 | ![Local UI](assets/test_command.png) 73 | 74 | ### `Other commands have been mentioned in the documentation website.` [Have a look](https://docs.textbase.ai/usage) 😃! 75 | 76 | 77 | ## Contributions 78 | 79 | Contributions are welcome! Please open an issue or create a pull request. 80 | Follow our [`contributions guide`](CONTRIBUTING.md) for more details! 81 | -------------------------------------------------------------------------------- /assets/banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/assets/banner.png -------------------------------------------------------------------------------- /assets/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /assets/test_command.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/assets/test_command.png -------------------------------------------------------------------------------- /assets/textbase-deploy.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/assets/textbase-deploy.gif -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | 4 | # Production 5 | /build 6 | 7 | # Generated files 8 | .docusaurus 9 | .cache-loader 10 | 11 | # Misc 12 | .DS_Store 13 | .env.local 14 | .env.development.local 15 | .env.test.local 16 | .env.production.local 17 | 18 | 
npm-debug.log* 19 | yarn-debug.log* 20 | yarn-error.log* 21 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Website 2 | 3 | This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator. 4 | 5 | ### Installation 6 | 7 | ``` 8 | $ yarn 9 | ``` 10 | 11 | ### Local Development 12 | 13 | ``` 14 | $ yarn start 15 | ``` 16 | 17 | This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. 18 | 19 | ### Build 20 | 21 | ``` 22 | $ yarn build 23 | ``` 24 | 25 | This command generates static content into the `build` directory and can be served using any static contents hosting service. 26 | 27 | ### Deployment 28 | 29 | Using SSH: 30 | 31 | ``` 32 | $ USE_SSH=true yarn deploy 33 | ``` 34 | 35 | Not using SSH: 36 | 37 | ``` 38 | $ GIT_USER= yarn deploy 39 | ``` 40 | 41 | If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. 
42 | -------------------------------------------------------------------------------- /docs/assets/library_server.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/docs/assets/library_server.gif -------------------------------------------------------------------------------- /docs/assets/local_server.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/docs/assets/local_server.gif -------------------------------------------------------------------------------- /docs/assets/mac_zip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/docs/assets/mac_zip.png -------------------------------------------------------------------------------- /docs/assets/select_interpreter.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/docs/assets/select_interpreter.gif -------------------------------------------------------------------------------- /docs/assets/zipping.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/docs/assets/zipping.gif -------------------------------------------------------------------------------- /docs/babel.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | presets: [require.resolve('@docusaurus/core/lib/babel/preset')], 3 | }; 4 | -------------------------------------------------------------------------------- /docs/docs/FAQs.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 7 3 | --- 4 | 5 | ### Why is my bot deploy failing even though I have followed the correct folder structure? 6 | Make sure that you have an `on_message` function inside your `main.py` file and you also have the `bot()` decorator. 7 | 8 | If you are a MacOS user, make sure that you use the website which we have provided in the [prerequisites](./deployment/prerequisites.md#important-note-for-macos-users) section to zip your files. 9 | 10 | ### Why am I getting a weird axios error when I am trying to deploy my bot using the CLI? 11 | We currently have a two bot limit per user. If you have exceeded that, then you will get this error in the CLI. 12 | 13 | ### Why am I getting an Error: Got unexpected extra argument? 14 | This is because your path has a space in between somewhere and it's considering whatever's there after the space as an entirely extra argument. Check your path and make sure that there are no spaces in between. -------------------------------------------------------------------------------- /docs/docs/deployment/_category_.json: -------------------------------------------------------------------------------- 1 | { 2 | "label": "Deployment", 3 | "position": 4, 4 | "link": { 5 | "type": "generated-index" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /docs/docs/deployment/deploy-from-cli.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 2 3 | --- 4 | 5 | # Deploy from CLI 6 | 7 | ## API key generation 8 | 9 | Before deploying your bot from the CLI, you need to generate an API key in the dashboard. To do that, you need to: 10 | 11 | 1. Navigate to the Textbase [dashboard](https://textbase-dashboard-nextjs.vercel.app/). 12 | 2. Sign in using your google account. 13 | 3. 
Generate an API key by clicking on `Generate` in the bottom left section. 14 | 15 | ## Deployment 16 | 17 | After this, you can execute the `textbase-client deploy` command to deploy your bot from a terminal. 18 | 19 | After executing it, it will ask for: 20 | 1. Path to the zip folder 21 | 2. Bot name (**IMPORTANT:** can only contain lowercase alphanumeric characters, hyphens, and underscores) 22 | 3. Textbase API key 23 | 24 | If you want to run this command in one shot, you can make use of flags: 25 | 26 | ```bash 27 | textbase-client deploy --path= --bot-name= --api_key= 28 | ``` 29 | 30 | If this command executes successfully, it will return a table with `Status`, `Bot ID` and `URL` and you can click on that URL to view your bot! 31 | -------------------------------------------------------------------------------- /docs/docs/deployment/deploy-from-dashboard.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | --- 4 | 5 | # Deploy from Dashboard 6 | 7 | 1. Navigate to the Textbase [dashboard](https://textbase-dashboard-nextjs.vercel.app/). 8 | 2. Sign in using your google account. 9 | 3. Click on `Create Deployment` and then click on `Create Bot` on the top right. 10 | 4. You will need to provide a chatbot name and you need to upload the zip file. **IMPORTANT:** The bot name can only contain lowercase alphanumeric characters, hyphens, and underscores. 11 | 5. Click on `Create Bot` to start the deployment. 12 | 6. This will redirect you to the `Deployments` section after a few seconds. In here, you can check the status of your bot and if it's deployed successfully, a link will be generated. You can click on the blue button with the symbol: `` which will redirect to the link where the bot is deployed and you can test it out! 
-------------------------------------------------------------------------------- /docs/docs/deployment/prerequisites.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | --- 4 | 5 | # Prerequisites 6 | There are two methods to deploy your bot to the internet so that everyone can use it. 7 | 8 | Before using any method, you need to ensure that: 9 | 1. You have a `requirements.txt` file which includes all the additional requirements which you might have installed while coding up the logic for your bot and it also contains the `textbase-client` requirement. 10 | 2. The name of the file in which the `on_message` function is present is named `main.py`. 11 | 3. You can have your own packages and modules in different folders and have relative imports to them. But the `main.py` and `requirements.txt` must **ABSOLUTELY NOT** be in any folder of their own. Do take a look at the [folder structures](#folder-structure) below for better understanding. 12 | 3. Archive these two (or more) files into a `.zip` archive. It's important that it's a **.zip** archive and not anything else. 13 | 1. You can archive it yourself but we recommend you use the [compress](../usage.md#compress) command available from the CLI. 14 | 2. If you wish to do this by yourself and you are using MacOS, please read the note below. 15 | 16 | ## Important note for MacOS users 17 | Please download the software `RAR Extractor MAX` from App Store 18 | 19 | ![Mac Zip Software](../../assets/mac_zip.png) 20 | 21 | for creating archives as MacOS creates an extra `__MACOSX` folder when compressing using the native compress utility which causes some issues with our backend. 22 | 23 | 24 | ## Folder structure 25 | When you decide to archive the files, please **MAKE SURE** that main.py and requirements.txt are available in the **root** of the archive itself. As in if the zip is extracted, it will produce two (or more) files/folders. 
26 | 27 | ![Zip folder](../../assets/zipping.gif) 28 | 29 | ### Good folder structure :white_check_mark: 30 | ``` 31 | your-bot.zip/ 32 | ├── main.py 33 | ├── requirements.txt 34 | └── your_package/ 35 | └── your_module/ 36 | └── sub_module 37 | ``` 38 | ### Bad folder structure 1 :x: 39 | ``` 40 | your-bot.zip/ 41 | └── bot_folder/ 42 | ├── main.py 43 | ├── requirements.txt 44 | └── your_package/ 45 | └── your_module/ 46 | └── sub_module 47 | ``` 48 | 49 | ### Bad folder structure 2 :x: 50 | ``` 51 | your-bot.zip/ 52 | ├── main_folder/ 53 | │ ├── main.py 54 | │ └── requirements.txt 55 | └── your_package/ 56 | └── your_module/ 57 | └── sub_module 58 | ``` -------------------------------------------------------------------------------- /docs/docs/examples/_category_.json: -------------------------------------------------------------------------------- 1 | { 2 | "label": "Examples", 3 | "position": 5, 4 | "link": { 5 | "type": "generated-index" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /docs/docs/examples/dalle-bot.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 5 3 | --- 4 | 5 | # DALL-E bot 6 | This bot makes an API call to OpenAI and processes the user input. It uses DALL-E. 
7 | 8 | **You must import the `Image` datatype and wrap your bot_response with it so that the images can be rendered on the chat UI.** 9 | ```py 10 | from typing import List 11 | from textbase import bot, Message 12 | from textbase.models import DallE 13 | from textbase.datatypes import Image 14 | 15 | # Load your OpenAI API key 16 | DallE.api_key = "" 17 | 18 | @bot() 19 | def on_message(message_history: List[Message], state: dict = None): 20 | 21 | # Generate DallE response 22 | bot_response = DallE.generate( 23 | message_history=message_history, # Assuming history is the list of user messages 24 | ) 25 | 26 | return { 27 | "messages": [Image(url=bot_response)], 28 | "state": state 29 | } 30 | ``` -------------------------------------------------------------------------------- /docs/docs/examples/gpt-assistants-bot.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 7 3 | --- 4 | 5 | # GPT Assistants bot 6 | This bot uses the Assistants API from OpenAI and processes the user input. 7 | 8 | ```py 9 | from typing import List 10 | from textbase import bot, Message 11 | from textbase.models import OpenAI 12 | 13 | # Load your OpenAI API key 14 | OpenAI.api_key = "" 15 | 16 | @bot() 17 | def on_message(message_history: List[Message], state: dict = None): 18 | last_message = message_history[-1]['content'][-1] 19 | text = last_message['value'] 20 | 21 | if ('id' not in state): 22 | state['id'] = OpenAI.create_assistant( 23 | name="Math Tutor", 24 | instructions="You are a personal math tutor. 
Write and run code to answer math questions.", 25 | tools=[{"type": "code_interpreter"}], 26 | model="gpt-4-1106-preview" 27 | ) 28 | 29 | while(state['id'] != ''): 30 | bot_responses = OpenAI.run_assistant( 31 | message_history=message_history, 32 | text=text, 33 | assistant_id=state['id'] 34 | ) 35 | 36 | return { 37 | "messages": [bot_responses], 38 | "state": state 39 | } 40 | ``` -------------------------------------------------------------------------------- /docs/docs/examples/gpt-vision-bot.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 6 3 | --- 4 | 5 | # GPT Vision bot 6 | This bot calls Vision API from OpenAI and processes the user image. 7 | 8 | ```py 9 | from typing import List 10 | from textbase import bot, Message 11 | from textbase.models import OpenAI 12 | 13 | # Load your OpenAI API key 14 | OpenAI.api_key = "" 15 | 16 | @bot() 17 | def on_message(message_history: List[Message], state: dict = None): 18 | last_message = message_history[-1]['content'][-1] 19 | data_type = last_message['data_type'] 20 | 21 | if data_type == "IMAGE_URL": 22 | bot_response = OpenAI.vision( 23 | message_history=message_history, # Assuming history is the list of user messages 24 | model="gpt-4-vision-preview", 25 | ) 26 | elif data_type == "STRING": 27 | bot_response = OpenAI.vision( 28 | message_history=message_history, # Assuming history is the list of user messages 29 | model="gpt-4-vision-preview", 30 | ) 31 | 32 | return { 33 | "messages": [bot_response], 34 | "state": state 35 | } 36 | ``` -------------------------------------------------------------------------------- /docs/docs/examples/huggingface-bot.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | --- 4 | 5 | # HuggingFace bot 6 | 7 | This bot makes an API call to HuggingFace and processes the user input. 
It uses Microsoft's [DialoGPT-large](https://huggingface.co/microsoft/DialoGPT-large) model. 8 | 9 | ```py 10 | from typing import List 11 | from textbase import bot, Message 12 | from textbase.models import HuggingFace 13 | 14 | # Load your HuggingFace API key 15 | HuggingFace.api_key = "" 16 | 17 | # Prompt for DialoGPT-large 18 | SYSTEM_PROMPT = """You are chatting with an AI. There are no specific prefixes for responses, so you can ask or talk about anything you like. 19 | The AI will respond in a natural, conversational manner. Feel free to start the conversation with any question or topic, and let's have a 20 | pleasant chat! 21 | """ 22 | 23 | @bot() 24 | def on_message(message_history: List[Message], state: dict = None): 25 | 26 | # Generate HuggingFace response. Uses the DialoGPT-large model from Microsoft by default. 27 | bot_response = HuggingFace.generate( 28 | system_prompt=SYSTEM_PROMPT, 29 | message_history=message_history, # Assuming history is the list of user messages 30 | ) 31 | 32 | return { 33 | "messages": [bot_response], 34 | "state": state 35 | } 36 | ``` -------------------------------------------------------------------------------- /docs/docs/examples/mimicking-bot.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | --- 4 | 5 | # Mimicking bot 6 | 7 | This bot just returns whatever the user has typed in. 
8 | 9 | ```py 10 | from typing import List 11 | from textbase import bot, Message 12 | 13 | @bot() 14 | def on_message(message_history: List[Message], state: dict = None): 15 | 16 | # Mimic user's response 17 | bot_response = [message["value"] for message in message_history[-1]["content"]] 18 | 19 | # message_history[-1]["content"] structure is 20 | 21 | # [ 22 | # { 23 | # "data_type": "STRING", 24 | # "value": "" 25 | # } 26 | # ] 27 | 28 | return { 29 | "messages": bot_response, 30 | "state": state 31 | } 32 | ``` -------------------------------------------------------------------------------- /docs/docs/examples/openai-bot.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 2 3 | --- 4 | 5 | # Open AI bot 6 | 7 | This bot makes an API call to OpenAI and processes the user input. It uses GPT-3.5 Turbo. 8 | 9 | ```py 10 | from typing import List 11 | from textbase import bot, Message 12 | from textbase.models import OpenAI 13 | 14 | # Load your OpenAI API key 15 | OpenAI.api_key = "" 16 | 17 | # Prompt for GPT-3.5 Turbo 18 | SYSTEM_PROMPT = """You are chatting with an AI. There are no specific prefixes for responses, so you can ask or talk about anything you like. 19 | The AI will respond in a natural, conversational manner. Feel free to start the conversation with any question or topic, and let's have a 20 | pleasant chat! 
21 | """ 22 | 23 | @bot() 24 | def on_message(message_history: List[Message], state: dict = None): 25 | 26 | # Generate GPT-3.5 Turbo response 27 | bot_response = OpenAI.generate( 28 | system_prompt=SYSTEM_PROMPT, 29 | message_history=message_history, # Assuming history is the list of user messages 30 | model="gpt-3.5-turbo", 31 | ) 32 | 33 | return { 34 | "messages": [bot_response], 35 | "state": state 36 | } 37 | ``` -------------------------------------------------------------------------------- /docs/docs/examples/palmai-bot.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 4 3 | --- 4 | 5 | # Google PaLM AI bot 6 | 7 | This bot makes an API call to PaLMAI and processes the user input. It uses PaLM Chat. 8 | 9 | ```py 10 | from typing import List 11 | from textbase import bot, Message 12 | from textbase.models import PalmAI 13 | 14 | # Load your PALM API key 15 | PalmAI.api_key = "" 16 | 17 | @bot() 18 | def on_message(message_history: List[Message], state: dict = None): 19 | 20 | bot_response = PalmAI.generate( 21 | message_history=message_history, # Assuming history is the list of user messages 22 | ) 23 | 24 | return { 25 | "messages": [bot_response], 26 | "state": state 27 | } 28 | ``` -------------------------------------------------------------------------------- /docs/docs/get-started/_category_.json: -------------------------------------------------------------------------------- 1 | { 2 | "label": "Get Started", 3 | "position": 2, 4 | "link": { 5 | "type": "generated-index", 6 | "description": "Get started with TextBase!" 
7 | } 8 | } 9 | -------------------------------------------------------------------------------- /docs/docs/get-started/bot-example-with-response-structure.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 4 3 | --- 4 | # Bot examples with response structures 5 | ## Bot example for text generation 6 | This particular example uses OpenAI's API. You can use your own or you can even integrate some in the project itself. We are open for contributions! 7 | ```py 8 | from typing import List 9 | from textbase import bot, Message 10 | from textbase.models import OpenAI 11 | 12 | # Load your OpenAI API key 13 | OpenAI.api_key = "" 14 | 15 | # Prompt for GPT-3.5 Turbo 16 | SYSTEM_PROMPT = """You are chatting with an AI. There are no specific prefixes for responses, so you can ask or talk about anything you like. 17 | The AI will respond in a natural, conversational manner. Feel free to start the conversation with any question or topic, and let's have a 18 | pleasant chat! 19 | """ 20 | 21 | @bot() 22 | def on_message(message_history: List[Message], state: dict = None): 23 | 24 | # Generate GPT-3.5 Turbo response 25 | bot_response = OpenAI.generate( 26 | system_prompt=SYSTEM_PROMPT, 27 | message_history=message_history, # Assuming history is the list of user messages 28 | model="gpt-3.5-turbo", 29 | ) 30 | 31 | return { 32 | "messages": [bot_response], 33 | "state": state 34 | } 35 | ``` 36 | 37 | ## Bot example for image generation 38 | This particular example uses DALL-E's API. You can use your own or you can even integrate some in the project itself. We are open for contributions! 
39 | 40 | **You must import the `Image` datatype and wrap your bot_response with it so that the images can be rendered on the chat UI.** 41 | ```py 42 | from typing import List 43 | from textbase import bot, Message 44 | from textbase.models import DallE 45 | from textbase.datatypes import Image 46 | 47 | # Load your OpenAI API key 48 | DallE.api_key = "" 49 | 50 | @bot() 51 | def on_message(message_history: List[Message], state: dict = None): 52 | 53 | # Generate DallE response 54 | bot_response = DallE.generate( 55 | message_history=message_history, # Assuming history is the list of user messages 56 | ) 57 | 58 | return { 59 | "messages": [Image(url=bot_response)], 60 | "state": state 61 | } 62 | ``` 63 | 64 | ## Bot example for text _and_ image generation 65 | This example uses both OpenAI _and_ DALL-E's API. You can use your own or you can even integrate some in the project itself. We are open for contributions! 66 | 67 | **You must import the `Image` datatype and wrap your bot_response with it so that the images can be rendered on the chat UI.** 68 | ```py 69 | from typing import List 70 | from textbase import bot, Message 71 | from textbase.models import OpenAI, DallE 72 | from textbase.datatypes import Image 73 | 74 | # Load your OpenAI API key 75 | OpenAI.api_key = DallE.api_key = "" 76 | 77 | # Prompt for GPT-3.5 Turbo 78 | SYSTEM_PROMPT = """You are chatting with an AI. There are no specific prefixes for responses, so you can ask or talk about anything you like. 79 | The AI will respond in a natural, conversational manner. Feel free to start the conversation with any question or topic, and let's have a 80 | pleasant chat! 
81 | """ 82 | 83 | @bot() 84 | def on_message(message_history: List[Message], state: dict = None): 85 | 86 | last_message = message_history[-1]['content'][-1] 87 | data_type = last_message['data_type'] 88 | 89 | # Generate GPT-3.5 Turbo response 90 | if data_type == 'STRING': 91 | bot_response = OpenAI.generate( 92 | system_prompt=SYSTEM_PROMPT, 93 | message_history=message_history, # Assuming history is the list of user messages 94 | model="gpt-3.5-turbo", 95 | ) 96 | 97 | # Generate similar images based on the image uploaded by the user 98 | # Note that we are wrapping it around the Image datatype 99 | elif data_type == 'IMAGE_URL': 100 | bot_response = Image(DallE.generate_variations( 101 | message_history=message_history, 102 | size="1024x1024", 103 | )) 104 | 105 | return { 106 | "messages": [bot_response], 107 | "state": state 108 | } 109 | ``` -------------------------------------------------------------------------------- /docs/docs/get-started/create-your-first-bot.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 2 3 | --- 4 | 5 | # Create your first bot 6 | Let's get started on creating your first bot. 7 | 8 | You can make your own model using NLP and ML or you can make use of one of our inbuilt models. To use our inbuilt models, you can import the `models` module. 
9 | 10 | #### Currently we support: 11 | #### Text generation 12 | - [OpenAI](../examples/openai-bot.md) 13 | - [HuggingFace](../examples/huggingface-bot.md) ([Microsoft/dialoGPT-large](https://huggingface.co/microsoft/DialoGPT-large)) 14 | - BotLibre 15 | - [PaLM AI](../examples/palmai-bot.md) 16 | #### Image generation 17 | - [DALL-E](../examples/dalle-bot.md) -------------------------------------------------------------------------------- /docs/docs/get-started/expected-bot-response.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | --- 4 | 5 | # Expected bot response 6 | ## `datatypes` module 7 | The `datatypes` module provides three different classes which act as wrappers for different data types. 8 | 9 | ### `Image` data type 10 | #### EXAMPLE: 11 | ```py 12 | from textbase.datatypes import Image 13 | 14 | # bot logic 15 | 16 | return { 17 | "messages": [Image(url=bot_response)], 18 | "state": state 19 | } 20 | ``` 21 | #### PARAMETERS: 22 | - **url** - A publicly hosted URL for an image file. 23 | - **pil_image** - A PIL image object. 24 | - **path** - A path to an image file on your computer. 25 | 26 | **NOTE**: All the above parameters are *mutually exclusive*. 27 | 28 | #### RETURNS: 29 | - A custom URL if a `pil_image` or a `path` is provided. 30 | - The URL itself if `url` is provided. 31 | 32 | #### RAISES: 33 | - **TypeError** - If two or more parameters are provided simultaneously or if the PIL image is not valid. 34 | - **FileNotFoundError** - If the image file path is invalid. 35 | 36 | ### `Video` data type 37 | #### EXAMPLE: 38 | ```py 39 | from textbase.datatypes import Video 40 | 41 | # bot logic 42 | 43 | return { 44 | "messages": [Video(path="path/to/video/file.mp4")], 45 | "state": state 46 | } 47 | ``` 48 | #### PARAMETERS: 49 | - **url** - A publicly hosted URL for a video file. 50 | - **path** - A path to a video file on your computer. 
51 | 52 | **NOTE**: All the above parameters are *mutually exclusive*. 53 | 54 | #### RETURNS: 55 | - A custom URL if a `path` is provided. 56 | - The URL itself if `url` is provided. 57 | 58 | #### RAISES: 59 | - **FileNotFoundError** - If the video file path is invalid. 60 | 61 | ### `Audio` data type 62 | #### EXAMPLE: 63 | ```py 64 | from textbase.datatypes import Audio 65 | 66 | # bot logic 67 | 68 | return { 69 | "messages": [Audio(path="path/to/audio/file.mp3")], 70 | "state": state 71 | } 72 | ``` 73 | #### PARAMETERS: 74 | - **url** - A publicly hosted URL for an audio file. 75 | - **path** - A path to an audio file on your computer. 76 | 77 | **NOTE**: All the above parameters are *mutually exclusive*. 78 | 79 | #### RETURNS: 80 | - A custom URL if a `path` is provided. 81 | - The URL itself if `url` is provided. 82 | 83 | #### RAISES: 84 | - **FileNotFoundError** - If the audio file path is invalid. 85 | 86 | ### `File` data type 87 | #### EXAMPLE: 88 | ```py 89 | from textbase.datatypes import File 90 | 91 | # bot logic 92 | 93 | return { 94 | "messages": [File(path="path/to/file.pdf")], 95 | "state": state 96 | } 97 | ``` 98 | #### PARAMETERS: 99 | - **url** - A publicly hosted URL for a file. 100 | - **path** - A path to a file on your computer. 101 | 102 | **NOTE**: All the above parameters are *mutually exclusive*. 103 | 104 | #### RETURNS: 105 | - A custom URL if a `path` is provided. 106 | - The URL itself if `url` is provided. 107 | 108 | #### RAISES: 109 | - **FileNotFoundError** - If the file path is invalid. 110 | -------------------------------------------------------------------------------- /docs/docs/get-started/installation.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | --- 4 | 5 | # Installation 6 | 7 | ## Using PyPI 8 | 9 | 1. 
Make sure to [upgrade/install](https://www.python.org/downloads/) your `Python` installation to a `version >= 3.9` and add it to your `PATH` while installing. 10 | 2. Run `pip install textbase-client`. 11 | 12 | For the usage of commands through this library, refer to the [usage](../usage.md) section. 13 | 14 | ## Local installation of the library 15 | 16 | Before getting started with the instructions for installing the library locally, make sure that you have done these prerequisite steps: 17 | 18 | 1. Fork the repository 19 | 2. Clone the repo 20 | 3. Open the repo in your VS Code 21 | 4. Install the VS Code extension for Python if not already done. 22 | 23 | ### [Video guide](https://youtu.be/ChGp44kQ7jY) for Windows 24 | 25 | ### Guide for Windows 26 | 27 | 1. Make sure to [upgrade/install](https://www.python.org/downloads/) your `Python` installation to a `version >= 3.9` and add it to your `PATH` while installing. 28 | 2. Now, you need to [install](https://python-poetry.org/docs/#installation) `Poetry`, which is a python dependency manager which makes your life easier. To do so, you can just run 29 | ```bash 30 | pip install poetry 31 | ``` 32 | 3. Use the keyboard combo Ctrl + Shift + P and search for `Terminal: Select Default Profile` and select `Command Prompt`. 33 | 4. You can open the VS Code terminal using Ctrl + \` and then run the command below in the VSCode terminal inside the folder where you have cloned textbase repo. This makes sure that Poetry makes a virtual environment inside your current directory. This command does not give you any output. 34 | ```bash 35 | poetry config virtualenvs.in-project true 36 | ``` 37 | 5. `poetry shell` 38 | 6. Use the keyboard combo Ctrl + Shift + P and select `Python: Select Interpreter` (you can type this option in if it's not visible) and make sure after running this you have selected the Poetry interpreter. 39 | ![Select interpreter](../../assets/select_interpreter.gif) 40 | 7.
`poetry install` to install the required dependencies. 41 | 42 | ### [Video guide](https://youtu.be/r7G-RlVq_Ec) for Linux (for Ubuntu, version >19.04) 43 | 44 | ### Guide for Linux (for Ubuntu, version >19.04) and MacOS 45 | 46 | 1. Make sure to [upgrade/install](https://www.python.org/downloads/) your `Python` installation to a `version >= 3.9` and add it to your `PATH` while installing. 47 | 2. Now, you need to [install](https://python-poetry.org/docs/#installation) `Poetry`, which is a python dependency manager which makes your life easier. To do so, you can just run 48 | ```bash 49 | pip install poetry 50 | ``` 51 | 3. Add it to your path using: 52 | ```bash 53 | export PATH="$HOME/.local/bin:$PATH" 54 | ``` 55 | 4. You can open the VS Code terminal using Ctrl + \` (Mac: + \`) and then run the command below in the VSCode terminal inside the folder where you have cloned textbase repo. This makes sure that Poetry makes a virtual environment inside your current directory. This command does not give you any output. 56 | ```bash 57 | poetry config virtualenvs.in-project true 58 | ``` 59 | 5. `poetry shell` 60 | 6. Use the keyboard combo Ctrl + Shift + P (Mac: + Shift + P) and select `Python: Select Interpreter` (you can type this option in if it's not visible) and make sure after running this you have selected the Poetry interpreter. 61 | ![Select interpreter](../../assets/select_interpreter.gif) 62 | 7. `poetry install` to install the required dependencies. 63 | 64 | ### Guide for Ubuntu (≤19.04) 65 | 66 | 1. Follow this [guide](https://gist.github.com/basaks/652eea861a143a9b3d11805c96273488) to install `Python version 3.9`. 67 | 2. Install pip using 68 | ```bash 69 | sudo apt install python-pip 70 | ``` 71 | 3. Install poetry using 72 | ```bash 73 | pip install poetry 74 | ``` 75 | 4. Add it to your path using 76 | ```bash 77 | export PATH="$HOME/.local/bin:$PATH" 78 | ``` 79 | 5. 
You can open the VS Code terminal using Ctrl + \` (Mac: + \`) and then run the command: `poetry config virtualenvs.in-project true` in the VS Code terminal inside the folder where you have cloned textbase repo so that Poetry makes a `.venv` folder inside your current project directory. This command does not give you any output. 80 | 6. Running the commands below will make a new virtual Python environment inside the current directory and then you can select the default python interpreter to be the one in the `.venv` folder. 81 | ```bash 82 | cd textbase 83 | poetry shell 84 | ``` 85 | 7. In order to select the default python interpreter in VS Code, Ctrl + Shift + P (Mac: + Shift + P) and select `Python: Select Interpreter` (you can type this option in if it's not visible) and make sure after running this you have selected the Poetry interpreter. 86 | ![Select interpreter](../../assets/select_interpreter.gif) 87 | 8. `poetry install` to install all the required dependencies. -------------------------------------------------------------------------------- /docs/docs/get-started/test-locally.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 5 3 | --- 4 | 5 | # Test locally 6 | **Regardless of the method of installation, both of these commands will ask for a path to your `main.py` file. When giving this path, make sure that the path is *NOT* enclosed within quotes.** 7 | 8 | Running any of the above commands will spin up a local server and a URL will be printed in your terminal. You can navigate to this URL to check if your bot works and interact with it! 
9 | ## If you have installed the library from PyPI 10 | ```bash 11 | textbase-client test 12 | ``` 13 | ![](../../assets/library_server.gif) 14 | ## If you have cloned the repo 15 | For Linux and MacOS 16 | ```bash 17 | poetry run python3 textbase/textbase_cli.py test 18 | ``` 19 | For Windows 20 | ```bash 21 | poetry run python textbase/textbase_cli.py test 22 | ``` 23 | -------------------------------------------------------------------------------- /docs/docs/intro.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | slug: / 4 | --- 5 | 6 | # TextBase 7 | 8 | ## What is textbase? 9 | :sparkles: Textbase is a framework for building chatbots using NLP and ML. :sparkles: 10 | 11 | Since it's just Python you can use any model, library, vector database and API you want! 12 | 13 | Coming soon: 14 | - [x] [PyPI package](https://pypi.org/project/textbase-client/) 15 | - [x] Easy web deployment via [textbase-client deploy](../docs/deployment/deploy-from-cli.md) 16 | - [ ] SMS integration 17 | - [ ] Native integration of other models (Claude, Llama, ...) -------------------------------------------------------------------------------- /docs/docs/usage.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | --- 4 | 5 | # Usage 6 | You can execute these commands after installing the `textbase-client` package. 7 | 8 | ## init 9 | Before executing this command, make sure that you have installed `textbase-client` using `pip` as mentioned [here](./get-started/installation.md) 10 | Run this command to create a basic setup of your project 11 | ```bash 12 | textbase-client init 13 | ``` 14 | If you wish to run this in one go, you can make use of the `--project_name` flag 15 | ```bash 16 | textbase-client init --project_name= 17 | ``` 18 | 19 | ## test 20 | Before executing this command, make sure that: 21 | 1.
The directory in which your `main.py` file is in, **DOES NOT** have any spaces. 22 | 2. You have a `main.py` file akin to the ones provided in the [examples](./category/examples) section. 23 | 24 | This will start a local server and will give you a link which you can navigate to and test your bot there. 25 | ```bash 26 | textbase-client test 27 | ``` 28 | If you wish to run this in one go, you can make use of the `--path` and `--port` flags 29 | ```bash 30 | textbase-client test --path= 31 | ``` 32 | **If you wish to use the `--path` flag, make sure you have your path inside quotes.** 33 | 34 | ```bash 35 | textbase-client test --port=8080 36 | ``` 37 | **Port 8080 is the default, but it's crucial to note that it's frequently used. If you have it open for another application, this flag lets you alter the backend server's port to prevent conflicts.** 38 | 39 | ## compress 40 | Before executing this command, make sure that 41 | 1. You have a `main.py` and `requirements.txt` file in the path that you have given to compress. 42 | 2. Your `requirements.txt` has the `textbase-client` requirement. 43 | ```bash 44 | textbase-client compress 45 | ``` 46 | If you wish to run this in one go, you can make use of the `--path` flag 47 | ```bash 48 | textbase-client compress --path= 49 | ``` 50 | 51 | ## deploy 52 | Before executing this command, make sure that 53 | 1. You have a `.zip` file which is made according to the instructions and folder structure given in the 54 | [prerequisites](./deployment/prerequisites.md) section. 55 | 2. The path where this zip file is contained **DOES NOT** have any spaces. 56 | 3. You have a Textbase API key. This can be generated in the [dashboard](https://textbase-dashboard-nextjs.vercel.app/), guide for which is given in the [deployment](./deployment/deploy-from-cli.md#api-key-generation) section. 57 | 58 | ### memory option 59 | The `--memory` option can be used to specify the memory for your bot. You can choose between three options.
All values are in megabytes: 60 | - 256 (default) 61 | - 512 62 | - 1024 63 | 64 | ### disable_logs flag 65 | The `--disable_logs` flag can be used if you want to disable the logs after deploying the bot. 66 | 67 | ### NOTE 68 | Executing this command will ask the name for your bot as well. There is a naming convention to be followed for that: the bot name can only contain **lowercase alphanumeric characters, hyphens, and underscores**. 69 | ```bash 70 | textbase-client deploy 71 | ``` 72 | If you wish to run this in one go, you can make use of the `--path`, `--bot-name`, `--memory` and `--api_key` flags 73 | ```bash 74 | textbase-client deploy --path= --bot-name= --memory=<256/512/1024> --api_key= 75 | ``` 76 | **If you wish to use the `--path` flag, make sure you have your path inside quotes.** 77 | 78 | ## health 79 | Before executing this command, make sure that 80 | 1. You have the bot ID of which you are trying to check the health of. You can get the Bot ID in the `Deployments` section of the [dashboard](https://textbase-dashboard-nextjs.vercel.app/) or by executing the [list](#list) command. 81 | 2. You have a Textbase API key. This can be generated in the [dashboard](https://textbase-dashboard-nextjs.vercel.app/), guide for which is given in the [deployment](./deployment/deploy-from-cli.md#api-key-generation) section. 82 | ```bash 83 | textbase-client health 84 | ``` 85 | If you wish to run this in one go, you can make use of the `--bot_id` and `--api_key` flag 86 | ```bash 87 | textbase-client health --bot_id= --api_key= 88 | ``` 89 | 90 | ## list 91 | This will ask you for your API key, which can be generated in the [dashboard](https://textbase-dashboard-nextjs.vercel.app/), guide for which is given in the [deployment](./deployment/deploy-from-cli.md#api-key-generation), and on successful validation will return the list of the bots that you have deployed along with their bot ID and link.
```bash 93 | textbase-client list 94 | ``` 95 | If you wish to run this in one go, you can make use of the `--api_key` flag 96 | ```bash 97 | textbase-client list --api_key= 98 | ``` 99 | 100 | ## delete 101 | Before executing this command, make sure that 102 | 1. You have the bot ID of the bot which you are trying to delete. You can get the Bot ID in the `Deployments` section of the [dashboard](https://textbase-dashboard-nextjs.vercel.app/) or by executing the [list](#list) command. 103 | 2. You have a Textbase API key. This can be generated in the [dashboard](https://textbase-dashboard-nextjs.vercel.app/), guide for which is given in the [deployment](./deployment/deploy-from-cli.md#api-key-generation) section. 104 | ```bash 105 | textbase-client delete 106 | ``` 107 | If you wish to run this in one go, you can make use of the `--bot_id` and `--api_key` flag 108 | ```bash 109 | textbase-client delete --bot_id= --api_key= 110 | ``` 111 | 112 | ## logs 113 | Before executing this command, make sure that 114 | 1. You have the bot name of which you are trying to check the logs of. You can get the Bot Name in the `Deployments` section of the [dashboard](https://www.textbase.ai/deployment) or by executing the [list](#list) command. 115 | 2. You have a Textbase API key. This can be generated in the [dashboard](https://textbase.ai/), guide for which is given in the [deployment](./deployment/deploy-from-cli.md#api-key-generation) section. 116 | 3. You have to enter the `start_time` which means `for how many minutes before now do you want to see the logs of?`. While running the command you will be asked like this:- 117 | `Logs for previous ___ minutes [5]:` If you enter nothing by default it'll fetch the logs of last 5 minutes, if you enter (let's say) 15, it will fetch you the logs for last 15 mins.
```bash 119 | textbase-client logs 120 | ``` 121 | If you wish to run this in one go, you can make use of the `--bot_name`, `--api_key` and `--start_time` flag 122 | ```bash 123 | textbase-client logs --bot_name= --api_key= --start_time="how many mins in the past do you want to see the logs of" 124 | ``` 125 | 126 | ## download 127 | This command lets you download the zip file that you used to create and deploy your bot. It might come in handy when you want to see which files you used to create a previously deployed bot in the past. 128 | Before executing this command, make sure that 129 | 1. You have the bot name of the bot whose zip file you are trying to download. You can get the Bot Name in the `Deployments` section of the [dashboard](https://www.textbase.ai/deployment) or by executing the [list](#list) command. 130 | 2. You have a Textbase API key. This can be generated in the [dashboard](https://textbase.ai/), guide for which is given in the [deployment](./deployment/deploy-from-cli.md#api-key-generation) section. 131 | ```bash 132 | textbase-client download 133 | ``` 134 | If you wish to run this in one go, you can make use of the `--bot_name` and `--api_key` flag 135 | ```bash 136 | textbase-client download --bot_name= --api_key= 137 | ``` 138 | The zip file will be downloaded in your root directory.
-------------------------------------------------------------------------------- /docs/docusaurus.config.js: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | // Note: type annotations allow type checking and IDEs autocompletion 3 | 4 | const lightCodeTheme = require('prism-react-renderer/themes/github'); 5 | const darkCodeTheme = require('prism-react-renderer/themes/dracula'); 6 | 7 | /** @type {import('@docusaurus/types').Config} */ 8 | const config = { 9 | title: 'TextBase', 10 | tagline: 'Hotel, Trivago; Chatbot, TextBase', 11 | favicon: 'img/favicon.png', 12 | 13 | // Set the production url of your site here 14 | url: 'https://textbase-docs.com', 15 | // Set the // pathname under which your site is served 16 | // For GitHub pages deployment, it is often '//' 17 | baseUrl: '/', 18 | 19 | // GitHub pages deployment config. 20 | // If you aren't using GitHub pages, you don't need these. 21 | organizationName: 'cofactory', // Usually your GitHub org/user name. 22 | projectName: 'textbase-client', // Usually your repo name. 23 | 24 | onBrokenLinks: 'throw', 25 | onBrokenMarkdownLinks: 'warn', 26 | 27 | // Even if you don't use internalization, you can use this field to set useful 28 | // metadata like html lang. For example, if your site is Chinese, you may want 29 | // to replace "en" with "zh-Hans". 30 | i18n: { 31 | defaultLocale: 'en', 32 | locales: ['en'], 33 | }, 34 | 35 | presets: [ 36 | [ 37 | 'classic', 38 | /** @type {import('@docusaurus/preset-classic').Options} */ 39 | ({ 40 | docs: { 41 | routeBasePath: '/', 42 | sidebarPath: require.resolve('./sidebars.js'), 43 | // Please change this to your repo. 44 | // Remove this to remove the "edit this page" links. 
45 | editUrl: 46 | 'https://github.com/cofactoryai/textbase/docs', 47 | }, 48 | theme: { 49 | customCss: require.resolve('./src/css/custom.css'), 50 | }, 51 | }), 52 | ], 53 | ], 54 | 55 | themeConfig: 56 | /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ 57 | ({ 58 | navbar: { 59 | logo: { 60 | alt: 'TextBase Logo', 61 | src: 'img/logo.svg', 62 | }, 63 | items: [ 64 | { 65 | type: 'docSidebar', 66 | sidebarId: 'tutorialSidebar', 67 | position: 'left', 68 | label: 'Docs', 69 | }, 70 | { 71 | href: 'https://github.com/cofactoryai/textbase', 72 | label: 'GitHub', 73 | position: 'right', 74 | }, 75 | ], 76 | }, 77 | footer: { 78 | style: 'dark', 79 | links: [ 80 | { 81 | title: 'Docs', 82 | items: [ 83 | { 84 | label: 'Tutorial', 85 | to: '/', 86 | }, 87 | ], 88 | }, 89 | { 90 | title: 'Community', 91 | items: [ 92 | { 93 | label: 'Discord', 94 | href: 'https://discordapp.com/invite/docusaurus', 95 | } 96 | ], 97 | }, 98 | { 99 | title: 'More', 100 | items: [ 101 | { 102 | label: 'GitHub', 103 | href: 'https://github.com/cofactoryai/textbase', 104 | }, 105 | ], 106 | }, 107 | ], 108 | copyright: `Copyright © ${new Date().getFullYear()} TextBase, Inc. 
Built with Docusaurus.`, 109 | }, 110 | prism: { 111 | theme: lightCodeTheme, 112 | darkTheme: darkCodeTheme, 113 | }, 114 | colorMode: { 115 | defaultMode: 'dark' 116 | } 117 | }), 118 | }; 119 | 120 | module.exports = config; 121 | -------------------------------------------------------------------------------- /docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "textbase-doc", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "docusaurus": "docusaurus", 7 | "start": "docusaurus start", 8 | "build": "docusaurus build", 9 | "swizzle": "docusaurus swizzle", 10 | "deploy": "docusaurus deploy", 11 | "clear": "docusaurus clear", 12 | "serve": "docusaurus serve", 13 | "write-translations": "docusaurus write-translations", 14 | "write-heading-ids": "docusaurus write-heading-ids", 15 | "typecheck": "tsc" 16 | }, 17 | "dependencies": { 18 | "@docusaurus/core": "2.4.1", 19 | "@docusaurus/preset-classic": "2.4.1", 20 | "@mdx-js/react": "^1.6.22", 21 | "clsx": "^1.2.1", 22 | "prism-react-renderer": "^1.3.5", 23 | "react": "^17.0.2", 24 | "react-dom": "^17.0.2" 25 | }, 26 | "devDependencies": { 27 | "@docusaurus/module-type-aliases": "2.4.1", 28 | "@tsconfig/docusaurus": "^1.0.5", 29 | "typescript": "^4.7.4" 30 | }, 31 | "browserslist": { 32 | "production": [ 33 | ">0.5%", 34 | "not dead", 35 | "not op_mini all" 36 | ], 37 | "development": [ 38 | "last 1 chrome version", 39 | "last 1 firefox version", 40 | "last 1 safari version" 41 | ] 42 | }, 43 | "engines": { 44 | "node": ">=16.14" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /docs/sidebars.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Creating a sidebar enables you to: 3 | - create an ordered group of docs 4 | - render a sidebar for each doc of that group 5 | - provide next/previous navigation 6 | 7 | The sidebars can be generated 
from the filesystem, or explicitly defined here. 8 | 9 | Create as many sidebars as you want. 10 | */ 11 | 12 | // @ts-check 13 | 14 | /** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ 15 | const sidebars = { 16 | // By default, Docusaurus generates a sidebar from the docs folder structure 17 | tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], 18 | 19 | // But you can create a sidebar manually 20 | /* 21 | tutorialSidebar: [ 22 | 'intro', 23 | 'hello', 24 | { 25 | type: 'category', 26 | label: 'Tutorial', 27 | items: ['tutorial-basics/create-a-document'], 28 | }, 29 | ], 30 | */ 31 | }; 32 | 33 | module.exports = sidebars; 34 | -------------------------------------------------------------------------------- /docs/src/css/custom.css: -------------------------------------------------------------------------------- 1 | /** 2 | * Any CSS included here will be global. The classic template 3 | * bundles Infima by default. Infima is a CSS framework designed to 4 | * work well for content-centric websites. 5 | */ 6 | 7 | /* You can override the default Infima variables here. */ 8 | :root { 9 | --ifm-color-primary: #2e8555; 10 | --ifm-color-primary-dark: #29784c; 11 | --ifm-color-primary-darker: #277148; 12 | --ifm-color-primary-darkest: #205d3b; 13 | --ifm-color-primary-light: #33925d; 14 | --ifm-color-primary-lighter: #359962; 15 | --ifm-color-primary-lightest: #3cad6e; 16 | --ifm-code-font-size: 95%; 17 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); 18 | } 19 | 20 | /* For readability concerns, you should choose a lighter palette in dark mode. 
*/ 21 | [data-theme='dark'] { 22 | --ifm-color-primary: #25c2a0; 23 | --ifm-color-primary-dark: #21af90; 24 | --ifm-color-primary-darker: #1fa588; 25 | --ifm-color-primary-darkest: #1a8870; 26 | --ifm-color-primary-light: #29d5b0; 27 | --ifm-color-primary-lighter: #32d8b4; 28 | --ifm-color-primary-lightest: #4fddbf; 29 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); 30 | } 31 | -------------------------------------------------------------------------------- /docs/src/pages/index.module.css: -------------------------------------------------------------------------------- 1 | /** 2 | * CSS files with the .module.css suffix will be treated as CSS modules 3 | * and scoped locally. 4 | */ 5 | 6 | .heroBanner { 7 | padding: 4rem 0; 8 | text-align: center; 9 | position: relative; 10 | overflow: hidden; 11 | } 12 | 13 | @media screen and (max-width: 996px) { 14 | .heroBanner { 15 | padding: 2rem; 16 | } 17 | } 18 | 19 | .buttons { 20 | display: flex; 21 | align-items: center; 22 | justify-content: center; 23 | } 24 | -------------------------------------------------------------------------------- /docs/static/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/docs/static/.nojekyll -------------------------------------------------------------------------------- /docs/static/img/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cofactoryai/textbase/9f41c9c0c064eb8c758a21543d31571b2a75ec9a/docs/static/img/favicon.png -------------------------------------------------------------------------------- /docs/static/img/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 
-------------------------------------------------------------------------------- /docs/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // This file is not used in compilation. It is here just for a nice editor experience. 3 | "extends": "@tsconfig/docusaurus/tsconfig.json", 4 | "compilerOptions": { 5 | "baseUrl": "." 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /examples/dalle-bot/main.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from textbase import bot, Message 3 | from textbase.models import DallE 4 | from textbase.datatypes import Image 5 | 6 | # Load your OpenAI API key 7 | DallE.api_key = "" 8 | 9 | @bot() 10 | def on_message(message_history: List[Message], state: dict = None): 11 | 12 | # Generate DallE response 13 | bot_response = DallE.generate( 14 | message_history=message_history, # Assuming history is the list of user messages 15 | ) 16 | 17 | return { 18 | "messages": [Image(url=bot_response)], 19 | "state": state 20 | } 21 | -------------------------------------------------------------------------------- /examples/gpt-assistants-bot/main.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from textbase import bot, Message 3 | from textbase.models import OpenAI 4 | 5 | # Load your OpenAI API key 6 | OpenAI.api_key = "" 7 | 8 | @bot() 9 | def on_message(message_history: List[Message], state: dict = None): 10 | last_message = message_history[-1]['content'][-1] 11 | text = last_message['value'] 12 | 13 | if ('id' not in state): 14 | state['id'] = OpenAI.create_assistant( 15 | name="Math Tutor", 16 | instructions="You are a personal math tutor. 
Write and run code to answer math questions.", 17 | tools=[{"type": "code_interpreter"}], 18 | model="gpt-4-1106-preview" 19 | ) 20 | 21 | while(state['id'] != ''): 22 | bot_responses = OpenAI.run_assistant( 23 | message_history=message_history, 24 | text=text, 25 | assistant_id=state['id'] 26 | ) 27 | 28 | return { 29 | "messages": [bot_responses], 30 | "state": state 31 | } -------------------------------------------------------------------------------- /examples/gpt-vision-bot/main.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from textbase import bot, Message 3 | from textbase.models import OpenAI 4 | 5 | # Load your OpenAI API key 6 | OpenAI.api_key = "" 7 | 8 | @bot() 9 | def on_message(message_history: List[Message], state: dict = None): 10 | last_message = message_history[-1]['content'][-1] 11 | data_type = last_message['data_type'] 12 | 13 | if data_type == "IMAGE_URL": 14 | bot_response = OpenAI.vision( 15 | message_history=message_history, # Assuming history is the list of user messages 16 | model="gpt-4-vision-preview", 17 | ) 18 | elif data_type == "STRING": 19 | bot_response = OpenAI.vision( 20 | message_history=message_history, # Assuming history is the list of user messages 21 | model="gpt-4-vision-preview", 22 | ) 23 | 24 | return { 25 | "messages": [bot_response], 26 | "state": state 27 | } 28 | -------------------------------------------------------------------------------- /examples/huggingface-bot/main.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from textbase import bot, Message 3 | from textbase.models import HuggingFace 4 | 5 | # Load your HuggingFace API key 6 | HuggingFace.api_key = "" 7 | 8 | # Prompt for GPT-3.5 Turbo 9 | SYSTEM_PROMPT = """You are chatting with an AI. There are no specific prefixes for responses, so you can ask or talk about anything you like. 
10 | The AI will respond in a natural, conversational manner. Feel free to start the conversation with any question or topic, and let's have a 11 | pleasant chat! 12 | """ 13 | 14 | @bot() 15 | def on_message(message_history: List[Message], state: dict = None): 16 | 17 | # Generate HuggingFace response. Uses the DialoGPT-large model from Microsoft by default. 18 | bot_response = HuggingFace.generate( 19 | system_prompt=SYSTEM_PROMPT, 20 | message_history=message_history, # Assuming history is the list of user messages 21 | ) 22 | 23 | return { 24 | "messages": [bot_response], 25 | "state": state 26 | } -------------------------------------------------------------------------------- /examples/mimic-bot/main.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from textbase import bot, Message 3 | from textbase.datatypes import Image, Video, Audio, File 4 | 5 | @bot() 6 | def on_message(message_history: List[Message], state: dict = None): 7 | 8 | # Mimic user's response 9 | bot_response = [] 10 | for message in message_history[-1]["content"]: 11 | match message['data_type']: 12 | case 'STRING': 13 | bot_response.append(message['value']) 14 | case 'IMAGE_URL': 15 | bot_response.append(Image(message['value'])) 16 | case 'VIDEO_URL': 17 | bot_response.append(Video(message['value'])) 18 | case 'AUDIO_URL': 19 | bot_response.append(Audio(message['value'])) 20 | case 'FILE_URL': 21 | bot_response.append(File(message['value'])) 22 | 23 | # message_history[-1]["content"] structure is 24 | 25 | # [ 26 | # { 27 | # "data_type": "STRING", 28 | # "value": "" 29 | # } 30 | # ] 31 | 32 | return { 33 | "messages": bot_response, 34 | "state": state 35 | } 36 | -------------------------------------------------------------------------------- /examples/openai-bot/main.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from textbase import bot, Message 3 | 
@bot()
def on_message(message_history: List[Message], state: dict = None):
    """Relay the conversation to Google PaLM and return its reply.

    Args:
        message_history: Full conversation so far (list of user/assistant messages).
        state: Opaque per-conversation state, passed through unchanged.
    """
    reply = PalmAI.generate(message_history=message_history)

    return {
        "messages": [reply],
        "state": state
    }
@functions_framework.http
def bot():
    """Decorator factory: wraps a user-defined `on_message(history, state)`
    handler into a Google Cloud Functions HTTP endpoint.

    The wrapped handler answers CORS preflight requests, validates the POSTed
    message history, invokes the user handler, uploads any multimedia replies
    to get public URLs, and returns the updated history as JSON.

    NOTE(review): @functions_framework.http normally decorates the request
    handler itself, not a decorator factory — confirm the deploy tooling
    relies on this arrangement.
    """
    def bot_message(func):
        # `func` is the user's on_message(history, state) handler.
        def bot_function(*args):
            # functions-framework invokes the handler with the Flask request
            # object as the sole positional argument.
            request = args[0]
            if request.method == 'OPTIONS':
                # CORS preflight: allow any origin with the Content-Type
                # header and cache the preflight response for 3600 s.
                headers = {
                    'Access-Control-Allow-Origin': '*',
                    'Access-Control-Allow-Methods': 'GET',
                    'Access-Control-Allow-Headers': 'Content-Type',
                    'Access-Control-Max-Age': '3600'
                }

                return ('', 204, headers)

            # Set CORS headers for the main request
            headers = {
                'Access-Control-Allow-Origin': '*'
            }

            post_body = request.json
            history_messages = post_body['data']['message_history']
            state = post_body['data']['state']

            # NOTE(review): 402 (Payment Required) looks like a typo for 400
            # (Bad Request) — kept as-is since clients may match on it.
            if not isinstance(history_messages, list):
                return 'Error in processing', 402, headers

            resp = func(history_messages, state)

            # Normalised list of {data_type, value} dicts for the reply.
            content = []

            if "errors" in resp:
                return {
                    "message_history": history_messages,
                    "state": resp["state"],
                    "new_message": []
                }, 500, headers

            for message in resp["messages"]:
                if isinstance(message, str):
                    content.append({
                        "data_type": "STRING",
                        "value": message
                    })
                elif isinstance(message, Image):
                    # Images may arrive as in-memory PIL objects or local
                    # paths; either way they are uploaded to obtain a URL.
                    if message.pil_image:
                        message.upload_pil_to_bucket()
                    elif message.path:
                        message.upload_file_to_bucket()
                    content.append({
                        "data_type": "IMAGE_URL",
                        "value": message.url
                    })
                elif isinstance(message, Video):
                    if message.path:
                        message.upload_file_to_bucket()
                    content.append({
                        "data_type": "VIDEO_URL",
                        "value": message.url
                    })
                elif isinstance(message, Audio):
                    if message.path:
                        message.upload_file_to_bucket()
                    content.append({
                        "data_type": "AUDIO_URL",
                        "value": message.url
                    })
                elif isinstance(message, File):
                    if message.path:
                        message.upload_file_to_bucket()
                    content.append({
                        "data_type": "FILE_URL",
                        "value": message.url
                    })

            # Record the assistant's reply in the returned history.
            history_messages.append({
                "role": "assistant",
                "content": content
            })

            return {
                "message_history": history_messages,
                "state": resp["state"],
                "new_message": content
            }, 200, headers
        return bot_function
    return bot_message
def check_parameters(*args):
    """Raise TypeError if more than one of the candidate arguments is truthy.

    Called with a single list of constructor arguments; at most one source
    (url / pil_image / path) may be supplied.
    """
    supplied = [candidate for candidate in args[0] if candidate]
    if len(supplied) > 1:
        raise TypeError("Only one parameter can be given.")


class Image:
    """An image attachment, sourced from a URL, a PIL image, or a local path."""

    def __init__(self, url="", pil_image="", path=""):
        # Exactly one of the three sources may be provided.
        check_parameters([url, pil_image, path])

        if pil_image and not isinstance(pil_image, PILImageClass):
            raise TypeError("Not a valid PIL image.")

        self.url = url
        self.pil_image = pil_image

        if path and not os.path.exists(path):
            raise FileNotFoundError("The given path doesn't exist.")

        self.path = path

    def upload_pil_to_bucket(self):
        """Upload the in-memory PIL image and store the resulting URL."""
        self.url = convert_img_to_url(pil_image=self.pil_image)

    def upload_file_to_bucket(self):
        """Upload the image file at self.path and store the resulting URL."""
        self.url = convert_img_to_url(image_file_path=self.path)


class Media:
    """Base class for URL- or path-backed attachments (video/audio/file)."""

    def __init__(self, url="", path=""):
        # Exactly one of url/path may be supplied.
        check_parameters([url, path])

        self.url = url

        if path and not os.path.exists(path):
            raise FileNotFoundError("The given path doesn't exist.")

        self.path = path

    def upload_file_to_bucket(self, convert_func):
        # Subclasses pass the converter matching their media type.
        self.url = convert_func(self.path)


class Video(Media):
    """A video attachment."""

    def upload_file_to_bucket(self):
        super().upload_file_to_bucket(convert_video_to_url)


class Audio(Media):
    """An audio attachment."""

    def upload_file_to_bucket(self):
        super().upload_file_to_bucket(convert_audio_to_url)


class File(Media):
    """A generic file attachment."""

    def upload_file_to_bucket(self):
        super().upload_file_to_bucket(convert_file_to_url)
# Cloud Function endpoint that stores uploaded multimedia and returns a URL.
URL = "https://us-central1-chat-agents.cloudfunctions.net/upload-multimedia"


def upload_file(file_path: str, file_type: str) -> str:
    """Upload the file at *file_path* to the multimedia bucket.

    Returns the public URL on success, or an 'Error: ...' string on failure.
    """
    with open(file_path, 'rb') as source:
        response = requests.post(
            URL,
            files={'file': source},
            data={'parent_path': 'bot', 'file_type': file_type},
        )

        body = response.json()
        if 'error' in body:
            return f'Error: {body["error"]}'
        return body['url']


def convert_img_to_url(image_file_path: str="", pil_image: PILImageClass=None) -> str:
    """Upload an image (either a file path or an in-memory PIL image).

    Returns the public URL, or an 'Error: ...' string on failure.
    """
    if not pil_image:
        return upload_file(image_file_path, 'images')

    # Serialise the PIL object to bytes, preserving its original format.
    # NOTE(review): pil_image.format is None for images created in memory
    # (rather than loaded from disk) — confirm callers always load from a
    # source that sets it.
    buffer = BytesIO()
    img_format = pil_image.format
    pil_image.save(buffer, img_format)

    response = requests.post(
        URL,
        files={'file': buffer.getvalue()},
        data={
            'parent_path': 'bot',
            'file_type': 'images',
            'file_extension': img_format.lower(),
        },
    )

    body = response.json()
    if 'error' in body:
        return f'Error: {body["error"]}'
    return body['url']


def convert_video_to_url(video_file_path: str="") -> str:
    """Upload a video file and return its public URL (or error string)."""
    return upload_file(video_file_path, 'videos')


def convert_audio_to_url(audio_file_path: str="") -> str:
    """Upload an audio file and return its public URL (or error string)."""
    return upload_file(audio_file_path, 'audios')


def convert_file_to_url(file_path: str="") -> str:
    """Upload a generic file and return its public URL (or error string)."""
    return upload_file(file_path, 'files')
def get_contents(message: "Message", data_type: str):
    """Return a role/content pair for every part of *message* matching *data_type*.

    Args:
        message: A message dict with "role" and a "content" list of
            {"data_type", "value"} parts.
        data_type: The part type to keep (e.g. "STRING", "IMAGE_URL").

    Returns:
        List of {"role": ..., "content": value} dicts, in original order.
    """
    return [
        {
            "role": message["role"],
            "content": content["value"]
        }
        for content in message["content"]
        if content["data_type"] == data_type
    ]


def extract_content_values(message: "Message", data_type: str="STRING"):
    """Return the non-empty content values of *message* for *data_type*.

    Bug fix: the previous filter was `if content`, which tested the wrapper
    dict (always truthy) instead of the value, so empty values were never
    dropped despite the documented intent.
    """
    return [
        content["content"]
        for content in get_contents(message, data_type)
        if content["content"]
    ]
    @classmethod
    def run_assistant(
        cls,
        message_history: list[Message],
        text: str,
        assistant_id: str
    ):
        """Run an OpenAI Assistant on *text* in a fresh thread and return the
        assistant's first reply text.

        NOTE(review): `message_history` is accepted but never used — the
        assistant only ever sees *text*.
        """
        # NOTE(review): despite the name, THREAD_ID holds the *assistant* id;
        # it is passed as assistant_id to the run below.
        THREAD_ID=assistant_id

        def run_and_store_result(q, thread_id, assistant_id):
            # Start the assistant run and hand the Run object back via the queue.
            result = openai.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
            q.put(result)

        assert cls.api_key is not None, "OpenAI API key is not set."
        openai.api_key = cls.api_key

        # Each call gets a brand-new conversation thread.
        thread = openai.beta.threads.create()

        # Student asks question
        message = openai.beta.threads.messages.create(
            thread_id=thread.id,
            role="user",
            content=text
        )

        q = queue.Queue()

        # NOTE(review): the worker thread is join()ed immediately, so this is
        # effectively a synchronous call — the thread adds no concurrency.
        run_thread = threading.Thread(target=run_and_store_result,
                                      args=(q, thread.id, THREAD_ID))
        run_thread.start()
        run_thread.join()
        result = q.get()

        # NOTE(review): after join() the thread is dead, so this condition is
        # always true; the loop busy-polls the run status with no sleep until
        # it reports 'completed', then returns from inside the loop.
        while(not run_thread.is_alive()):
            run_status = openai.beta.threads.runs.retrieve(
                thread_id=thread.id,
                run_id=result.id
            )

            responses = []
            if run_status.status == 'completed':
                messages = openai.beta.threads.messages.list(
                    thread_id=thread.id
                )

                # Reverse to oldest-first so replies read in conversation order.
                for msg in messages.data[::-1]:
                    role = msg.role
                    content = msg.content[0].text.value
                    responses.append({
                        "role": role,
                        "content": [{
                            "data_type": "STRING",
                            "value": content
                        }]
                    })

                # Return the first assistant utterance found.
                for response in responses:
                    if response["role"] == "assistant":
                        for content in response["content"]:
                            return content["value"]
try: 205 | assert cls.api_key is not None, "Hugging Face API key is not set." 206 | 207 | headers = { "Authorization": f"Bearer { cls.api_key }" } 208 | API_URL = "https://api-inference.huggingface.co/models/" + model 209 | inputs = { 210 | "past_user_inputs": [system_prompt], 211 | "generated_responses": [f"Ok, I will answer according to the context, where context is '{system_prompt}'."], 212 | "text": "" 213 | } 214 | 215 | for message in message_history: 216 | if message["role"] == "user": 217 | inputs["past_user_inputs"].extend(extract_content_values(message)) 218 | else: 219 | inputs["generated_responses"].extend(extract_content_values(message)) 220 | 221 | inputs["text"] = inputs["past_user_inputs"].pop(-1) 222 | 223 | payload = { 224 | "inputs": inputs, 225 | "max_length": max_tokens, 226 | "temperature": temperature, 227 | "min_length": min_tokens, 228 | "top_k": top_k, 229 | } 230 | 231 | data = json.dumps(payload) 232 | response = requests.request("POST", API_URL, headers=headers, data=data) 233 | response = json.loads(response.content.decode("utf-8")) 234 | 235 | if response.get("error", None) == "Authorization header is invalid, use 'Bearer API_TOKEN'.": 236 | print("Hugging Face API key is not correct.") 237 | 238 | if response.get("estimated_time", None): 239 | print(f"Model is loading please wait for {response.get('estimated_time')}") 240 | time.sleep(response.get("estimated_time")) 241 | response = requests.request("POST", API_URL, headers=headers, data=data) 242 | response = json.loads(response.content.decode("utf-8")) 243 | 244 | return response["generated_text"] 245 | 246 | except Exception: 247 | print(f"An exception occured while using this model, please try using another model.\nException: {traceback.format_exc()}.") 248 | 249 | class BotLibre: 250 | application = None 251 | instance = None 252 | 253 | @classmethod 254 | def generate( 255 | cls, 256 | message_history: list[Message], 257 | ): 258 | most_recent_message = 
class DallE:
    """Image generation via OpenAI's DALL·E endpoints.

    Bug fix: the previous implementation called `openai.Image.create` /
    `openai.Image.create_variation` and subscripted the response — the 0.x
    API, which was removed in openai>=1.0 (pyproject pins `openai = "^1.1.2"`
    and the rest of this module already uses the 1.x `openai.chat.completions`
    style). Those calls raised AttributeError at runtime; migrated to the 1.x
    `openai.images.*` API with attribute access on the response.
    """

    api_key = None

    @classmethod
    def generate(
        cls,
        message_history: list[Message],
        size="256x256"
    ):
        """Generate one image from the last message's first text value.

        Returns the generated image's URL.
        """
        assert cls.api_key is not None, "OpenAI API key is not set."
        openai.api_key = cls.api_key

        last_message = message_history[-1]
        prompt = extract_content_values(last_message)[0]

        # openai 1.x: images.generate replaces the removed Image.create.
        response = openai.images.generate(
            prompt=prompt,
            n=1,
            size=size,
        )
        return response.data[0].url

    @classmethod
    def generate_variations(
        cls,
        message_history: list[Message],
        size="256x256"
    ):
        """Generate one variation of the last message's first IMAGE_URL.

        Downloads the image, submits it for variation, and returns the new URL.
        """
        assert cls.api_key is not None, "OpenAI API key is not set."
        openai.api_key = cls.api_key

        last_message = message_history[-1]
        image_url = extract_content_values(last_message, "IMAGE_URL")[0]

        response = requests.get(image_url)
        image_content = BytesIO(response.content)

        # openai 1.x: images.create_variation replaces Image.create_variation.
        response = openai.images.create_variation(
            image=image_content,
            n=1,
            size=size,
        )

        return response.data[0].url
@cli.command()
@click.option("--project_name", prompt="What do you want to name your project", required=True)
def init(project_name):
    """
    Initialize a new project with a basic template setup.
    """
    target_dir = os.path.join(os.getcwd(), project_name)

    # Refuse to clobber an existing directory.
    if os.path.exists(target_dir):
        click.secho(f"Error: Directory '{project_name}' already exists.", fg="red")
        return

    os.makedirs(target_dir)

    # Seed the new project from the template packaged with textbase.
    template_root = files('textbase').joinpath('template')
    for entry in template_root.iterdir():
        source = str(entry)
        destination = os.path.join(target_dir, os.path.basename(source))
        if entry.is_dir():
            shutil.copytree(source, destination, dirs_exist_ok=True)
        else:
            shutil.copy2(source, destination)

    click.secho(f"Project '{project_name}' has been initialized!", fg="green")
def files_exist(path):
    """Return True iff both main.py and requirements.txt exist under *path*.

    Prints a red error message naming the first missing file.
    """
    for required in ("main.py", "requirements.txt"):
        if not os.path.exists(os.path.join(path, required)):
            click.echo(click.style(f"Error: {required} not found in {path} directory.", fg='red'))
            return False
    return True
def validate_memory_size(ctx, param, value):
    """Click callback: reject memory sizes outside VALID_MEMORY_SIZES.

    Args:
        ctx: Click context (unused, required by the callback protocol).
        param: The option being validated (unused).
        value: The memory size in MB supplied by the user.

    Returns:
        The validated value.

    Raises:
        click.BadParameter: If value is not one of VALID_MEMORY_SIZES.
    """
    if value not in VALID_MEMORY_SIZES:
        # fg='red' added for consistency with validate_bot_name's error
        # styling; the redundant `[x for x in ...]` copy formatted to the
        # same string as the list itself and is dropped.
        error_message = click.style(f'Memory size is not one of {VALID_MEMORY_SIZES}', fg='red')
        raise click.BadParameter(error_message)
    return value
required=True) 156 | @click.option("--bot_name", prompt="Name of the bot", required=True, callback=validate_bot_name) 157 | @click.option("--memory", prompt="Memory to be assigned to the bot (default: 256MB)", callback=validate_memory_size, 158 | help=f"The value can be only one of {[x for x in VALID_MEMORY_SIZES]}", default=256, type=int, required=True) 159 | @click.option("--api_key", prompt="Textbase API Key", required=True) 160 | @click.option("--disable_logs", is_flag=True, default=False, help="Fetch logs after deployment") 161 | def deploy(path, bot_name, memory, api_key, disable_logs): 162 | click.echo(click.style(f"Deploying bot '{bot_name}' with zip folder from path: {path}", fg='yellow')) 163 | 164 | headers = { 165 | "Authorization": f"Bearer {api_key}" 166 | } 167 | 168 | files = { 169 | "file": open(path, "rb"), 170 | } 171 | 172 | data = { 173 | "botName": bot_name, 174 | "mem": memory 175 | } 176 | 177 | with yaspin(text="Uploading...", color="yellow") as spinner: 178 | response = requests.post( 179 | UPLOAD_URL, 180 | headers=headers, 181 | data=data, 182 | files=files 183 | ) 184 | 185 | if response.ok: 186 | click.echo(click.style("Upload completed successfully! ✅", fg='green')) 187 | response_data = response.json() 188 | error = response_data.get('error') 189 | data = response_data.get('data') 190 | if not error and data: 191 | message = data.get('message') 192 | # Parse the message to extract bot ID and URL 193 | parts = message.split('. ') 194 | bot_id = parts[1].split(' ')[-1] 195 | url = parts[2].split(' ')[-1] 196 | # Create a list of dictionaries for tabulate 197 | data_list = [{'Status': parts[0], 'Bot ID': bot_id, 'URL': url}] 198 | table = tabulate(data_list, headers="keys", tablefmt="pretty") 199 | click.echo(click.style("Deployment details:", fg='blue')) 200 | click.echo(table) 201 | else: 202 | click.echo(click.style("Something went wrong! 
❌", fg='red')) 203 | click.echo(response.text) 204 | else: 205 | click.echo(click.style("Something went wrong! ❌", fg='red')) 206 | click.echo(response.text) 207 | 208 | # Piping logs in the cli in real-time 209 | if not disable_logs: 210 | click.echo(click.style(f"Fetching logs for bot '{bot_name}'...", fg='green')) 211 | 212 | cloud_url = f"{CLOUD_URL}/logs" 213 | headers = { 214 | "Authorization": f"Bearer {api_key}" 215 | } 216 | params = { 217 | "botName": bot_name, 218 | "pageToken": None 219 | } 220 | 221 | fetch_and_display_logs(cloud_url=cloud_url, 222 | headers=headers, 223 | params=params) 224 | ################################################################################################################# 225 | 226 | @cli.command() 227 | @click.option("--bot_id", prompt="Id of the bot", required=True) 228 | @click.option("--api_key", prompt="Textbase API Key", required=True) 229 | def health(bot_id, api_key): 230 | click.echo(click.style(f"Checking health of bot '{bot_id}' with API key: {api_key}", fg='green')) 231 | 232 | # the user would get the bot_id from the GET /list and use it here 233 | cloud_url = f"{CLOUD_URL}/bot-health" 234 | 235 | headers = { 236 | "Authorization": f"Bearer {api_key}" 237 | } 238 | 239 | params = { 240 | "botId": bot_id 241 | } 242 | 243 | response = requests.get(cloud_url, headers=headers, params=params) 244 | 245 | if response.ok: 246 | response_data = response.json() 247 | data = response_data.get('data') 248 | if data is not None: 249 | # Convert the data dictionary to a list of dictionaries for tabulate 250 | data_list = [data] 251 | table = tabulate(data_list, headers="keys", tablefmt="pretty") 252 | click.echo(click.style("Bot status:", fg='green')) 253 | click.echo(table) 254 | else: 255 | click.echo(click.style("Status information not found in the response.", fg='red')) 256 | click.echo(response_data) 257 | else: 258 | click.echo(click.style("Failed to retrieve bot status.", fg='red')) 259 | 260 | 261 | 
@cli.command() 262 | @click.option("--api_key", prompt="Textbase API Key", required=True) 263 | def list(api_key): 264 | click.echo(click.style("Getting the list of bots...", fg='green')) 265 | 266 | cloud_url = f"{CLOUD_URL}/list" 267 | 268 | headers = { 269 | "Authorization": f"Bearer {api_key}" 270 | } 271 | 272 | response = requests.get( 273 | cloud_url, 274 | headers=headers 275 | ) 276 | 277 | if response.ok: 278 | data = response.json().get('data', []) 279 | if data: 280 | # Reorder the dictionaries in the data list 281 | reordered_data = [{'ID': d['id'], 'Name': d['name'], 'Memory': f"{d['mem']} MB", 'URL': d['url']} for d in data] 282 | table = tabulate(reordered_data, headers="keys", tablefmt="pretty") 283 | click.echo(click.style("List of bots:", fg='blue')) 284 | print(table) 285 | else: 286 | click.echo(click.style("No bots found.", fg='yellow')) 287 | else: 288 | click.echo(click.style("Something went wrong!", fg='red')) 289 | 290 | 291 | @cli.command() 292 | @click.option("--bot_id", prompt="Id of the bot", required=True) 293 | @click.option("--api_key", prompt="Textbase API Key", required=True) 294 | def delete(bot_id, api_key): 295 | click.echo(click.style(f"Deleting bot '{bot_id}'...", fg='red')) 296 | 297 | cloud_url = f"{CLOUD_URL}/delete" 298 | 299 | headers = { 300 | "Authorization": f"Bearer {api_key}" 301 | } 302 | 303 | data = { 304 | "botId": bot_id 305 | } 306 | 307 | with click.progressbar(length=100, label='Deleting...') as bar: 308 | for i in range(100): 309 | sleep(0.02) # simulate deletion progress 310 | bar.update(1) 311 | 312 | response = requests.post( 313 | cloud_url, 314 | json=data, 315 | headers=headers 316 | ) 317 | 318 | if response.ok: 319 | click.echo(click.style(f"Bot '{bot_id}' deleted successfully!", fg='green')) 320 | response_data = response.json() 321 | if response_data: 322 | # Convert the data dictionary to a list of dictionaries for tabulate 323 | data_list = [response_data] 324 | table = tabulate(data_list, 
headers="keys", tablefmt="pretty") 325 | click.echo(table) 326 | else: 327 | click.echo("No data found in the response.") 328 | else: 329 | click.echo(click.style("Something went wrong!", fg='red')) 330 | 331 | 332 | @cli.command() 333 | @click.option("--bot_name", prompt="Name of the bot", required=True) 334 | @click.option("--api_key", prompt="Textbase API Key", required=True) 335 | @click.option("--start_time", prompt="Logs for previous ___ minutes", required=False, default=5) 336 | def logs(bot_name, api_key, start_time): 337 | click.echo(click.style(f"Fetching logs for bot '{bot_name}'...", fg='green')) 338 | 339 | cloud_url = f"{CLOUD_URL}/logs" 340 | headers = { 341 | "Authorization": f"Bearer {api_key}" 342 | } 343 | params = { 344 | "botName": bot_name, 345 | "startTime": start_time, 346 | "pageToken": None 347 | } 348 | 349 | fetch_and_display_logs(cloud_url=cloud_url, 350 | headers=headers, 351 | params=params) 352 | 353 | 354 | @cli.command() 355 | @click.option("--bot_name", prompt="Name of the bot", required=True) 356 | @click.option("--api_key", prompt="Textbase API Key", required=True) 357 | def download(bot_name, api_key): 358 | cloud_url = f"{CLOUD_URL}/downloadZip" 359 | headers = { 360 | "Authorization": f"Bearer {api_key}" 361 | } 362 | 363 | params = {"botName": bot_name} 364 | response = requests.get(cloud_url, 365 | headers=headers, 366 | params=params, 367 | stream=True) 368 | 369 | if response.status_code == 200: 370 | with open(f"{bot_name}.zip", "wb") as f: 371 | for chunk in response.iter_content(chunk_size=1024): 372 | if chunk: 373 | f.write(chunk) 374 | else: 375 | click.echo(click.style(f"Error: {response.status_code}, {response.text}", fg="red")) 376 | 377 | if __name__ == "__main__": 378 | cli() 379 | -------------------------------------------------------------------------------- /textbase/utils/download_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import zipfile 3 | 
import requests 4 | import shutil 5 | import click 6 | 7 | def download_and_extract_zip(zip_url, destination_folder): 8 | """ 9 | The function `download_and_extract_zip` downloads a zip file from a given URL and extracts its 10 | contents to a specified destination folder. 11 | 12 | :param zip_url: The URL of the zip file that you want to download and extract 13 | :param destination_folder: The destination_folder parameter is the path where you want to save the 14 | downloaded zip file and extract its contents. It can be an absolute path or a relative path to the 15 | current working directory 16 | """ 17 | # Create the destination folder if it doesn't exist 18 | os.makedirs(destination_folder, exist_ok=True) 19 | 20 | # Download the zip file 21 | response = requests.get(zip_url) 22 | if response.status_code == 200: 23 | zip_file_path = os.path.join(destination_folder, "temp.zip") 24 | with open(zip_file_path, 'wb') as f: 25 | f.write(response.content) 26 | 27 | # Extract the contents of the zip file 28 | with zipfile.ZipFile(zip_file_path, 'r') as zip_ref: 29 | zip_ref.extractall(destination_folder) 30 | 31 | # Remove the zip file after extraction 32 | os.remove(zip_file_path) 33 | 34 | # Remove the existing "dist" folder if it exists 35 | dist_folder = os.path.join(destination_folder, "dist") 36 | if os.path.exists(dist_folder): 37 | shutil.rmtree(dist_folder) 38 | 39 | # Rename the extracted folder to "dist" 40 | extracted_folder = os.path.join(destination_folder, 'build') 41 | shutil.move(extracted_folder, dist_folder) 42 | 43 | click.secho("Zip file downloaded and extracted successfully.", fg="yellow") 44 | else: 45 | click.secho("Failed to download the zip file.", fg="red") -------------------------------------------------------------------------------- /textbase/utils/logs.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from rich.console import Console 3 | from rich.table import Table 4 | from 
rich.text import Text 5 | import time 6 | import click 7 | import json 8 | from yaspin import yaspin 9 | 10 | def fetch_and_display_logs(cloud_url, headers, params): 11 | console = Console() 12 | table = Table(show_header=True, header_style="bold magenta") 13 | table.add_column("Timestamp") 14 | table.add_column("Severity") 15 | table.add_column("Summary") 16 | 17 | while True: 18 | with yaspin(text="Logs...", color="yellow") as spinner: 19 | response = requests.get(cloud_url, headers=headers, params=params) 20 | 21 | if response.ok: 22 | response_data = response.json() 23 | data = response_data.get('data') 24 | if data is not None: 25 | logs = data.get('logs') 26 | if logs: 27 | for log in logs: 28 | severity_color = 'blue' if log['severity'].lower() in ['notice', 'info', 'debug'] else 'red' if log['severity'].lower() in ['alert', 'critical', 'error'] else 'yellow' 29 | 30 | table.add_row(log['timestamp'], Text(log['severity'], style=severity_color), Text(log.get('text', ''), style=severity_color)) 31 | 32 | console.clear() 33 | console.print(table) 34 | # Update the params for the next request 35 | params['pageToken'] = data.get('nextPageToken') 36 | params['startTime'] = data.get('startTime') 37 | else: 38 | click.echo(click.style("No logs found in the response.", fg='yellow')) 39 | else: 40 | error_message = json.loads(response.text) 41 | click.echo(click.style(f"Failed to retrieve logs ❌ \n Error: {error_message['message']}, Details: {error_message['error']}", fg='red')) 42 | 43 | # Poll the endpoint every 3 seconds 44 | time.sleep(3) -------------------------------------------------------------------------------- /textbase/utils/server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import http.server 3 | import socketserver 4 | from textbase.utils.download_build import download_and_extract_zip 5 | import click 6 | 7 | socketserver.TCPServer.allow_reuse_address=True 8 | 9 | # URL of the zip file 
containing the dist folder 10 | zip_url = "https://storage.googleapis.com/chatbot_mainpy/frontendUI.zip" 11 | 12 | # Destination folder where the zip file will be extracted 13 | destination_folder = os.path.join(os.getcwd(), "") 14 | 15 | # Download and extract the zip file 16 | download_and_extract_zip(zip_url, destination_folder) 17 | 18 | # Port where the HTTP server will be running 19 | PORT = 4000 20 | 21 | class MyHandler(http.server.SimpleHTTPRequestHandler): 22 | def translate_path(self, path): 23 | path = super().translate_path(path) 24 | relpath = os.path.relpath(path, os.getcwd()) 25 | fullpath = os.path.join(os.getcwd(), 'dist', relpath) 26 | return fullpath 27 | 28 | Handler = MyHandler 29 | 30 | # Change the current working directory to the destination folder 31 | os.chdir(destination_folder) 32 | 33 | with socketserver.TCPServer(("", PORT), Handler) as httpd: 34 | httpd.serve_forever() --------------------------------------------------------------------------------