├── .dockerignore ├── .env.example ├── .github └── workflows │ └── docker-build.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── agent ├── __init__.py ├── agent.py ├── output_parser.py └── prompt.py ├── app.py ├── callbacks └── handlers.py ├── command ├── __init__.py └── cli.py ├── config ├── __init__.py └── config.py ├── examples ├── k8s_diagnose.md ├── k8s_helm.md ├── k8s_yaml.md ├── walrus_deploy_source_code.md └── walrus_manage_environment.md ├── i18n ├── __init__.py └── text.py ├── k8s ├── context.py ├── toolkit.py └── tools │ ├── common │ └── endpoint.py │ ├── helm │ ├── __init__.py │ ├── prompt.py │ └── tool.py │ └── manage_resource │ ├── __init__.py │ ├── prompt.py │ └── tool.py ├── pyproject.toml ├── requirements.txt ├── tools ├── base │ └── tools.py ├── human │ ├── __init__.py │ └── tool.py └── reasoning │ ├── __init__.py │ └── tool.py ├── utils └── utils.py └── walrus ├── client.py ├── context.py ├── toolkit.py └── tools ├── base └── tools.py ├── general ├── __init__.py └── tools.py ├── manage_context ├── __init__.py └── tool.py ├── manage_environment ├── __init__.py └── tool.py ├── manage_project ├── __init__.py └── tool.py ├── manage_service ├── __init__.py ├── prompt.py └── tool.py └── manage_template ├── __init__.py ├── prompt.py └── tool.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .env 2 | .git 3 | .venv 4 | 5 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # OpenAI API key, access to gpt-4 model is required. 2 | OPENAI_API_KEY= 3 | # Optional, uncomment the following line if you need custom openAI API base. 4 | # OPENAI_API_BASE=https://your-server/v1 5 | 6 | # Toolkits to enable. Currently support Kubernetes and Walrus. Case insensitive. 7 | TOOLKITS=kubernetes 8 | 9 | # Natural language AI used to interacte with you. 
e.g., Chinese, Japanese, etc. 10 | NATURAL_LANGUAGE=English 11 | 12 | # show AI reasoning steps. 13 | SHOW_REASONING=1 14 | 15 | # Output in verbose mode. 16 | VERBOSE=0 17 | 18 | ## Configuration for Walrus toolkit, valid when Walrus toolkit is enabled. 19 | # URL and API key for Walrus API. 20 | WALRUS_URL= 21 | WALRUS_API_KEY= 22 | # Skip TLS verification for WALRUS API. Use when testing with self-signed certificates. 23 | WALRUS_SKIP_TLS_VERIFY=1 24 | # Name of project and environment for the default context. 25 | # WALRUS_DEFAULT_PROJECT=default 26 | # WALRUS_DEFAULT_ENVIRONMENT=dev 27 | -------------------------------------------------------------------------------- /.github/workflows/docker-build.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | workflow_dispatch: {} 5 | push: 6 | branches: 7 | - main 8 | tags: 9 | - "v*" 10 | 11 | env: 12 | REPO: "sealio" 13 | BUILD_PLATFORMS: "linux/amd64,linux/arm64" 14 | 15 | jobs: 16 | skip: 17 | timeout-minutes: 5 18 | runs-on: ubuntu-22.04 19 | permissions: 20 | contents: read 21 | pull-requests: read 22 | actions: write 23 | outputs: 24 | should: ${{ steps.duplicate.outputs.should_skip }} 25 | steps: 26 | - name: Check Duplicate 27 | id: duplicate 28 | uses: fkirc/skip-duplicate-actions@v5 29 | with: 30 | github_token: "${{ github.token }}" 31 | paths_ignore: '["**.md", "**.mdx", "**.png", "**.jpg"]' 32 | do_not_skip: '["workflow_dispatch"]' 33 | continue-on-error: true 34 | 35 | build-and-push: 36 | needs: 37 | - skip 38 | if: needs.skip.outputs.should != 'true' 39 | 40 | runs-on: ubuntu-22.04 41 | steps: 42 | - name: Checkout code 43 | uses: actions/checkout@v2 44 | 45 | - name: Setup QEMU 46 | uses: docker/setup-qemu-action@v2 47 | with: 48 | image: tonistiigi/binfmt:qemu-v7.0.0 49 | platforms: "arm64" 50 | 51 | - name: Set up Docker Buildx 52 | uses: docker/setup-buildx-action@v2 53 | 54 | - name: Get Docker Metadata 55 | if: ${{ github.event_name 
!= 'pull_request' }} 56 | id: metadata 57 | uses: docker/metadata-action@v4 58 | with: 59 | images: ${{ env.REPO }}/appilot 60 | 61 | - name: Login to Docker Hub 62 | uses: docker/login-action@v1 63 | with: 64 | username: ${{ secrets.CI_DOCKERHUB_USERNAME }} 65 | password: ${{ secrets.CI_DOCKERHUB_PASSWORD }} 66 | 67 | - name: Build and push image 68 | uses: docker/build-push-action@v4 69 | with: 70 | push: ${{ github.event_name != 'pull_request' }} 71 | context: . 72 | tags: ${{ steps.metadata.outputs.tags }} 73 | platforms: ${{ env.BUILD_PLATFORMS }} 74 | cache-from: | 75 | type=registry,ref=${{ env.REPO }}/build-cache:appilot 76 | cache-to: | 77 | ${{ github.event_name != 'pull_request' && format('type=registry,mode=max,oci-mediatypes=false,compression=gzip,ref={0}/build-cache:appilot,ignore-error=true', env.REPO) || '' }} 78 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.pyc 3 | *.pyo 4 | *.pyd 5 | 6 | .env 7 | .envrc 8 | .pyenv 9 | venv/ 10 | env/ 11 | .venv/ 12 | __venv__/ 13 | __pyenv__/ 14 | 15 | .ipynb_checkpoints/ 16 | .idea/ 17 | .vscode/ 18 | 19 | .pytest_cache/ 20 | __pycache__/ 21 | 22 | .DS_Store 23 | *.swp 24 | 25 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY . . 6 | RUN pip3 install -r requirements.txt 7 | 8 | 9 | CMD ["python3", "app.py"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2023 Seal, Inc. 179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 
182 | You may obtain a copy of the License at 183 | 184 | http://www.apache.org/licenses/LICENSE-2.0 185 | 186 | Unless required by applicable law or agreed to in writing, software 187 | distributed under the License is distributed on an "AS IS" BASIS, 188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 189 | See the License for the specific language governing permissions and 190 | limitations under the License. 191 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | #Sets the default shell for executing commands as /bin/bash and specifies command should be executed in a Bash shell. 2 | SHELL := /bin/bash 3 | 4 | # Color codes for terminal output 5 | COLOR_RESET=\033[0m 6 | COLOR_CYAN=\033[1;36m 7 | COLOR_GREEN=\033[1;32m 8 | 9 | # Defines the targets help, install, dev-install, and run as phony targets. Phony targets are targets that are not really the name of files that are to be built. Instead, they are treated as commands. 10 | .PHONY: help install run 11 | 12 | #sets the default goal to help when no target is specified on the command line. 13 | .DEFAULT_GOAL := help 14 | 15 | #Disables echoing of commands. The commands executed by Makefile will not be printed on the console during execution. 16 | .SILENT: 17 | 18 | #Defines a target named help. 19 | help: 20 | @echo "Please use 'make ' where is one of the following:" 21 | @echo " help Return this message with usage instructions." 22 | @echo " install Create a virtual environment and install the dependencies." 23 | @echo " run Run Appilot." 24 | 25 | #Defines a target named install. This target will create a virtual environment, upgrade pip and install the dependencies. 26 | install: create-venv upgrade-pip install-dependencies farewell 27 | 28 | #Defines a target named create-venv. This target will create a virtual environment in the .venv folder. 
29 | create-venv: 30 | @echo -e "$(COLOR_CYAN)Creating virtual environment...$(COLOR_RESET)" && \ 31 | python3 -m venv .venv 32 | 33 | #Defines a target named upgrade-pip. This target will upgrade pip to the latest version. 34 | upgrade-pip: 35 | @echo -e "$(COLOR_CYAN)Upgrading pip...$(COLOR_RESET)" && \ 36 | source .venv/bin/activate && \ 37 | pip install --upgrade pip >> /dev/null 38 | 39 | #Defines a target named install-dependencies. This target will install the dependencies. 40 | install-dependencies: 41 | @echo -e "$(COLOR_CYAN)Installing dependencies...$(COLOR_RESET)" && \ 42 | source .venv/bin/activate && \ 43 | pip install -r requirements.txt >> /dev/null 44 | 45 | #Defines a target named farewell. This target will print a farewell message. 46 | farewell: 47 | @echo -e "$(COLOR_GREEN)All done!$(COLOR_RESET)" 48 | 49 | #Defines a target named lint. This targeet will do pylint. 50 | lint: 51 | git ls-files '*.py' | xargs pylint 52 | #Defines a target named run. This target will run Appilot. 53 | run: 54 | @echo -e "$(COLOR_CYAN)Running Appilot...$(COLOR_RESET)" && \ 55 | source .venv/bin/activate && \ 56 | python3 app.py 57 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Appilot 2 | 3 | Appilot['æpaɪlət] stands for application-pilot. 4 | It is an experimental project that helps you operate applications using GPT-like LLMs. 5 | 6 | ## Feature 7 | 8 | - Application management: deploy, upgrade, rollback, etc. 9 | - Environment management: clone, view topology, etc. 10 | - Diagnose: view logs, find flaws and provide fixes. 11 | - Safeguard: any action involving state changes requires human approval. 12 | - Hybrid infrastructure: works on kubernetes, VM, cloud, on-prem. 13 | - Multi language support: Choose the natural language you're comfortable with. 
14 | - Pluggable backends: It supports multiple backends including [Walrus](https://github.com/seal-io/walrus) and [Kubernetes](https://kubernetes.io), and is extensible. 15 | 16 | ## Demo 17 | 18 | Chat to deploy llama-2 on AWS: 19 | 20 | https://github.com/seal-io/appilot/assets/5697937/0562fe29-8e97-42ba-bbf6-eaa5b5fefc41 21 | 22 | Other use cases: 23 | 24 | - [Deploy from source code](./examples/walrus_deploy_source_code.md) 25 | - [Manage environments](./examples/walrus_manage_environment.md) 26 | - [Manage applications in Kubernetes using helm charts](./examples/k8s_helm.md) 27 | - [Operating native Kubernetes resources](./examples/k8s_yaml.md) 28 | - [Diagnose and fix issues](./examples/k8s_diagnose.md) 29 | 30 | ## Quickstart 31 | 32 | > **prerequistes:** 33 | > 34 | > - Get OpenAI API key with access to the gpt-4 model. 35 | > - Install `python3` and `make`. 36 | > - Install [kubectl](https://kubernetes.io/docs/tasks/tools/) and [helm](https://helm.sh/docs/intro/install/). 37 | > - Have a running Kubernetes cluster. 38 | 39 | 1. Clone the repository. 40 | 41 | ``` 42 | git clone https://github.com/seal-io/appilot && cd appilot 43 | ``` 44 | 45 | 2. Run the following command to get the envfile. 46 | 47 | ``` 48 | cp .env.example .env 49 | ``` 50 | 51 | 3. Edit the `.env` file and fill in `OPENAI_API_KEY`. 52 | 53 | 4. Run the following command to install. It will create a venv and install required dependencies. 54 | 55 | ``` 56 | make install 57 | ``` 58 | 59 | 5. Run the following command to get started: 60 | 61 | ``` 62 | make run 63 | ``` 64 | 65 | 6. Ask Appilot to deploy an application, e.g.: 66 | 67 | ``` 68 | > Deploy a jupyterhub. 69 | ... 70 | > Get url of the jupyterhub. 
71 | ``` 72 | 73 | ## Usage 74 | 75 | ### Configuration 76 | 77 | Appilot is configurable via environment variable or the envfile: 78 | | Parameter | Description | Default | 79 | |----------|------|---------------| 80 | | OPENAI_API_KEY | OpenAI API key, access to gpt-4 model is required. | "" | 81 | | OPENAI_API_BASE | Custom openAI API base. You can integrate with other LLMs as long as they serve in the same API style. | "" | 82 | | TOOLKITS | Toolkits to enable. Currently support Kubernetes and Walrus. Case insensitive. | "kubernetes" | 83 | | NATURAL_LANGUAGE | Natural language AI used to interacte with you. e.g., Chinese, Japanese, etc. | "English" | 84 | | SHOW_REASONING | Show AI reasoning steps. | True | 85 | | VERBOSE | Output in verbose mode. | False | 86 | | WALRUS_URL | URL of Walrus, valid when Walrus toolkit is enabled. | "" | 87 | | WALRUS_API_KEY | API key of Walrus, valid when Walrus toolkit is enabled. | "" | 88 | | WALRUS_SKIP_TLS_VERIFY | Skip TLS verification for WALRUS API. Use when testing with self-signed certificates. Valid when Walrus toolkit is enabled. | True | 89 | | WALRUS_DEFAULT_PROJECT | Project name for the default context, valid when Walrus toolkit is enabled. | "" | 90 | | WALRUS_DEFAULT_ENVIRONMENT | Environment name for the default context, valid when Walrus toolkit is enabled. | "" | 91 | 92 | ### Using Kubernetes Backend 93 | 94 | Follow steps in quickstart to run with Kubernetes backend. 95 | 96 | ### Using Walrus Backend 97 | 98 | > **Prerequisites:** [Install Walrus](https://seal-io.github.io/docs/quickstart). 99 | 100 | Walrus serves as the application management engine. It provides features like hybrid infrastructure support, environment management, etc. 101 | To enable Walrus backend, edit the envfile: 102 | 103 | 1. Set `TOOLKITS=walrus` 104 | 2. 
Fill in `OPENAI_API_KEY`, `WALRUS_URL` and `WALRUS_API_KEY` 105 | 106 | Then you can run Appilot to get started: 107 | 108 | ``` 109 | make run 110 | ``` 111 | 112 | ### Run with Docker 113 | 114 | You can run Appilot in docker container when using Walrus backend. 115 | 116 | > **Prerequisites:** Install `docker`. 117 | 118 | 1. Get an envfile by running the following command. 119 | 120 | ``` 121 | cp .env.example .env 122 | ``` 123 | 124 | 2. Configure the `.env` file. 125 | 126 | - Set `TOOLKITS=walrus` 127 | - Fill in `OPENAI_API_KEY`, `WALRUS_URL` and `WALRUS_API_KEY` 128 | 129 | 3. Run the following command: 130 | 131 | ``` 132 | docker run -it --env-file .env sealio/appilot:main 133 | ``` 134 | 135 | ### Using LLM alternatives to GPT-4 136 | 137 | You can use other LLMs as the reasoning engine of Appilot, as long as it serves inference APIs in openAI compatible way. 138 | 139 | 1. Configure the `.env` file, then set `OPENAI_API_BASE=https://your-api-base`. 140 | 141 | 2. Run Appilot as normal. 142 | 143 | ## How it works 144 | 145 | The following is the architecture diagram of Appilot: 146 | 147 | ![appilot-arch](https://github.com/seal-io/appilot/assets/5697937/914cb60d-60ab-4b4d-8661-82f89d85683b) 148 | 149 | ## License 150 | 151 | Copyright (c) 2023 [Seal, Inc.](https://seal.io) 152 | 153 | Licensed under the Apache License, Version 2.0 (the "License"); 154 | you may not use this file except in compliance with the License. 155 | You may obtain a copy of the License at [LICENSE](./LICENSE) file for details. 156 | 157 | Unless required by applicable law or agreed to in writing, software 158 | distributed under the License is distributed on an "AS IS" BASIS, 159 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 160 | See the License for the specific language governing permissions and 161 | limitations under the License. 
162 | -------------------------------------------------------------------------------- /agent/__init__.py: -------------------------------------------------------------------------------- 1 | """Appilot agent.""" 2 | -------------------------------------------------------------------------------- /agent/agent.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Optional 2 | 3 | from langchain.tools import BaseTool 4 | from langchain.agents.agent import AgentExecutor 5 | from langchain.agents.conversational.base import ConversationalAgent 6 | from langchain.callbacks.base import BaseCallbackManager 7 | from langchain.chains.llm import LLMChain 8 | from langchain.memory import ReadOnlySharedMemory 9 | from langchain.schema.language_model import BaseLanguageModel 10 | 11 | from config import config 12 | from tools.human.tool import HumanTool 13 | from tools.reasoning.tool import ShowReasoningTool, HideReasoningTool 14 | from agent.output_parser import OutputParser 15 | from agent.prompt import ( 16 | AGENT_PROMPT_PREFIX, 17 | FORMAT_INSTRUCTIONS_TEMPLATE, 18 | ) 19 | 20 | 21 | def create_agent( 22 | llm: BaseLanguageModel, 23 | shared_memory: Optional[ReadOnlySharedMemory] = None, 24 | tools: list[BaseTool] = [], 25 | callback_manager: Optional[BaseCallbackManager] = None, 26 | verbose: bool = True, 27 | agent_executor_kwargs: Optional[Dict[str, Any]] = None, 28 | **kwargs: Dict[str, Any], 29 | ) -> AgentExecutor: 30 | """Instantiate planner for a given task.""" 31 | 32 | system_tools = [ 33 | HumanTool(), 34 | ShowReasoningTool(), 35 | HideReasoningTool(), 36 | ] 37 | 38 | tools.extend(system_tools) 39 | 40 | format_instructions = FORMAT_INSTRUCTIONS_TEMPLATE.format( 41 | natural_language=config.APPILOT_CONFIG.natural_language 42 | ) 43 | prompt = ConversationalAgent.create_prompt( 44 | tools, 45 | prefix=AGENT_PROMPT_PREFIX, 46 | format_instructions=format_instructions, 47 | ) 48 | 49 | agent = 
ConversationalAgent( 50 | llm_chain=LLMChain( 51 | llm=llm, prompt=prompt, verbose=config.APPILOT_CONFIG.verbose 52 | ), 53 | output_parser=OutputParser(), 54 | allowed_tools=[tool.name for tool in tools], 55 | **kwargs, 56 | ) 57 | 58 | return AgentExecutor.from_agent_and_tools( 59 | agent=agent, 60 | tools=tools, 61 | memory=shared_memory, 62 | callback_manager=callback_manager, 63 | verbose=verbose, 64 | **(agent_executor_kwargs or {}), 65 | ) 66 | -------------------------------------------------------------------------------- /agent/output_parser.py: -------------------------------------------------------------------------------- 1 | import re 2 | from typing import Union 3 | 4 | from langchain.agents.agent import AgentOutputParser 5 | from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS 6 | from langchain.schema import AgentAction, AgentFinish, OutputParserException 7 | 8 | 9 | class OutputParser(AgentOutputParser): 10 | """Output parser for the agent. It extends the convoAgent parser to support multiline observation.""" 11 | 12 | ai_prefix: str = "AI" 13 | """Prefix to use before AI output.""" 14 | 15 | def get_format_instructions(self) -> str: 16 | return FORMAT_INSTRUCTIONS 17 | 18 | def parse(self, text: str) -> Union[AgentAction, AgentFinish]: 19 | if f"{self.ai_prefix}:" in text: 20 | return AgentFinish( 21 | {"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text 22 | ) 23 | regex = r"Action: (.*?)[\n]*Action Input: (.*?)\nReason: .*" 24 | match = re.search(regex, text, re.DOTALL) 25 | if not match: 26 | raise OutputParserException( 27 | f"Could not parse LLM output: `{text}`" 28 | ) 29 | action = match.group(1) 30 | action_input = match.group(2) 31 | return AgentAction( 32 | action.strip(), action_input.strip(" ").strip('"'), text 33 | ) 34 | 35 | @property 36 | def _type(self) -> str: 37 | return "conversational" 38 | -------------------------------------------------------------------------------- /agent/prompt.py: 
-------------------------------------------------------------------------------- 1 | AGENT_PROMPT_PREFIX = """You are Appilot, an agent that assists with user queries for application management. 2 | Only take action to user queries that are relevant to application management. 3 | Example: 4 | ``` 5 | User query: tell me a joke 6 | Plan: Sorry, I specialize in application management, not comedy. 7 | ``` 8 | 9 | Notes: 10 | ID of service is a string that looks like a long number. 11 | Always use the construct_service tool before creating or updating a service. 12 | 13 | TOOLS: 14 | ------ 15 | 16 | You have access to the following tools:""" 17 | 18 | FORMAT_INSTRUCTIONS_TEMPLATE = """To use a tool, please use the following format: 19 | 20 | ``` 21 | Thought: Do I need to use a tool? Yes 22 | Action: the action to take, should be one of [{{tool_names}}] 23 | Action Input: the input to the action 24 | Reason: the reason you use this tool(in {natural_language}) 25 | Observation: the result of the action 26 | ``` 27 | 28 | When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format: 29 | 30 | ``` 31 | Thought: Do I need to use a tool? No 32 | Reason: the reason you do not need to use a tool 33 | {{ai_prefix}}: [your response here] 34 | ``` 35 | 36 | In the reponse, don't show project id and environment id. Unless user explicitly ask for it. 37 | Use markdown format for the response. If the data is suitable to show in table, use markdown table. 38 | Please print the response to human in {natural_language}. 
39 | """ 40 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from command import cli 2 | 3 | 4 | cli.run() 5 | -------------------------------------------------------------------------------- /callbacks/handlers.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any, Dict, Optional, List 3 | from uuid import UUID 4 | import click 5 | import yaml 6 | 7 | from i18n import text 8 | from utils import utils 9 | from config import config 10 | 11 | from langchain.callbacks.base import BaseCallbackHandler 12 | from langchain.schema.output import LLMResult 13 | from langchain.schema.output import LLMResult 14 | from pygments import highlight 15 | from pygments.lexers import JsonLexer 16 | from pygments.lexers import YamlLexer 17 | from pygments.formatters import TerminalFormatter 18 | 19 | 20 | class HumanRejectedException(Exception): 21 | """Exception to raise when a person manually review and rejects a value.""" 22 | 23 | 24 | def remove_triple_backticks(text): 25 | if text.startswith("```") and text.endswith("```"): 26 | lines = text.split("\n") 27 | 28 | if len(lines) <= 1: 29 | return "" 30 | 31 | lines = lines[1:-1] 32 | 33 | result_text = "\n".join(lines) 34 | 35 | return result_text 36 | 37 | return text 38 | 39 | 40 | class ApprovalCallbackHandler(BaseCallbackHandler): 41 | """Callback for manual approval.""" 42 | 43 | raise_error: bool = True 44 | 45 | def on_tool_start( 46 | self, 47 | serialized: Dict[str, Any], 48 | input_str: str, 49 | *, 50 | run_id: UUID, 51 | parent_run_id: Optional[UUID] = None, 52 | **kwargs: Any, 53 | ) -> Any: 54 | if not self._approve(input_str, serialized): 55 | raise HumanRejectedException( 56 | f"Inputs {input_str} to tool {serialized} were rejected." 
class PrintReasoningCallbackHandler(BaseCallbackHandler):
    """Print AI reasoning.

    After each LLM call, find the last "Reason:" line of the generation and
    echo it to the user (when SHOW_REASONING is enabled).
    """

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> None:
        if not config.APPILOT_CONFIG.show_reasoning:
            return

        generation_text = response.generations[0][0].text
        reason_prompt_prefix = "Reason:"
        # Scan from the end: the last Reason line belongs to the final step.
        for line in reversed(generation_text.splitlines()):
            if line.startswith(reason_prompt_prefix):
                # Fix: str.lstrip("Reason:") strips *characters* from the
                # set {R,e,a,s,o,n,:}, mangling reasons such as
                # "Reason:none"; slice the prefix off instead.
                reason_text = line[len(reason_prompt_prefix):].strip()
                utils.print_ai_reasoning(reason_text)
                break
def setup_agent() -> Any:
    """Initialize config, LLM, memory and toolkits; return the Appilot agent.

    Exits the process when no toolkit is enabled.
    """
    config.init()
    colorama.init()

    llm = ChatOpenAI(
        model_name="gpt-4",
        temperature=0,
        callbacks=[handlers.PrintReasoningCallbackHandler()],
    )

    text.init_system_messages(llm)

    memory = ConversationBufferMemory(memory_key="chat_history")

    enabled_toolkits = [
        toolkit.lower() for toolkit in config.APPILOT_CONFIG.toolkits
    ]

    tools = []
    # Fix: TOOLKITS is a list, but the original if/elif made the toolkits
    # mutually exclusive — Walrus was silently ignored whenever Kubernetes
    # was also enabled. Load each enabled toolkit independently.
    if "kubernetes" in enabled_toolkits:
        kubernetes_toolkit = KubernetesToolKit(llm=llm)
        tools.extend(kubernetes_toolkit.get_tools())
    if "walrus" in enabled_toolkits:
        walrus_toolkit = WalrusToolKit(llm=llm)
        tools.extend(walrus_toolkit.get_tools())
    if not tools:
        print(text.get("enable_no_toolkit"))
        sys.exit(1)

    return create_agent(
        llm,
        shared_memory=memory,
        tools=tools,
        verbose=config.APPILOT_CONFIG.verbose,
    )
def init():
    """Populate the global APPILOT_CONFIG from the process environment.

    Raises:
        Exception: when OPENAI_API_KEY is missing.
    """
    load_dotenv()

    settings = {
        "openai_api_base": utils.get_env("OPENAI_API_BASE"),
        "openai_api_key": utils.get_env("OPENAI_API_KEY"),
        "natural_language": utils.get_env("NATURAL_LANGUAGE", "English"),
        "toolkits": utils.get_env_list("TOOLKITS"),
        "show_reasoning": utils.get_env_bool("SHOW_REASONING", True),
        "verbose": utils.get_env_bool("VERBOSE", False),
    }

    if not settings["openai_api_key"]:
        raise Exception("OPENAI_API_KEY is not set")

    if not settings["verbose"]:
        logging.basicConfig(level=logging.CRITICAL)
        # Disable child loggers of urllib3, e.g. urllib3.connectionpool
        logging.getLogger(urllib3.__package__).propagate = False

    global APPILOT_CONFIG
    APPILOT_CONFIG = Config(**settings)
https://github.com/seal-io/appilot/assets/5697937/762008cd-d8f1-425e-83ec-4653bf6a4777 2 | -------------------------------------------------------------------------------- /i18n/__init__.py: -------------------------------------------------------------------------------- 1 | """For internationalization. Most of the job is done by prompts. For the rest system messages, we also use AI to translate them.""" 2 | -------------------------------------------------------------------------------- /i18n/text.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from config import config 4 | 5 | from langchain.schema.language_model import BaseLanguageModel 6 | 7 | prompt = """ 8 | Translate the following json map to {language}. Keep the keys unchanged. 9 | {messages} 10 | 11 | RESULT: 12 | 13 | """ 14 | 15 | system_messages = { 16 | "welcome": "Appilot: What can I help?", 17 | "ai_reasoning": "Appilot reasoning: ", 18 | "response_prefix": "Appilot: ", 19 | "inform_prefix": "Appilot[inform]: ", 20 | "error_occur_message": "An internal error occurred. Enter 'appilot_log' if you want to see the details.", 21 | "rejected_message": "The action is rejected.", 22 | "no_error_message": "No error occurred.", 23 | "resource_log_prefix": "Here's the log:", 24 | "watch_service_note": "( Enter to halt )", 25 | "watch_service_ending": "Halted.", 26 | "show_graph_message": "The dependency graph is shown to you.", 27 | "inform_ready_start": "Start watching. Will inform when it's ready.", 28 | "service_ready_message": "Service {} is Ready.", 29 | "enable_no_toolkit": "No toolkit available. Please enable at least one toolkit.", 30 | "ask_approval": """ 31 | The following action requires approval: 32 | 33 | Input: 34 | {input} 35 | 36 | Action: 37 | {tool_name} 38 | 39 | Do you approve the above action? 
""", 40 | } 41 | 42 | 43 | def init_system_messages(llm: BaseLanguageModel): 44 | language = config.APPILOT_CONFIG.natural_language 45 | if language.lower() in ("en", "english"): 46 | return 47 | 48 | global system_messages 49 | system_messages_string = json.dumps(system_messages, ensure_ascii=False) 50 | result = llm.predict( 51 | prompt.format(language=language, messages=system_messages_string) 52 | ) 53 | translated = json.loads(result) 54 | system_messages = translated 55 | 56 | 57 | def get(key): 58 | return system_messages[key] 59 | -------------------------------------------------------------------------------- /k8s/context.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from kubernetes import client 3 | 4 | 5 | API_RESOURCES: list[dict[Any | str, Any]] 6 | 7 | 8 | class GroupVersionKind: 9 | """GroupVersionKind.""" 10 | 11 | def __init__(self, groupVersion: str, kind: str): 12 | self.groupVersion = groupVersion 13 | self.kind = kind 14 | 15 | 16 | def init_api_resources_cache(): 17 | """Get available api resources. 
Similar to kubectl api-resources.""" 18 | 19 | api_client = client.ApiClient() 20 | api_resource_list = api_client.call_api( 21 | "/api/v1", 22 | "GET", 23 | response_type="object", 24 | _return_http_data_only=True, 25 | ) 26 | api_resources = [ 27 | {**resource, "groupVersion": "v1"} 28 | for resource in api_resource_list["resources"] 29 | if "storageVersionHash" in resource 30 | ] 31 | 32 | api_group_list = api_client.call_api( 33 | "/apis", 34 | "GET", 35 | response_type="object", 36 | _return_http_data_only=True, 37 | ) 38 | 39 | for api_group in api_group_list["groups"]: 40 | api_resource_list = api_client.call_api( 41 | "/apis/" + api_group["preferredVersion"]["groupVersion"], 42 | "GET", 43 | response_type="object", 44 | _return_http_data_only=True, 45 | ) 46 | api_resources.extend( 47 | [ 48 | { 49 | **resource, 50 | "groupVersion": api_group["preferredVersion"][ 51 | "groupVersion" 52 | ], 53 | } 54 | for resource in api_resource_list["resources"] 55 | if "storageVersionHash" in resource and resource["namespaced"] 56 | ] 57 | ) 58 | global API_RESOURCES 59 | API_RESOURCES = api_resources 60 | 61 | 62 | def get_api_resources(): 63 | return API_RESOURCES 64 | 65 | 66 | def search_api_resource(resource_kind: str) -> GroupVersionKind: 67 | api_resources = get_api_resources() 68 | matching_resources = [ 69 | api_resource 70 | for api_resource in api_resources 71 | if str(api_resource["name"]).lower() == resource_kind 72 | or str(api_resource["singularName"]).lower() == resource_kind 73 | or str(api_resource["kind"]).lower() == resource_kind 74 | or ( 75 | "shortNames" in api_resource 76 | and resource_kind in api_resource["shortNames"] 77 | ) 78 | ] 79 | 80 | if matching_resources: 81 | return GroupVersionKind( 82 | matching_resources[0]["groupVersion"], 83 | matching_resources[0]["kind"], 84 | ) 85 | 86 | raise Exception(f"Resource {resource_kind} not found.") 87 | -------------------------------------------------------------------------------- 
def command_installed(commands: list[str]):
    """Return True when running *commands* exits with status 0.

    A missing executable (FileNotFoundError) counts as not installed.
    Output is captured so prechecks stay silent.
    """
    try:
        completed = subprocess.run(
            commands,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
    except FileNotFoundError:
        return False
    return completed.returncode == 0
def get_service_endpoints(service):
    """Return access endpoints for NodePort/LoadBalancer services.

    Other service types (e.g. ClusterIP) expose no external endpoint, so an
    empty list is returned for them.
    """
    service_endpoints = []
    if service.spec.type == "NodePort":
        service_endpoints.extend(get_nodeport_service_endpoints(service))
    elif service.spec.type == "LoadBalancer":
        service_endpoints.extend(get_loadbalancer_service_endpoints(service))

    return service_endpoints


def get_loadbalancer_service_endpoints(service):
    """Return endpoints of a LoadBalancer service.

    Falls back to node-port endpoints while the load balancer has not been
    provisioned yet.
    """
    # Fix: the original tested `"ingress" in service.status.load_balancer`,
    # a membership test on a client model object; check the attribute
    # instead (matching the snake_case model access used elsewhere here).
    if service.status.load_balancer and service.status.load_balancer.ingress:
        endpoints = []
        for ingress in service.status.load_balancer.ingress:
            # Unset fields may be None rather than "" on client models,
            # so test truthiness instead of comparing with "".
            if ingress.hostname:
                endpoints.append(
                    {
                        # Fix: the object's name lives in metadata.name
                        # (cf. get_nodeport_service_endpoints below);
                        # `service.name` does not exist.
                        "name": service.metadata.name,
                        "endpoint": ingress.hostname,
                    }
                )
            elif ingress.ip:
                endpoints.append(
                    {
                        "name": service.metadata.name,
                        "endpoint": ingress.ip,
                    }
                )
        return endpoints
    else:
        return get_nodeport_service_endpoints(service)


def get_nodeport_service_endpoints(service):
    """Return `<node-ip>:<node-port>` endpoints for a NodePort service."""
    endpoints = []
    node_ip = get_node_ip()
    for port in service.spec.ports:
        endpoints.append(
            {
                "name": f"{service.metadata.name}/{port.name}",
                # Fix: the Python client exposes snake_case attributes
                # (cf. load_balancer above), so nodePort -> node_port.
                "endpoint": f"{node_ip}:{port.node_port}",
            }
        )
    return endpoints


def get_ingress_endpoints(ingress):
    """Return per-rule/per-path URLs exposed by an Ingress."""
    ingress_endpoints = []
    host = ""
    if ingress.status.load_balancer and ingress.status.load_balancer.ingress:
        for ing in ingress.status.load_balancer.ingress:
            # Prefer the hostname; fall back to the IP when it is unset
            # (None or "").
            host = ing.hostname if ing.hostname else ing.ip

    tlsHosts = []
    if ingress.spec.tls:
        for tls in ingress.spec.tls:
            if tls.hosts:
                tlsHosts.extend(tls.hosts)

    if ingress.spec.rules:
        for rule in ingress.spec.rules:
            scheme = "http"
            if rule.host in tlsHosts:
                scheme = "https"
            if rule.host:
                host = rule.host
            if not host:
                # No load-balancer address and no host on the rule.
                continue
            if rule.http:
                for path in rule.http.paths:
                    if path.path:
                        ingress_endpoints.append(
                            {
                                # Fix: Ingress name lives in metadata.name.
                                "name": f"{ingress.metadata.name}/{path.path}",
                                "endpoint": f"{scheme}://{host}{path.path}",
                            }
                        )
                    else:
                        ingress_endpoints.append(
                            {
                                "name": f"{ingress.metadata.name}",
                                "endpoint": f"{scheme}://{host}",
                            }
                        )

    return ingress_endpoints


def get_node_ip() -> str:
    """Return an address of the first cluster node, preferring ExternalIP.

    Raises:
        Exception: when the cluster reports no nodes or no usable address.
    """
    config.load_kube_config()
    core_v1 = client.CoreV1Api()
    nodes = core_v1.list_node()
    if nodes.items:
        internal_ip = None
        # Fix: the original returned the *first* address of either type,
        # so an InternalIP listed first shadowed a later ExternalIP; scan
        # all addresses and prefer the external one.
        for address in nodes.items[0].status.addresses:
            if address.type == "ExternalIP":
                return address.address
            if address.type == "InternalIP" and internal_ip is None:
                internal_ip = address.address
        if internal_ip:
            return internal_ip

    raise Exception("No node found.")
def trim_default_values(input_string, max_lines=300):
    """Trim default values of a helm chart to mitigate LLM rate limit.

    As a tradeoff some information is lost: only the leading lines are kept.

    Args:
        input_string: raw `helm show values` output.
        max_lines: maximum number of lines to keep. Defaults to 300, the
            previously hard-coded limit, so existing callers are unchanged.

    Returns:
        The first *max_lines* lines of *input_string*, newline-joined.
    """
    return "\n".join(input_string.splitlines()[:max_lines])
Returns a matching chart object.""" 71 | params = { 72 | "facets": "false", 73 | "verified_publisher": "true", 74 | "kind": 0, 75 | "sort": "relevance", 76 | # "org": ["bitnami"], 77 | "ts_query_web": keyword, 78 | } 79 | response = requests.get( 80 | "https://artifacthub.io/api/v1/packages/search", params=params 81 | ) 82 | if response.status_code >= 400: 83 | raise Exception("failed to search helm charts: " + response.text) 84 | 85 | data = response.json() 86 | if len(data.get("packages")) == 0: 87 | raise Exception("no matching helm chart found") 88 | 89 | chart_name = data.get("packages")[0].get("name") 90 | repository_name = data.get("packages")[0].get("repository").get("name") 91 | version = data.get("packages")[0].get("version") 92 | 93 | response = requests.get( 94 | f"https://artifacthub.io/api/v1/packages/helm/{repository_name}/{chart_name}/{version}", 95 | ) 96 | chart_raw = response.json() 97 | 98 | chart = {} 99 | chart["name"] = chart_raw.get("name") 100 | chart["version"] = chart_raw.get("version") 101 | chart["description"] = chart_raw.get("description") 102 | chart["content_url"] = chart_raw.get("content_url") 103 | return chart 104 | 105 | 106 | class SearchChartTool(BaseTool): 107 | """Tool to search helm charts in Artifact Hub. Returns a matching chart object.""" 108 | 109 | name = "search_helm_chart" 110 | description = ( 111 | "Search helm charts in Artifact Hub. " 112 | 'Input should be a json string with two keys, "user_query", "keyword".' 113 | '"user_query" is the description of the deployment task.' 114 | '"keyword" is the keyword to search helm charts.' 115 | "Output a matching chart and overrided values for the helm deployment." 
class DeployApplicationTool(RequireApprovalTool):
    """Tool to deploy an application using helm charts."""

    name = "deploy_application"
    description = (
        "Deploy an application using helm charts."
        'Input should be a json string with four keys, "namespace", "name", "chart_url", "values".'
        '"namespace" is the namespace to deploy the application.'
        '"name" is the name of the application, generate a reasonable one if not specified.'
        '"chart_url" is the url to download the helm chart.'
        '"values" is overrided values for the helm installation to satisfy user query.'
    )

    def _run(self, text: str) -> str:
        """Run `helm install` with the supplied override values."""
        input = json.loads(text)
        chart_url = input.get("chart_url")
        name = input.get("name")
        namespace = input.get("namespace")
        if namespace == "":
            namespace = "default"

        helm_install_command = (
            f"helm install {name} {chart_url} --namespace {namespace}"
        )

        values = input.get("values")
        # Fix: the original `if values: values = {}` *discarded* every
        # user-supplied value; default to an empty dict only when nothing
        # was provided.
        if not values:
            values = {}

        # add chart_url to values as metadata until
        # https://github.com/helm/helm/issues/4256 is resolved.
        if "global" in values:
            values["global"]["metadata_chart_url"] = chart_url
        else:
            values["global"] = {"metadata_chart_url": chart_url}

        output_directory = "/tmp/appilot"
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
        file_path = f"{output_directory}/values.yaml"
        with open(file_path, "w") as file:
            # Fix: dump the dict that carries the chart_url metadata, not
            # the raw input (which lacks it) — upgrades rely on it later.
            yaml.dump(values, file)
        helm_install_command += f" -f {file_path}"

        try:
            output = subprocess.check_output(
                helm_install_command, shell=True, universal_newlines=True
            )

            logger.debug(f"helm install output: {output}")
            return f"application {name} is deployed."
        except subprocess.CalledProcessError as e:
            return f"Helm install failed: {e}"
        except Exception as e:
            return f"Error: {e}"
class UpgradeApplicationTool(RequireApprovalTool):
    """Tool to upgrade an application."""

    name = "upgrade_application"
    description = (
        "Upgrade an application."
        'Input should be a json string with three keys, "namespace", "name", "values".'
        '"namespace" is the namespace to deploy the application.'
        '"name" is the name of the application, generate a reasonable one if not specified.'
        '"values" is overrided values for helm upgrade to satisfy user query.'
    )

    def _run(self, text: str) -> str:
        """Run `helm upgrade` against a release previously deployed here."""
        input = json.loads(text)
        namespace = input.get("namespace")
        name = input.get("name")
        values = input.get("values")

        # Fix: default the namespace *before* reading the previous release,
        # so `helm get values` and `helm upgrade` target the same namespace.
        if namespace == "":
            namespace = "default"

        previous_values = get_helm_release_values(namespace, name)

        # Releases deployed without values yield "null"; guard with `or {}`.
        chart_url = (
            (yaml.safe_load(previous_values) or {})
            .get("global", {})
            .get("metadata_chart_url")
        )
        if not chart_url:
            return "Missing chart_url metadata in previous release"

        helm_upgrade_command = (
            f"helm upgrade {name} {chart_url} --namespace {namespace}"
        )

        # Fix: always write a values file. The original skipped it when no
        # values were supplied, dropping the metadata_chart_url marker and
        # breaking the *next* upgrade.
        if not values:
            values = {}

        # add chart_url to values as metadata until
        # https://github.com/helm/helm/issues/4256 is resolved.
        if "global" in values:
            values["global"]["metadata_chart_url"] = chart_url
        else:
            values["global"] = {"metadata_chart_url": chart_url}

        output_directory = "/tmp/appilot"
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
        file_path = f"{output_directory}/values.yaml"
        with open(file_path, "w") as file:
            yaml.dump(values, file)
        helm_upgrade_command += f" -f {file_path}"

        try:
            output = subprocess.check_output(
                helm_upgrade_command, shell=True, universal_newlines=True
            )

            logger.debug(f"helm upgrade output: {output}")
            return f"application {name} is upgraded."
        except subprocess.CalledProcessError as e:
            return f"Helm upgrade failed: {e}"
        except Exception as e:
            return f"Error: {e}"
365 | def get_resource_pods(dyn_client, namespace, resource): 366 | selector = resource.spec.selector.match_labels 367 | pod_client = dyn_client.resources.get( 368 | api_version="v1", 369 | kind="Pod", 370 | ) 371 | pods = pod_client.get( 372 | namespace=namespace, 373 | label_selector=selector, 374 | ) 375 | return pods.to_dict() 376 | 377 | 378 | def tidy_up_resource(resource): 379 | try: 380 | # make prompt short. 381 | del resource["metadata"]["managedFields"] 382 | del resource["metadata"]["resourceVersion"] 383 | del resource["metadata"]["uid"] 384 | del resource["metadata"]["generation"] 385 | except KeyError: 386 | pass 387 | 388 | 389 | class ListApplicationsTool(BaseTool): 390 | """Tool to list applications.""" 391 | 392 | name = "list_applications" 393 | description = ( 394 | "List applications." 395 | 'Input should be a json string with one keys, "namespace".' 396 | "namespace can be empty if not specified. If namespace is empty, list in current namespace." 397 | "If namespace is --all, list applications in all namespaces." 
398 | ) 399 | 400 | def _run(self, text: str) -> str: 401 | input = json.loads(text) 402 | namespace = input.get("namespace") 403 | 404 | helm_list_command = "helm list --all -o json" 405 | 406 | if namespace == "--all": 407 | helm_list_command += " --all-namespaces" 408 | else: 409 | if namespace == "": 410 | namespace = "default" 411 | helm_list_command += f" --namespace {namespace}" 412 | 413 | try: 414 | output = subprocess.check_output( 415 | helm_list_command, shell=True, universal_newlines=True 416 | ) 417 | 418 | except subprocess.CalledProcessError as e: 419 | return f"Helm list failed: {e}" 420 | except Exception as e: 421 | return f"Error: {e}" 422 | 423 | helm_releases = json.loads(output) 424 | for helm_release in helm_releases: 425 | del helm_release["chart"] 426 | del helm_release["app_version"] 427 | helm_release["ready"] = get_pod_ready_status_of_helm_release( 428 | helm_release.get("name"), helm_release.get("namespace") 429 | ) 430 | return json.dumps(helm_releases) 431 | 432 | 433 | class GetApplicationResourcesTool(BaseTool): 434 | """Tool to get application resources.""" 435 | 436 | name = "get_application_resources" 437 | description = ( 438 | "Get application resources. " 439 | "Helpful to know what resources an application consists of, what status they are in." 440 | 'Input should be a json string with two keys: "name" and "namespace".' 
441 | ) 442 | 443 | def _run(self, text: str) -> str: 444 | input = json.loads(text) 445 | name = input.get("name") 446 | namespace = input.get("namespace") 447 | 448 | if namespace == "": 449 | namespace = "default" 450 | 451 | helm_manifest_command = ( 452 | f"helm get manifest {name} --namespace {namespace}" 453 | ) 454 | 455 | try: 456 | output = subprocess.check_output( 457 | helm_manifest_command, shell=True, universal_newlines=True 458 | ) 459 | 460 | except subprocess.CalledProcessError as e: 461 | return f"Helm delete failed: {e}" 462 | except Exception as e: 463 | return f"Error: {e}" 464 | 465 | resource_manifests = yaml.safe_load_all(output) 466 | 467 | dyn_client = dynamic.DynamicClient( 468 | api_client.ApiClient(configuration=config.load_kube_config()) 469 | ) 470 | 471 | resources = [] 472 | for resource_manifest in resource_manifests: 473 | resource_kind = resource_manifest.get("kind") 474 | resoruce_client = dyn_client.resources.get( 475 | api_version=resource_manifest.get("apiVersion"), 476 | kind=resource_kind, 477 | ) 478 | resource = resoruce_client.get( 479 | name=resource_manifest.get("metadata").get("name"), 480 | namespace=namespace, 481 | ) 482 | if resource_kind in ["Deployment", "StatefulSet", "DaemonSet"]: 483 | pods = get_resource_pods(dyn_client, namespace, resource) 484 | resources.extend(pods) 485 | resource = resource.to_dict() 486 | tidy_up_resource(resource) 487 | resources.append(resource) 488 | 489 | return json.dumps(resources) 490 | 491 | 492 | class GetApplicationAccessEndpointsTool(BaseTool): 493 | """Tool to get application access endpoints.""" 494 | 495 | name = "get_application_access_endpoints" 496 | description = ( 497 | "Get application access endpoints. " 498 | 'Input should be a json string with two keys: "name" and "namespace".' 
499 | ) 500 | 501 | def _run(self, text: str) -> str: 502 | input = json.loads(text) 503 | name = input.get("name") 504 | namespace = input.get("namespace") 505 | 506 | helm_manifest_command = f"helm get manifest {name}" 507 | 508 | if namespace == "": 509 | namespace = "default" 510 | 511 | helm_manifest_command += f" --namespace {namespace}" 512 | 513 | try: 514 | output = subprocess.check_output( 515 | helm_manifest_command, shell=True, universal_newlines=True 516 | ) 517 | 518 | except subprocess.CalledProcessError as e: 519 | return f"Helm delete failed: {e}" 520 | except Exception as e: 521 | return f"Error: {e}" 522 | 523 | resource_manifests = yaml.safe_load_all(output) 524 | 525 | dyn_client = dynamic.DynamicClient( 526 | api_client.ApiClient(configuration=config.load_kube_config()) 527 | ) 528 | 529 | endpoints = [] 530 | for resource_manifest in resource_manifests: 531 | resource_kind = resource_manifest.get("kind") 532 | if resource_kind not in ["Service", "Ingress"]: 533 | continue 534 | 535 | resoruce_client = dyn_client.resources.get( 536 | api_version=resource_manifest.get("apiVersion"), 537 | kind=resource_kind, 538 | ) 539 | resource = resoruce_client.get( 540 | name=resource_manifest.get("metadata").get("name"), 541 | namespace=namespace, 542 | ) 543 | 544 | if resource_kind == "Service": 545 | endpoints.extend(get_service_endpoints(resource)) 546 | elif resource_kind == "Ingress": 547 | endpoints.extend(get_ingress_endpoints(resource)) 548 | 549 | return json.dumps(endpoints) 550 | 551 | 552 | class GetApplicationDetailTool(BaseTool): 553 | """Tool to get application detail.""" 554 | 555 | name = "get_application_detail" 556 | description = ( 557 | "Get application detail. " 558 | 'Input should be a json string with two keys: "name" and "namespace".' 
559 | ) 560 | 561 | def _run(self, text: str) -> str: 562 | input = json.loads(text) 563 | name = input.get("name") 564 | namespace = input.get("namespace") 565 | 566 | if namespace == "": 567 | namespace = "default" 568 | 569 | helm_status_command = ( 570 | f"helm status {name} --show-resources --namespace {namespace}" 571 | ) 572 | 573 | try: 574 | output = subprocess.check_output( 575 | helm_status_command, shell=True, universal_newlines=True 576 | ) 577 | 578 | except subprocess.CalledProcessError as e: 579 | return f"Helm status failed: {e}" 580 | except Exception as e: 581 | return f"Error: {e}" 582 | 583 | # Trim NOTES 584 | index = output.find("NOTES:") 585 | 586 | if index != -1: 587 | output = output[:index] 588 | 589 | return output 590 | 591 | 592 | class DeleteApplicationTool(RequireApprovalTool): 593 | """Tool to delete an application.""" 594 | 595 | name = "delete_application" 596 | description = ( 597 | "Delete an application. " 598 | 'Input should be a json string with two keys: "name" and "namespace".' 599 | ) 600 | 601 | def _run(self, text: str) -> str: 602 | input = json.loads(text) 603 | name = input.get("name") 604 | namespace = input.get("namespace") 605 | 606 | if namespace == "": 607 | namespace = "default" 608 | 609 | helm_delete_command = f"helm delete {name} --namespace {namespace}" 610 | 611 | try: 612 | output = subprocess.check_output( 613 | helm_delete_command, shell=True, universal_newlines=True 614 | ) 615 | 616 | logger.debug(f"helm delete output: {output}") 617 | return "Application is deleted." 
        except subprocess.CalledProcessError as e:
            return f"Helm delete failed: {e}"
        except Exception as e:
            return f"Error: {e}"
-------------------------------------------------------------------------------- /k8s/tools/manage_resource/__init__.py: --------------------------------------------------------------------------------
"""Tool for managing Kubernetes resources."""
-------------------------------------------------------------------------------- /k8s/tools/manage_resource/prompt.py: --------------------------------------------------------------------------------
# Prompt template that asks the LLM to turn a free-form deployment request
# into ready-to-apply kubernetes YAML.
# NOTE(review): the blank lines after "CONSTRUCTED RESOURCES:" may have held
# an angle-bracket placeholder lost in extraction — confirm against the
# original file.
CONSTRUCT_RESOURCES_TO_CREATE_PROMPT = """
You are a planner that constructs kubernetes resources given a user query describing a deployment task.

You should:
1) evaluate whether kubernetes resources can be constructed according to the user query. If no, say why.
2) if yes, output in the following format:

CONSTRUCTED RESOURCES:


Strictly follow the above output format, do not add extra explanation or words.
The output will be applied to a kubernetes cluster for creation.
Ensure the output has no placeholders and requires no external inputs.

User query: {query}

CONSTRUCTED RESOURCES:
"""

# Prompt template for producing an updated resource spec from the current
# spec plus a free-form update request.
# NOTE(review): "for creation" below reads like a copy-paste from the create
# prompt — confirm before changing the runtime string.
CONSTRUCT_RESOURCES_TO_UPDATE_PROMPT = """
You are a planner that constructs kubernetes resources given a user query describing an update task.

You should:
1) evaluate whether kubernetes resources can be constructed according to the user query. If no, say why.
2) if yes, output in the following format:

CURRENT RESOURCE SPEC:


TO UPDATE RESOURCE SPEC:


Strictly follow the above output format, do not add extra explanation or words.
The output will be applied to a kubernetes cluster for creation.

User query: {query}

CURRENT RESOURCE SPEC:
{current_resource_spec}

TO UPDATE RESOURCE SPEC:
"""
-------------------------------------------------------------------------------- /k8s/tools/manage_resource/tool.py: --------------------------------------------------------------------------------
import json
import logging
import subprocess
import click
from langchain import LLMChain, PromptTemplate
from langchain.agents.tools import BaseTool
import yaml
from config import config
from langchain.schema.language_model import BaseLanguageModel
from k8s.tools.common.endpoint import (
    get_ingress_endpoints,
    get_service_endpoints,
)
from k8s.tools.manage_resource.prompt import (
    CONSTRUCT_RESOURCES_TO_CREATE_PROMPT,
    CONSTRUCT_RESOURCES_TO_UPDATE_PROMPT,
)
from tools.base.tools import RequireApprovalTool
# NOTE(review): this re-binds the name "config" to kubernetes.config,
# shadowing "from config import config" above; every config.load_kube_config()
# call in this file resolves to kubernetes.config — confirm the project
# config import is intentional.
from kubernetes import config, dynamic, client
from kubernetes.client import api_client
from k8s import context
from utils import utils
from i18n import text

logger = logging.getLogger(__name__)


class ListResourcesTool(BaseTool):
    """Tool to list resources."""

    name = "list_kubernetes_resources"
    description = (
        "List kubernetes resources."
        'Input should be a json string with two keys: "resource_kind" and "namespace".'
        '"namespace" is optional, set it to empty string if user does not specify.'
        "If namespace is --all, lists in all namespaces."
    )

    def _run(self, text: str) -> str:
        # Parse tool input: {"resource_kind", "namespace"}.
        input = json.loads(text)

        resource_kind = str(input.get("resource_kind")).lower()
        namespace = str(input.get("namespace")).lower()

        # Use kubectl directly. Raw API output can easily exceed the LLM rate limit.
        kubectl_get_command = f"kubectl get {resource_kind}"

        if namespace == "--all":
            kubectl_get_command += " --all-namespaces"
        elif namespace:
            kubectl_get_command += f" -n {namespace}"
        # An empty namespace falls through: kubectl then uses the current
        # context namespace.

        try:
            output = subprocess.check_output(
                kubectl_get_command, shell=True, universal_newlines=True
            )

        except subprocess.CalledProcessError as e:
            return f"kubectl get failed: {e}"
        except Exception as e:
            return f"Error: {e}"

        # Print raw output without markdown rendering
        return f"{utils.raw_format_prefix}\n{output}"


class ListResourcesForInfoTool(ListResourcesTool):
    """Tool to list resources for info."""

    # Same behavior as ListResourcesTool; only name/description differ so the
    # agent can use the listing as an intermediate step rather than a reply.
    name = "list_kubernetes_resources_for_info"
    description = (
        "List kubernetes resources to help find more info, instead of returning to the user."
        'Input should be a json string with two keys: "resource_kind" and "namespace".'
        '"namespace" is optional, set it to empty string if user does not specify.'
        "If namespace is --all, lists in all namespaces."
    )


class DeleteResourceTool(RequireApprovalTool):
    """Tool to delete a kubernetes resource."""

    name = "delete_a_kubernetes_resource"
    description = (
        "Delete a kubernetes resource. "
        'Input should be a json string with three keys: "resource_kind", "resource_name" and "namespace".'
    )

    def _run(self, text: str) -> str:
        # Parse tool input: {"resource_kind", "resource_name", "namespace"}.
        input = json.loads(text)

        # "config" here is kubernetes.config (it shadows the project config
        # import at the top of the file).
        dyn_client = dynamic.DynamicClient(
            api_client.ApiClient(configuration=config.load_kube_config())
        )
        resource_kind = input.get("resource_kind")
        resource_name = input.get("resource_name")
        namespace = input.get("namespace")
        if namespace == "":
            namespace = "default"
        # Resolve kind -> group/version via the project's discovery helper.
        gvk = context.search_api_resource(resource_kind)
        resources = dyn_client.resources.get(
            api_version=gvk.groupVersion,
            kind=gvk.kind,
        )

        try:
            resources.delete(name=resource_name, namespace=namespace)
        except Exception as e:
            return f"Error deleting resource: {e}"

        # Deletion is asynchronous; report that it has started.
        return "Resource is being deleted."


class GetResourceDetailTool(BaseTool):
    """Tool to get detail of a kubernetes resource."""

    name = "get_kubernetes_resource_detail"
    description = (
        "Get detail of a kubernetes resource. "
        'Input should be a json string with three keys: "resource_kind", "resource_name" and "namespace".'
    )

    def _run(self, text: str) -> str:
        input = json.loads(text)

        dyn_client = dynamic.DynamicClient(
            api_client.ApiClient(configuration=config.load_kube_config())
        )
        resource_kind = input.get("resource_kind")
        resource_name = input.get("resource_name")
        namespace = input.get("namespace")
        if namespace == "":
            namespace = "default"
        gvk = context.search_api_resource(resource_kind)
        resources = dyn_client.resources.get(
            api_version=gvk.groupVersion,
            kind=gvk.kind,
        )

        try:
            resource_raw = resources.get(
                name=resource_name, namespace=namespace
            )
            resource = resource_raw.to_dict()
            # make prompt short.
            del resource["metadata"]["managedFields"]
            del resource["metadata"]["resourceVersion"]
            del resource["metadata"]["uid"]
            del resource["metadata"]["generation"]
        except KeyError:
            # A missing key aborts the remaining deletes; tolerable since
            # the trimming only shortens the prompt.
            pass
        except Exception as e:
            return f"Error getting resource detail: {e}"

        return json.dumps(resource)


class GetResourceYamlTool(BaseTool):
    """Tool to get yaml of a kubernetes resource."""

    name = "get_kubernetes_resource_yaml"
    description = (
        "Get yaml of a kubernetes resource. "
        'Input should be a json string with three keys: "resource_kind", "resource_name" and "namespace".'
        "The output is the yaml of the resource."
        "It directly prints the output. Use when users want to see the resoruce yaml."
    )

    def _run(self, text: str) -> str:
        input = json.loads(text)

        dyn_client = dynamic.DynamicClient(
            api_client.ApiClient(configuration=config.load_kube_config())
        )
        resource_kind = input.get("resource_kind")
        resource_name = input.get("resource_name")
        namespace = input.get("namespace")
        if namespace == "":
            namespace = "default"
        gvk = context.search_api_resource(resource_kind)
        resources = dyn_client.resources.get(
            api_version=gvk.groupVersion,
            kind=gvk.kind,
        )

        try:
            resource_raw = resources.get(
                name=resource_name, namespace=namespace
            )
            resource = resource_raw.to_dict()
            # make prompt short.
            del resource["metadata"]["managedFields"]
            del resource["metadata"]["resourceVersion"]
            del resource["metadata"]["uid"]
            del resource["metadata"]["generation"]
        except KeyError:
            pass
        except Exception as e:
            logger.debug(f"Error getting resource yaml: {e}")
            # FIXME handle other cases
            return f"Resource not found."

        # Print raw output without markdown rendering
        return f"{utils.raw_format_prefix}\n{yaml.dump(resource)}"


class GetServiceAccessEndpointsTool(BaseTool):
    """Tool to get access endpoints of a kubernetes service."""

    name = "get_kubernetes_service_access_endpoints"
    description = (
        "Get access endpoints of a kubernetes service. "
        'Input should be a json string with two keys: "namespace" and "name".'
    )

    def _run(self, text: str) -> str:
        input = json.loads(text)

        name = input.get("name")
        namespace = input.get("namespace")

        if namespace == "":
            namespace = "default"

        try:
            # NOTE(review): unlike the other tools in this file, no explicit
            # config.load_kube_config() is passed here; this relies on kube
            # config being loaded earlier in the process — confirm.
            dynamic_client = dynamic.DynamicClient(
                client.api_client.ApiClient()
            )
            resource = dynamic_client.resources.get(
                api_version="v1", kind="Service"
            )
            service = resource.get(namespace=namespace, name=name)

            endpoints = get_service_endpoints(service)
        except Exception as e:
            return f"Error getting service endpoints: {e}"

        return json.dumps(endpoints)


class GetIngressAccessEndpointsTool(BaseTool):
    """Tool to get access endpoints of a kubernetes ingress."""

    name = "get_kubernetes_ingress_access_endpoints"
    description = (
        "Get access endpoints of a kubernetes ingress. "
        'Input should be a json string with two keys: "namespace" and "name".'
247 | ) 248 | 249 | def _run(self, text: str) -> str: 250 | input = json.loads(text) 251 | 252 | name = input.get("name") 253 | namespace = input.get("namespace") 254 | 255 | if namespace == "": 256 | namespace = "default" 257 | 258 | try: 259 | dynamic_client = dynamic.DynamicClient( 260 | client.api_client.ApiClient() 261 | ) 262 | resource = dynamic_client.resources.get( 263 | api_version="networking.k8s.io/v1", kind="Ingress" 264 | ) 265 | ingress = resource.get(namespace=namespace, name=name) 266 | 267 | endpoints = get_ingress_endpoints(ingress) 268 | except Exception as e: 269 | return f"Error getting ingress endpoints: {e}" 270 | 271 | return json.dumps(endpoints) 272 | 273 | 274 | class DescribePodTool(BaseTool): 275 | """Tool to describe a pod.""" 276 | 277 | name = "describe_pod" 278 | description = ( 279 | "Show details of a pod including related events. " 280 | 'Input should be a json string with two keys: "name" and "namespace".' 281 | ) 282 | 283 | def _run(self, text: str) -> str: 284 | input = json.loads(text) 285 | 286 | name = input.get("name") 287 | namespace = input.get("namespace") 288 | if namespace == "": 289 | namespace = "default" 290 | 291 | kubectl_describe_command = ( 292 | f"kubectl describe pod {name} -n {namespace}" 293 | ) 294 | 295 | try: 296 | output = subprocess.check_output( 297 | kubectl_describe_command, shell=True, universal_newlines=True 298 | ) 299 | except subprocess.CalledProcessError as e: 300 | return f"kubectl get failed: {e}" 301 | except Exception as e: 302 | return f"Error: {e}" 303 | 304 | return output 305 | 306 | 307 | class GetPodLogsTool(BaseTool): 308 | """Tool to get logs of a pod.""" 309 | 310 | name = "get_kubernetes_pod_logs" 311 | description = ( 312 | "Get logs of a pod. " 313 | 'Input should be a json string with four keys: "name", "namespace", "container_name" and "line_number".' 314 | '"container_name" is optional. Set it to the name of the container to get logs from.' 
        '"line_number" is int defaulting to 50. Set it to the number of lines of logs to return.'
    )

    def _run(self, text: str) -> str:
        input = json.loads(text)

        name = input.get("name")
        namespace = input.get("namespace")
        # NOTE(review): container defaults to "" (not None); verify the
        # client accepts an empty container name for single-container pods.
        container_name = input.get("container_name", "")
        line_number = input.get("line_number", 50)

        if namespace == "":
            namespace = "default"

        v1 = client.CoreV1Api()

        try:
            pod_log = v1.read_namespaced_pod_log(
                name=name,
                namespace=namespace,
                container=container_name,
                tail_lines=line_number,
            )
        except Exception as e:
            return f"Error getting pod logs: {e}"

        # Wrap in a fenced block so the markdown renderer shows it verbatim.
        return f"```\n{pod_log}\n```"


class WatchResourcesTool(BaseTool):
    """Tool to watch resources."""

    name = "watch_resources"
    description = (
        "Watch resources changes in a namespace."
        'Input should be a json string with two keys: "resource_kind" and "namespace".'
    )

    # The parameter is named "query" here; "text" would shadow the i18n
    # "text" import used below.
    def _run(self, query: str) -> str:
        input = json.loads(query)
        resource_kind = str(input.get("resource_kind")).lower()
        namespace = str(input.get("namespace")).lower()

        if namespace == "":
            namespace = "default"

        kubectl_watch_command = (
            f"kubectl get {resource_kind} -w -n {namespace}"
        )

        try:
            click.echo(text.get("watch_service_note"))
            # Stream kubectl's watch output to the console until it exits
            # or the user interrupts.
            process = subprocess.Popen(
                kubectl_watch_command,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )

            for line in process.stdout:
                print(line, end="")

            process.wait()

        except KeyboardInterrupt:
            # Ctrl+C detected. Stopping the request.
            print("Terminated by user")

        return text.get("watch_service_ending")


class ConstructResourceTool(BaseTool):
    """Tool to construct resources."""

    name = "construct_kubernetes_resources"
    description = (
        "Construct Kubernetes resources. "
        'Input to the tool should be a json with one keys: "user_query".'
        'The value of "user_query" should be the description of a deployment task.'
        "The output is kubernetes resources in yaml format."
    )
    # The LLM used to expand the user query into concrete YAML manifests.
    llm: BaseLanguageModel

    def _run(self, text: str) -> str:
        input = json.loads(text)
        query = input.get("user_query")
        prompt = PromptTemplate(
            template=CONSTRUCT_RESOURCES_TO_CREATE_PROMPT,
            input_variables=["query"],
        )
        chain = LLMChain(llm=self.llm, prompt=prompt)
        # NOTE(review): json.dumps() re-quotes the query string before it is
        # templated into the prompt — confirm the extra quotes are intended.
        return chain.run(json.dumps(query)).strip()


class ConstructResourceForUpdateTool(BaseTool):
    """Tool to construct a kubernetes resource for update."""

    name = "construct_kubernetes_resource_for_update"
    description = (
        "Construct a Kubernetes resource for update. "
        'Input to the tool should be a json with four keys: "user_query", "resource_kind", "resource_name" and "namespace".'
        '"user_query" should be the description of an update task.'
        "The output is the kubernetes resource spec for update in yaml format."
    )
    # The LLM used to rewrite the current spec per the user's request.
    llm: BaseLanguageModel

    def _run(self, text: str) -> str:
        input = json.loads(text)
        query = input.get("user_query")
        resource_kind = input.get("resource_kind")
        resource_name = input.get("resource_name")
        namespace = input.get("namespace")
        if namespace == "":
            namespace = "default"

        dyn_client = dynamic.DynamicClient(
            api_client.ApiClient(configuration=config.load_kube_config())
        )
        gvk = context.search_api_resource(resource_kind)
        resources = dyn_client.resources.get(
            api_version=gvk.groupVersion,
            kind=gvk.kind,
        )

        try:
            resource_raw = resources.get(
                name=resource_name, namespace=namespace
            )
            resource = resource_raw.to_dict()
            # make prompt short.
            del resource["metadata"]["managedFields"]
            del resource["metadata"]["resourceVersion"]
            del resource["metadata"]["uid"]
            del resource["metadata"]["generation"]
            del resource["status"]
        except KeyError:
            # A missing key skips the remaining trims; cosmetic only.
            pass
        except Exception as e:
            return f"Error getting resource detail: {e}"

        # Feed the current spec to the LLM so it can emit the updated spec.
        prompt = PromptTemplate(
            template=CONSTRUCT_RESOURCES_TO_UPDATE_PROMPT,
            input_variables=["query"],
            partial_variables={"current_resource_spec": yaml.dump(resource)},
        )
        chain = LLMChain(llm=self.llm, prompt=prompt)
        return chain.run(json.dumps(query)).strip()


class ApplyResourcesTool(RequireApprovalTool):
    """Tool to apply kubernetes resources."""

    name = "apply_kubernetes_resources"
    description = (
        "Apply kubernetes resources. "
        "Input should be kubernetes resources in yaml format."
        "The provided yaml should is fully functional. Do not provide empty input. "
        "It will be applied to a kubernetes cluster."
474 | ) 475 | 476 | def _run(self, text: str) -> str: 477 | lines = text.splitlines() 478 | # remove triple backticks of the yaml text 479 | filtered_lines = [line for line in lines if not line.startswith("```")] 480 | filtered_text = "\n".join(filtered_lines) 481 | try: 482 | yaml_documents = yaml.safe_load_all(filtered_text) 483 | apply_or_update_yaml(yaml_documents) 484 | except Exception as e: 485 | return f"Error applying/updating YAML manifest: {str(e)}" 486 | 487 | resources = [] 488 | for yaml_manifest in yaml_documents: 489 | resources.append( 490 | { 491 | "kind": yaml_manifest["kind"], 492 | "name": yaml_manifest["metadata"]["name"], 493 | } 494 | ) 495 | 496 | return f"Applied the following resources: {resources}" 497 | 498 | 499 | def apply_or_update_yaml(yaml_documents): 500 | for yaml_manifest in yaml_documents: 501 | api_version = yaml_manifest["apiVersion"] 502 | kind = yaml_manifest["kind"] 503 | 504 | config.load_kube_config() 505 | dynamic_client = dynamic.DynamicClient(client.api_client.ApiClient()) 506 | resource = dynamic_client.resources.get( 507 | api_version=api_version, kind=kind 508 | ) 509 | 510 | namespace = yaml_manifest.get("metadata", {}).get( 511 | "namespace", "default" 512 | ) 513 | name = yaml_manifest.get("metadata", {}).get("name", "") 514 | 515 | resource_exists = False 516 | try: 517 | resource.get(namespace=namespace, name=name) 518 | resource_exists = True 519 | except Exception as e: 520 | pass 521 | 522 | # update if exists, else create. 
        try:
            if resource_exists:
                # Patch preserves server-managed fields that are not in the
                # manifest.
                resource.patch(
                    namespace=namespace,
                    name=name,
                    body=yaml_manifest,
                )
            else:
                resource.create(namespace=namespace, body=yaml_manifest)
        except Exception as e:
            # NOTE(review): failures are only printed; the caller still
            # reports the resource as applied — confirm this is intended.
            print(f"Error applying/updating YAML manifest: {str(e)}")
-------------------------------------------------------------------------------- /pyproject.toml: --------------------------------------------------------------------------------
# Formatting configuration (black).
[tool.black]
line-length = 79
target-version = ['py310', 'py311']
exclude = '\.venv|build|dist'

-------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
aiohttp==3.8.5
aiosignal==1.3.1
anyio==3.7.1
async-timeout==4.0.2
attrs==23.1.0
black==23.7.0
cachetools==5.3.1
certifi==2023.7.22
charset-normalizer==3.2.0
click==8.1.6
colorama==0.4.6
dataclasses-json==0.5.14
exceptiongroup==1.1.2
fastapi==0.100.1
frozenlist==1.4.0
google-auth==2.22.0
h11==0.14.0
httptools==0.6.0
idna==3.4
kubernetes==27.2.0
langchain==0.0.271
langsmith==0.0.26
markdown-it-py==3.0.0
marshmallow==3.20.1
mdurl==0.1.2
multidict==6.0.4
mypy-extensions==1.0.0
numexpr==2.8.4
numpy==1.25.2
oauthlib==3.2.2
openai==0.27.8
openapi-schema-pydantic==1.2.4
packaging==23.1
pathspec==0.11.2
Pillow==10.0.0
platformdirs==3.10.0
pyasn1==0.5.0
pyasn1-modules==0.3.0
pydantic==1.10.12
pydot==1.4.2
Pygments==2.15.1
pyparsing==3.1.1
python-dateutil==2.8.2
python-dotenv==1.0.0
PyYAML==6.0.1
requests==2.31.0
requests-oauthlib==1.3.1
rich==13.5.2
rsa==4.9
six==1.16.0
sniffio==1.3.0
SQLAlchemy==2.0.19
starlette==0.27.0
tenacity==8.2.2
tqdm==4.65.0
typing-inspect==0.9.0
typing_extensions==4.7.1
urllib3==1.26.16
uvicorn==0.23.2
uvloop==0.17.0
watchfiles==0.19.0
websocket-client==1.6.2
websockets==11.0.3
yarl==1.9.2
-------------------------------------------------------------------------------- /tools/base/tools.py: --------------------------------------------------------------------------------
from typing import Any
from langchain.agents.tools import BaseTool
from callbacks.handlers import ApprovalCallbackHandler


class RequireApprovalTool(BaseTool):
    """Tool that requires human approval."""

    def __init__(self, **data: Any) -> None:
        # Attach the approval callback so every run of a subclassed tool
        # first goes through the human-approval handler.
        super().__init__(callbacks=[ApprovalCallbackHandler()], **data)
-------------------------------------------------------------------------------- /tools/human/__init__.py: --------------------------------------------------------------------------------
"""Tool for asking human input."""
-------------------------------------------------------------------------------- /tools/human/tool.py: --------------------------------------------------------------------------------
from typing import Callable, Optional

from pydantic import Field

from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.tools.base import BaseTool


def _print_func(text: str) -> None:
    # Blank line before the question keeps it visually separated.
    print("\n")
    print(text)


class HumanTool(BaseTool):
    """Tool that asks user for input."""

    name = "human"
    description = (
        "You can ask a human for guidance when you think you "
        "got stuck or you are not sure what to do next. "
        "The input should be a question for the human."
    )
    # Callable used to display the question to the user.
    prompt_func: Callable[[str], None] = Field(
        default_factory=lambda: _print_func
    )
    # Callable used to read the user's reply.
    input_func: Callable = Field(default_factory=lambda: input)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Human input tool."""
        self.prompt_func(query)
        return self.input_func(">")
-------------------------------------------------------------------------------- /tools/reasoning/__init__.py: --------------------------------------------------------------------------------
"""Tool for switching Appilot reasoning output."""
-------------------------------------------------------------------------------- /tools/reasoning/tool.py: --------------------------------------------------------------------------------
from config import config

from langchain.tools.base import BaseTool


class ShowReasoningTool(BaseTool):
    """Tool that show Appilot reasoning output."""

    name = "show_reasoning_output"
    description = "Show Appilot reasoning output."

    def _run(self, query: str) -> str:
        # Flip the global flag; the query argument is unused.
        config.set_show_reasoning(True)
        return "succeed."


class HideReasoningTool(BaseTool):
    """Tool that hide Appilot reasoning output."""

    name = "hide_reasoning_output"
    description = "Hide Appilot reasoning output."

    def _run(self, query: str) -> str:
        config.set_show_reasoning(False)
        return "succeed."
26 | -------------------------------------------------------------------------------- /utils/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from i18n import text 5 | 6 | from colorama import Fore, Style 7 | from rich.markdown import Markdown 8 | from rich.console import Console 9 | from datetime import datetime 10 | from dateutil import parser 11 | from datetime import timezone 12 | 13 | console = Console() 14 | 15 | inform_sent = False 16 | 17 | raw_format_prefix = "[raw]" 18 | 19 | 20 | def get_env(key: str, default: str = "") -> str: 21 | env = os.getenv(key) 22 | if env is None: 23 | return default 24 | return env.strip() 25 | 26 | 27 | def get_env_list(key: str, default: [] = []) -> list[str]: 28 | env = os.getenv(key) 29 | if env is None: 30 | return default 31 | return [item.strip() for item in env.split(",")] 32 | 33 | 34 | def get_env_bool(key: str, default: bool = False) -> bool: 35 | env = os.getenv(key) 36 | if env is None: 37 | return default 38 | else: 39 | return env.lower() in ["1", "true", "yes", "on"] 40 | 41 | 42 | def print_ai_reasoning(message): 43 | print(Fore.CYAN + text.get("ai_reasoning") + message + Style.RESET_ALL) 44 | 45 | 46 | def print_ai_inform(message): 47 | global inform_sent 48 | inform_sent = True 49 | # move cursor to the end of previous line 50 | sys.stdout.write("\033[F\033[1000C") 51 | print( 52 | Fore.LIGHTYELLOW_EX 53 | + "\n" 54 | + text.get("inform_prefix") 55 | + message 56 | + Style.RESET_ALL 57 | ) 58 | 59 | 60 | def is_inform_sent(): 61 | global inform_sent 62 | if inform_sent: 63 | inform_sent = False 64 | return True 65 | 66 | 67 | def print_ai_response(message): 68 | print(text.get("response_prefix"), end="") 69 | if message.startswith(raw_format_prefix): 70 | print(message[len(raw_format_prefix) :]) 71 | else: 72 | console.print(Markdown(message)) 73 | 74 | 75 | def print_rejected_message(): 76 | print(text.get("response_prefix"), 
end="") 77 | print(text.get("rejected_message")) 78 | 79 | 80 | def format_relative_time(iso_time): 81 | parsed_time = parser.isoparse(iso_time) 82 | current_time = datetime.now(timezone.utc) 83 | time_difference = current_time - parsed_time 84 | 85 | days = time_difference.days 86 | hours = int(time_difference.total_seconds() // 3600) 87 | minutes = int((time_difference.total_seconds() % 3600) // 60) 88 | 89 | if days > 0: 90 | return f"{days} Days ago" 91 | elif hours > 0: 92 | return f"{hours} Hours ago" 93 | elif minutes > 0: 94 | return f"{minutes} Minutes ago" 95 | else: 96 | return "Just now" 97 | -------------------------------------------------------------------------------- /walrus/client.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import List 3 | import click 4 | import requests 5 | from i18n import text 6 | from utils import utils 7 | 8 | 9 | class WalrusClient: 10 | """HTTP client for Walrus API.""" 11 | 12 | def __init__(self, api_url: str, api_key: str, **kwargs): 13 | self.api_url = api_url 14 | self.api_key = api_key 15 | self.request_args = kwargs 16 | 17 | def headers(self): 18 | """Get default headers.""" 19 | return { 20 | "Authorization": f"Bearer {self.api_key}", 21 | "Content-Type": "application/json", 22 | } 23 | 24 | def list_projects(self): 25 | """List projects.""" 26 | response = requests.get( 27 | url=self.api_url + "/v1/projects", 28 | params={"perPage": -1}, 29 | headers=self.headers(), 30 | **self.request_args, 31 | ) 32 | if response.status_code >= 400: 33 | raise Exception(f"Failed to list projects: {response.text}") 34 | 35 | return response.json()["items"] 36 | 37 | def get_project(self, project: str): 38 | """Get a project by id or name.""" 39 | response = requests.get( 40 | url=self.api_url + f"/v1/projects/{project}", 41 | headers=self.headers(), 42 | **self.request_args, 43 | ) 44 | if response.status_code >= 400: 45 | raise Exception( 46 | f"Failed to 
get project {project}: {response.text}" 47 | ) 48 | 49 | return response.json() 50 | 51 | def list_environments(self, project_id: str): 52 | """List environments.""" 53 | params = { 54 | "perPage": -1, 55 | } 56 | response = requests.get( 57 | url=self.api_url + f"/v1/projects/{project_id}/environments", 58 | params=params, 59 | headers=self.headers(), 60 | **self.request_args, 61 | ) 62 | if response.status_code >= 400: 63 | raise Exception(f"Failed to list environments: {response.text}") 64 | 65 | return response.json()["items"] 66 | 67 | def get_environment(self, project_id: str, environment: str): 68 | """Get an environment by id or name.""" 69 | response = requests.get( 70 | url=self.api_url 71 | + f"/v1/projects/{project_id}/environments/{environment}", 72 | headers=self.headers(), 73 | **self.request_args, 74 | ) 75 | if response.status_code >= 400: 76 | raise Exception( 77 | f"Failed to get environment {environment}: {response.text}" 78 | ) 79 | 80 | return response.json() 81 | 82 | def create_environment(self, project_id: str, data): 83 | """Create an environment in a project.""" 84 | 85 | response = requests.post( 86 | url=self.api_url + f"/v1/projects/{project_id}/environments", 87 | headers=self.headers(), 88 | json=data, 89 | **self.request_args, 90 | ) 91 | if response.status_code >= 400: 92 | raise Exception(f"Failed to create environment: {response.text}") 93 | 94 | return response.text 95 | 96 | def delete_environments(self, project_id: str, ids: List[str]): 97 | """Delete one or multiple environments.""" 98 | items = [] 99 | for id in ids: 100 | items.append({"id": id}) 101 | 102 | body = {"items": items} 103 | response = requests.delete( 104 | url=self.api_url + f"/v1/projects/{project_id}/environments", 105 | headers=self.headers(), 106 | json=body, 107 | **self.request_args, 108 | ) 109 | if response.status_code >= 400: 110 | raise Exception(f"Failed to delete environment: {response.text}") 111 | 112 | return response.text 113 | 114 | def 
get_environment_graph(self, project_id: str, environment_id: str): 115 | """Get environment dependency graph.""" 116 | response = requests.get( 117 | url=self.api_url 118 | + f"/v1/projects/{project_id}/environments/{environment_id}/graph", 119 | headers=self.headers(), 120 | **self.request_args, 121 | ) 122 | if response.status_code >= 400: 123 | raise Exception( 124 | f"Failed to get environment dependency graph: {response.text}" 125 | ) 126 | 127 | return response.json() 128 | 129 | def list_services(self, project_id: str, environment_id: str): 130 | """List services in a project and environment.""" 131 | params = { 132 | "perPage": -1, 133 | } 134 | 135 | response = requests.get( 136 | url=self.api_url 137 | + f"/v1/projects/{project_id}/environments/{environment_id}/services", 138 | params=params, 139 | headers=self.headers(), 140 | **self.request_args, 141 | ) 142 | if response.status_code >= 400: 143 | raise Exception(f"Failed to list services: {response.text}") 144 | 145 | return response.json()["items"] 146 | 147 | def watch_services(self, project_id: str, environment_id: str): 148 | """Watch services in a project and environment.""" 149 | 150 | def align_and_echo(data_list, width=30): 151 | aligned_data = [item.ljust(width) for item in data_list] 152 | click.echo("".join(aligned_data)) 153 | 154 | def print_service(s): 155 | align_and_echo( 156 | [ 157 | s.get("name"), 158 | s.get("template").get("name"), 159 | s.get("status").get("summaryStatus"), 160 | utils.format_relative_time(s.get("createTime")), 161 | ] 162 | ) 163 | 164 | services = self.list_services(project_id, environment_id) 165 | click.echo(text.get("watch_service_note")) 166 | align_and_echo( 167 | [ 168 | "NAME", 169 | "TEMPLATE", 170 | "STATUS", 171 | "CREATE TIME", 172 | ] 173 | ) 174 | for service in services: 175 | print_service(service) 176 | 177 | params = { 178 | "perPage": -1, 179 | "watch": "true", 180 | } 181 | 182 | response = requests.get( 183 | url=self.api_url 184 | + 
f"/v1/projects/{project_id}/environments/{environment_id}/services", 185 | params=params, 186 | headers=self.headers(), 187 | **self.request_args, 188 | stream=True, 189 | ) 190 | if response.status_code >= 400: 191 | raise Exception(f"Failed to list services: {response.text}") 192 | 193 | for chunk in response.iter_content(chunk_size=None): 194 | event = json.loads(chunk.decode("utf-8")) 195 | if "items" in event: 196 | for item in event["items"]: 197 | print_service(item) 198 | 199 | def list_services_in_all_environments(self, project_id: str): 200 | """List services in all environments of a project.""" 201 | params = { 202 | "perPage": -1, 203 | } 204 | 205 | services = [] 206 | envs = self.list_environments(project_id) 207 | for env in envs: 208 | response = requests.get( 209 | url=self.api_url 210 | + f"/v1/projects/{project_id}/environments/{env['id']}/services", 211 | params=params, 212 | headers=self.headers(), 213 | **self.request_args, 214 | ) 215 | if response.status_code >= 400: 216 | raise Exception(f"Failed to list services: {response.text}") 217 | 218 | services_in_env = response.json()["items"] 219 | if services_in_env is None or len(services_in_env) == 0: 220 | continue 221 | 222 | services.extend(response.json()["items"]) 223 | 224 | return services 225 | 226 | def get_service_by_name( 227 | self, project_id: str, environment_id: str, service_name: str 228 | ): 229 | """Get a service by name.""" 230 | 231 | response = requests.get( 232 | url=self.api_url 233 | + f"/v1/projects/{project_id}/environments/{environment_id}/services/{service_name}", 234 | headers=self.headers(), 235 | **self.request_args, 236 | ) 237 | if response.status_code >= 400: 238 | raise Exception(f"Failed to get service: {response.text}") 239 | 240 | return response.json() 241 | 242 | def create_service(self, project_id: str, environment_id: str, data): 243 | """Create a service in a project and environment.""" 244 | 245 | response = requests.post( 246 | url=self.api_url 247 | 
+ f"/v1/projects/{project_id}/environments/{environment_id}/services", 248 | headers=self.headers(), 249 | json=data, 250 | **self.request_args, 251 | ) 252 | if response.status_code >= 400: 253 | raise Exception(f"Failed to create service: {response.text}") 254 | 255 | return response.text 256 | 257 | def update_service(self, project_id: str, environment_id: str, data): 258 | """Update a service in a project and environment.""" 259 | try: 260 | service = json.loads(data) 261 | except json.JSONDecodeError as e: 262 | raise e 263 | 264 | response = requests.put( 265 | url=self.api_url 266 | + f"/v1/projects/{project_id}/environments/{environment_id}/services/{service['id']}/upgrade", 267 | headers=self.headers(), 268 | data=data, 269 | **self.request_args, 270 | ) 271 | if response.status_code >= 400: 272 | raise Exception(f"Failed to update service: {response.text}") 273 | 274 | return response.text 275 | 276 | def delete_services( 277 | self, project_id: str, environment_id: str, ids: List[str] 278 | ): 279 | """Delete one or multiple services.""" 280 | items = [] 281 | for id in ids: 282 | items.append({"id": id}) 283 | 284 | body = {"items": items} 285 | response = requests.delete( 286 | url=self.api_url 287 | + f"/v1/projects/{project_id}/environments/{environment_id}/services", 288 | headers=self.headers(), 289 | json=body, 290 | **self.request_args, 291 | ) 292 | if response.status_code >= 400: 293 | raise Exception(f"Failed to delete service: {response.text}") 294 | 295 | return response.text 296 | 297 | def get_service_access_endpoints( 298 | self, project_id: str, environment_id: str, service_id: str 299 | ): 300 | """Get access endpoints of a service.""" 301 | response = requests.get( 302 | url=self.api_url 303 | + f"/v1/projects/{project_id}/environments/{environment_id}/services/{service_id}/access-endpoints", 304 | headers=self.headers(), 305 | **self.request_args, 306 | ) 307 | if response.status_code >= 400: 308 | raise Exception( 309 | f"Failed to 
get service access endpoints: {response.text}" 310 | ) 311 | 312 | return response.text 313 | 314 | def list_service_resources( 315 | self, project_id: str, environment_id: str, service_id: str 316 | ): 317 | """List resources of a service.""" 318 | response = requests.get( 319 | url=self.api_url 320 | + f"/v1/projects/{project_id}/environments/{environment_id}/services/{service_id}/resources", 321 | headers=self.headers(), 322 | **self.request_args, 323 | ) 324 | if response.status_code >= 400: 325 | raise Exception( 326 | f"Failed to get service resources: {response.text}" 327 | ) 328 | 329 | return response.json()["items"] 330 | 331 | def get_service_resource_keys( 332 | self, 333 | project_id: str, 334 | environment_id: str, 335 | service_id: str, 336 | service_resource_id: str, 337 | ): 338 | """Get keys of a service resource.""" 339 | response = requests.get( 340 | url=self.api_url 341 | + f"/v1/projects/{project_id}/environments/{environment_id}/services/{service_id}/resources/{service_resource_id}/keys", 342 | headers=self.headers(), 343 | **self.request_args, 344 | ) 345 | if response.status_code >= 400: 346 | raise Exception( 347 | f"Failed to get service resource keys: {response.text}" 348 | ) 349 | 350 | return response.text 351 | 352 | def get_service_resource_logs( 353 | self, 354 | project_id: str, 355 | environment_id: str, 356 | service_id: str, 357 | service_resource_id: str, 358 | key: str, 359 | line_number: int, 360 | ): 361 | """Get logs of a service resource.""" 362 | params = { 363 | "key": key, 364 | "tailLines": line_number, 365 | } 366 | 367 | response = requests.get( 368 | url=self.api_url 369 | + f"/v1/projects/{project_id}/environments/{environment_id}/services/{service_id}/resources/{service_resource_id}/log", 370 | params=params, 371 | headers=self.headers(), 372 | **self.request_args, 373 | ) 374 | if response.status_code >= 400: 375 | raise Exception( 376 | f"Failed to get service resource logs: {response.text}" 377 | ) 378 | 379 | 
return response.text 380 | 381 | def list_templates(self): 382 | """List templates.""" 383 | response = requests.get( 384 | url=self.api_url + "/v1/templates", 385 | params={"perPage": -1}, 386 | headers=self.headers(), 387 | **self.request_args, 388 | ) 389 | if response.status_code >= 400: 390 | raise Exception(f"Failed to list templates: {response.text}") 391 | 392 | templates = response.json()["items"] 393 | for template in templates: 394 | del template["createTime"] 395 | del template["updateTime"] 396 | del template["status"] 397 | del template["source"] 398 | return json.dumps(templates) 399 | 400 | def get_template_version(self, template: str): 401 | """Get latest template version given template id or name.""" 402 | response = requests.get( 403 | url=self.api_url + f"/v1/templates/{template}/versions", 404 | params={"perPage": -1}, 405 | headers=self.headers(), 406 | **self.request_args, 407 | ) 408 | if response.status_code >= 400: 409 | raise Exception( 410 | f"Failed to list versions of template {template}: {response.text}" 411 | ) 412 | 413 | template_versions = response.json()["items"] 414 | if len(template_versions) == 0: 415 | raise Exception("Template version not found") 416 | 417 | keys_to_remove = [ 418 | "readme", 419 | "outputs", 420 | "requiredProviders", 421 | "createTime", 422 | "updateTime", 423 | "id", 424 | "source", 425 | ] 426 | 427 | template_version = response.json()["items"][0] 428 | # remove keys that are not needed to make prompt neat 429 | 430 | for key in keys_to_remove: 431 | if key in template_version: 432 | del template_version[key] 433 | if key in template_version["schema"]: 434 | del template_version["schema"][key] 435 | 436 | return json.dumps(template_version) 437 | -------------------------------------------------------------------------------- /walrus/context.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | from walrus.client import 
WalrusClient 4 | 5 | 6 | class Context(BaseModel): 7 | project_id: str = "" 8 | project_name: str = "" 9 | environment_id: str = "" 10 | environment_name: str = "" 11 | 12 | 13 | GLOBAL_CONTEXT: Context 14 | 15 | 16 | def set_default( 17 | walrus_url: str, 18 | walrus_api_key: str, 19 | default_project: str = "", 20 | default_environment: str = "", 21 | ) -> Context: 22 | walrus_client = WalrusClient( 23 | walrus_url, 24 | walrus_api_key, 25 | verify=False, 26 | ) 27 | if default_project != "" and default_environment != "": 28 | project = walrus_client.get_project(default_project) 29 | environment = walrus_client.get_environment( 30 | default_project, default_environment 31 | ) 32 | else: 33 | # Get the first project and environment if not specified. 34 | projects = walrus_client.list_projects() 35 | if projects is None or len(projects) == 0: 36 | raise Exception("No available project. A project is required.") 37 | project = projects[0] 38 | environments = walrus_client.list_environments(project.get("id")) 39 | if environments is None or len(environments) == 0: 40 | raise Exception( 41 | "No aviailable environment. An environment is required." 
42 | ) 43 | environment = environments[0] 44 | 45 | global GLOBAL_CONTEXT 46 | GLOBAL_CONTEXT = Context( 47 | project_id=project.get("id"), 48 | project_name=project.get("name"), 49 | environment_id=environment.get("id"), 50 | environment_name=environment.get("name"), 51 | ) 52 | 53 | 54 | def update_context(context): 55 | global GLOBAL_CONTEXT 56 | if ( 57 | context.get("project_id") is not None 58 | and context.get("project_name") != "" 59 | ): 60 | GLOBAL_CONTEXT.project_id = context.get("project_id") 61 | GLOBAL_CONTEXT.project_name = context.get("project_name") 62 | if ( 63 | context.get("environment_id") is not None 64 | and context.get("environment_name") != "" 65 | ): 66 | GLOBAL_CONTEXT.environment_id = context.get("environment_id") 67 | GLOBAL_CONTEXT.environment_name = context.get("environment_name") 68 | -------------------------------------------------------------------------------- /walrus/toolkit.py: -------------------------------------------------------------------------------- 1 | import urllib3 2 | from walrus import context 3 | from walrus.tools.general.tools import BrowseURLTool 4 | from walrus.tools.manage_context.tool import ( 5 | ChangeContextTool, 6 | CurrentContextTool, 7 | ) 8 | from walrus.tools.manage_environment.tool import ( 9 | CloneEnvironmentTool, 10 | DeleteEnvironmentsTool, 11 | GetEnvironmentDependencyGraphTool, 12 | ListEnvironmentsTool, 13 | ) 14 | from walrus.tools.manage_project.tool import ListProjectsTool 15 | from walrus.tools.manage_service.tool import ( 16 | ConstructServiceToCreateTool, 17 | ConstructServiceToUpdateTool, 18 | CreateServiceTool, 19 | DeleteServicesTool, 20 | GetServiceAccessEndpointsTool, 21 | GetServiceDependencyGraphTool, 22 | GetServiceResourceLogsReturnDirectTool, 23 | GetServiceResourceLogsTool, 24 | GetServicesTool, 25 | InformServiceReadyTool, 26 | ListServiceResourcesTool, 27 | ListServicesInAllEnvironmentsTool, 28 | ListServicesTool, 29 | UpdateServiceTool, 30 | WatchServicesTool, 31 | ) 32 | 
from walrus.tools.manage_template.tool import ( 33 | GetTemplateSchemaTool, 34 | MatchTemplateTool, 35 | ) 36 | from walrus.client import WalrusClient 37 | from langchain.schema.language_model import BaseLanguageModel 38 | from utils import utils 39 | 40 | 41 | class WalrusToolKit: 42 | """Walrus toolkit.""" 43 | 44 | walrus_client: WalrusClient 45 | llm: BaseLanguageModel 46 | 47 | def __init__(self, llm: BaseLanguageModel): 48 | self.llm = llm 49 | self.init_client() 50 | 51 | def init_client(self): 52 | walrus_api_key = utils.get_env("WALRUS_API_KEY") 53 | walrus_url = utils.get_env("WALRUS_URL") 54 | walrus_default_project = utils.get_env("WALRUS_DEFAULT_PROJECT") 55 | walrus_default_environment = utils.get_env( 56 | "WALRUS_DEFAULT_ENVIRONMENT" 57 | ) 58 | walrus_skip_tls_verify = utils.get_env_bool( 59 | "WALRUS_SKIP_TLS_VERIFY", False 60 | ) 61 | if walrus_skip_tls_verify: 62 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 63 | 64 | if not walrus_url: 65 | raise Exception("WALRUS_URL is not set") 66 | if not walrus_api_key: 67 | raise Exception("WALRUS_API_KEY is not set") 68 | 69 | self.walrus_client = WalrusClient( 70 | walrus_url, 71 | walrus_api_key, 72 | verify=(not walrus_skip_tls_verify), 73 | ) 74 | context.set_default( 75 | walrus_url=walrus_url, 76 | walrus_api_key=walrus_api_key, 77 | default_project=walrus_default_project, 78 | default_environment=walrus_default_environment, 79 | ) 80 | 81 | def get_tools(self): 82 | walrus_client = self.walrus_client 83 | llm = self.llm 84 | tools = [ 85 | CurrentContextTool(), 86 | ChangeContextTool(walrus_client=walrus_client), 87 | ListProjectsTool(walrus_client=walrus_client), 88 | ListEnvironmentsTool(walrus_client=walrus_client), 89 | DeleteEnvironmentsTool(walrus_client=walrus_client), 90 | CloneEnvironmentTool(walrus_client=walrus_client), 91 | GetEnvironmentDependencyGraphTool( 92 | walrus_client=walrus_client, return_direct=True 93 | ), 94 | MatchTemplateTool(llm=llm, 
walrus_client=walrus_client), 95 | GetTemplateSchemaTool(walrus_client=walrus_client), 96 | ConstructServiceToCreateTool(llm=llm, walrus_client=walrus_client), 97 | ConstructServiceToUpdateTool(llm=llm, walrus_client=walrus_client), 98 | GetServicesTool(walrus_client=walrus_client), 99 | ListServicesTool(walrus_client=walrus_client), 100 | WatchServicesTool(walrus_client=walrus_client, return_direct=True), 101 | InformServiceReadyTool( 102 | walrus_client=walrus_client, return_direct=True 103 | ), 104 | ListServicesInAllEnvironmentsTool(walrus_client=walrus_client), 105 | CreateServiceTool(walrus_client=walrus_client), 106 | UpdateServiceTool(walrus_client=walrus_client), 107 | DeleteServicesTool(walrus_client=walrus_client), 108 | ListServiceResourcesTool(walrus_client=walrus_client), 109 | GetServiceResourceLogsTool(walrus_client=walrus_client), 110 | GetServiceResourceLogsReturnDirectTool( 111 | walrus_client=walrus_client, return_direct=True 112 | ), 113 | GetServiceAccessEndpointsTool(walrus_client=walrus_client), 114 | BrowseURLTool(), 115 | GetServiceDependencyGraphTool(walrus_client=walrus_client), 116 | ] 117 | return tools 118 | -------------------------------------------------------------------------------- /walrus/tools/base/tools.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from langchain.agents.tools import BaseTool 3 | from walrus.client import WalrusClient 4 | 5 | 6 | class WalrusTool(BaseTool): 7 | """Tool to interacte with Walrus APIs.""" 8 | 9 | walrus_client: WalrusClient 10 | -------------------------------------------------------------------------------- /walrus/tools/general/__init__.py: -------------------------------------------------------------------------------- 1 | """General Tools.""" 2 | -------------------------------------------------------------------------------- /walrus/tools/general/tools.py: 
-------------------------------------------------------------------------------- 1 | from langchain.agents.tools import BaseTool 2 | import webbrowser 3 | 4 | 5 | class BrowseURLTool(BaseTool): 6 | """Tool to access a URL in browser.""" 7 | 8 | name = "open_url_in_browser" 9 | description = "Open a URL in browser. Input is a URL." 10 | 11 | def _run(self, url: str) -> str: 12 | webbrowser.open(url) 13 | return "The URL is opened in browser." 14 | -------------------------------------------------------------------------------- /walrus/tools/manage_context/__init__.py: -------------------------------------------------------------------------------- 1 | """Tool for managing Walrus project and environment context.""" 2 | -------------------------------------------------------------------------------- /walrus/tools/manage_context/tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | from langchain.agents.tools import BaseTool 3 | from walrus.client import WalrusClient 4 | from walrus import context as walrus_context 5 | 6 | 7 | class CurrentContextTool(BaseTool): 8 | """Tool to get current project and environment context.""" 9 | 10 | name = "current_context" 11 | description = "Get current project and environment context." 12 | 13 | def _run(self, query: str) -> str: 14 | return json.dumps(walrus_context.GLOBAL_CONTEXT.__dict__) 15 | 16 | 17 | class ChangeContextTool(BaseTool): 18 | """Tool to change project and environment context.""" 19 | 20 | name = "change_context" 21 | description = ( 22 | "Change project and environment context." 23 | "Input should be a json string with 2 keys: " 24 | "project_name, environment_name." 25 | "If users did not specify any of the two keys, leave it empty." 
26 | ) 27 | walrus_client: WalrusClient 28 | 29 | def _run(self, text: str) -> str: 30 | try: 31 | context = json.loads(text) 32 | except Exception as e: 33 | return e 34 | 35 | project_id = walrus_context.GLOBAL_CONTEXT.project_id 36 | if "project_name" in context and context["project_name"] != "": 37 | try: 38 | project = self.walrus_client.get_project( 39 | context["project_name"] 40 | ) 41 | except Exception as e: 42 | return e 43 | project_id = project["id"] 44 | context["project_id"] = project_id 45 | 46 | if "environment_name" in context and context["environment_name"] != "": 47 | try: 48 | environment = self.walrus_client.get_environment( 49 | project_id, context["environment_name"] 50 | ) 51 | except Exception as e: 52 | return e 53 | context["environment_id"] = environment["id"] 54 | 55 | walrus_context.update_context(context) 56 | -------------------------------------------------------------------------------- /walrus/tools/manage_environment/__init__.py: -------------------------------------------------------------------------------- 1 | """Tool for managing environments.""" 2 | -------------------------------------------------------------------------------- /walrus/tools/manage_environment/tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from langchain.agents.tools import BaseTool 4 | from i18n import text 5 | from tools.base.tools import RequireApprovalTool 6 | import pydot 7 | from PIL import Image 8 | 9 | from walrus.client import WalrusClient 10 | from walrus import context as walrus_context 11 | 12 | 13 | class ListEnvironmentsTool(BaseTool): 14 | """Tool to list environments.""" 15 | 16 | name = "list_environments" 17 | description = ( 18 | "List environments of a project." 19 | "Input should be a project id or an empty string indicating current project in the context." 
20 | ) 21 | walrus_client: WalrusClient 22 | 23 | def _run(self, project_id: str) -> str: 24 | if project_id == "": 25 | project_id = walrus_context.GLOBAL_CONTEXT.project_id 26 | try: 27 | environments = self.walrus_client.list_environments(project_id) 28 | except Exception as e: 29 | return e 30 | if environments is not None and len(environments) > 0: 31 | return json.dumps(environments) 32 | return "No environments found." 33 | 34 | 35 | class DeleteEnvironmentsTool(RequireApprovalTool): 36 | """Tool to delete environments.""" 37 | 38 | name = "delete_environments" 39 | description = 'Delete one or multiple environments. Input should be a list of object, each object contains 2 keys, "name" and "id" of an environment.' 40 | walrus_client: WalrusClient 41 | 42 | def _run(self, text: str) -> str: 43 | try: 44 | environments = json.loads(text) 45 | except Exception as e: 46 | raise e 47 | 48 | ids = [env["id"] for env in environments if "id" in env] 49 | project_id = walrus_context.GLOBAL_CONTEXT.project_id 50 | try: 51 | self.walrus_client.delete_environments(project_id, ids) 52 | except Exception as e: 53 | return e 54 | 55 | return "Deletion started." 56 | 57 | 58 | class GetEnvironmentDependencyGraphTool(BaseTool): 59 | """Tool to get environment dependency graph.""" 60 | 61 | name = "get_environment_dependency_graph" 62 | description = "Get dependency graph of an environment. Input should be name or id of an environment." 
63 | walrus_client: WalrusClient 64 | 65 | def show_graph(self, graph_data: dict): 66 | node_shape_map = { 67 | "Service": "box", 68 | "ServiceResourceGroup": "ellipse", 69 | "ServiceResource": "ellipse", 70 | } 71 | edge_style_map = { 72 | "Composition": "composition", 73 | "Realization": "dotted", 74 | "Dependency": "dashed", 75 | } 76 | 77 | node_height = 1 78 | graph = pydot.Dot(graph_type="digraph") 79 | node_ids = [] 80 | for vertex in graph_data["vertices"]: 81 | label_suffix = "" 82 | if vertex["kind"] == "Service": 83 | label_suffix = "
service" 84 | else: 85 | label_suffix = f"
{vertex['extensions']['type']}" 86 | 87 | graph.add_node( 88 | pydot.Node( 89 | name=vertex["id"], 90 | label=f"<{vertex['name']}{label_suffix}>", 91 | shape=node_shape_map[vertex["kind"]], 92 | height=node_height, 93 | ) 94 | ) 95 | node_ids.append(vertex["id"]) 96 | 97 | edge_ids = [] 98 | for edge in graph_data["edges"]: 99 | # FIXME: This is a hack to avoid edges that are not in the graph. resolve in API. 100 | if ( 101 | edge.get("start").get("id") not in node_ids 102 | or edge.get("end").get("id") not in node_ids 103 | ): 104 | continue 105 | # FIXME: This is a hack to avoid duplicate edges. resolve in API. 106 | edge_id = edge.get("start").get("id") + edge.get("end").get("id") 107 | if edge_id in edge_ids: 108 | continue 109 | 110 | graph.add_edge( 111 | pydot.Edge( 112 | edge.get("start").get("id"), 113 | edge.get("end").get("id"), 114 | style=edge_style_map[edge.get("type")], 115 | ) 116 | ) 117 | edge_ids.append(edge_id) 118 | 119 | output_directory = "/tmp/appilot" 120 | if not os.path.exists(output_directory): 121 | os.makedirs(output_directory) 122 | image_path = os.path.join(output_directory, "dependency_graph.png") 123 | graph.write_png(image_path) 124 | image = Image.open(image_path) 125 | image.show() 126 | 127 | def _run(self, environment: str) -> str: 128 | project_id = walrus_context.GLOBAL_CONTEXT.project_id 129 | if environment is None or environment == "": 130 | environment = walrus_context.GLOBAL_CONTEXT.environment_id 131 | 132 | graph_data = self.walrus_client.get_environment_graph( 133 | project_id, environment 134 | ) 135 | self.show_graph(graph_data) 136 | 137 | return text.get("show_graph_message") 138 | 139 | 140 | class CloneEnvironmentTool(RequireApprovalTool): 141 | """Tool to clone an environment.""" 142 | 143 | name = "clone_environment" 144 | description = ( 145 | "Clone an environment to a new one. " 146 | 'Input should be a json string with two keys: "original_environment_name" and "target_environment_name".' 
147 | ) 148 | 149 | walrus_client: WalrusClient 150 | 151 | def _run(self, text: str) -> str: 152 | try: 153 | data = json.loads(text) 154 | except Exception as e: 155 | return e 156 | 157 | project_id = walrus_context.GLOBAL_CONTEXT.project_id 158 | original_environment_name = data.get("original_environment_name") 159 | target_environment_name = data.get("target_environment_name") 160 | 161 | try: 162 | environment = self.walrus_client.get_environment( 163 | project_id, original_environment_name 164 | ) 165 | services = self.walrus_client.list_services( 166 | project_id, original_environment_name 167 | ) 168 | environment["name"] = target_environment_name 169 | environment["services"] = services 170 | 171 | self.walrus_client.create_environment(project_id, environment) 172 | except Exception as e: 173 | return e 174 | 175 | return "Successfully cloned." 176 | -------------------------------------------------------------------------------- /walrus/tools/manage_project/__init__.py: -------------------------------------------------------------------------------- 1 | """Tool for managing projects.""" 2 | -------------------------------------------------------------------------------- /walrus/tools/manage_project/tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | from langchain.agents.tools import BaseTool 3 | from walrus.client import WalrusClient 4 | 5 | 6 | class ListProjectsTool(BaseTool): 7 | """Tool to list projects.""" 8 | 9 | name = "list_projects" 10 | description = "List projects." 
11 | walrus_client: WalrusClient 12 | 13 | def _run(self, query: str) -> str: 14 | try: 15 | projects = self.walrus_client.list_projects() 16 | except Exception as e: 17 | return e 18 | return json.dumps(projects) 19 | -------------------------------------------------------------------------------- /walrus/tools/manage_service/__init__.py: -------------------------------------------------------------------------------- 1 | """Tools for managing services.""" 2 | -------------------------------------------------------------------------------- /walrus/tools/manage_service/prompt.py: -------------------------------------------------------------------------------- 1 | CONSTRUCT_SERVICE_TO_CREATE_PROMPT = """ 2 | You are a planner that constructs the expected service object given a user query describing a deployment task. 3 | For your reference, you will be provided existing services and related template version if there's any in the environment. Choose a template and fill in the input variables for the service. 4 | 5 | You should: 6 | 1) evaluate whether the service object can be constructed according to the user query. If no, say why. 7 | 2) if yes, output in the following format: 8 | 3) for sensitive attributes, reference existing variables in the format `${{var.variable_name}}`. 9 | 10 | CONSTRUCTED SERVICE: 11 | 12 | Strictly follow the above output format, do not add extra explanation or words. 13 | Service id is needed in upgrade cases. 14 | Environment info is needed in any cases. 15 | Service status is not needed in any cases. 16 | Do not miss the environment info in the constructed service object. 17 | The output will be passed to an API controller that can format it into web requests and return the responses. 
18 | 19 | Example: 20 | 21 | CONSTRUCTED SERVICE: {{"name":"example","template":{{"name":"webservice","version":"0.0.4"}},"environment":{{"id":"1234567"}},"attributes":{{"image":"nginx","ports":[80],"request_cpu":"0.1","request_memory":"128Mi"}}}} 22 | 23 | ---- 24 | 25 | Context: {context} 26 | User query: {query} 27 | 28 | EXISTING SERVICES: 29 | {existing_services} 30 | 31 | RELATED TEMPLATE: 32 | {related_template} 33 | 34 | """ 35 | 36 | 37 | CONSTRUCT_SERVICE_TO_UPDATE_PROMPT = """ 38 | You are a planner that constructs the expected service object given a user query describing an upgrade task. 39 | For your reference, you will be provided the service about to upgrade and related template version if there's any in the environment. Choose a template and fill in the input variables for the service. 40 | 41 | You should: 42 | 1) evaluate whether the service object can be constructed according to the user query. If no, say why. 43 | 2) if yes, output in the following format: 44 | 45 | CONSTRUCTED SERVICE: 46 | 47 | Strictly follow the above output format, do not add extra explanation or words. 48 | Service id is required. 49 | Environment info is required. 50 | Service status is not needed. 51 | The output will be used in the update API call of the service. 
52 | 53 | Example: 54 | 55 | CONSTRUCTED SERVICE: {{"name":"example","template":{{"name":"webservice","version":"0.0.4"}},"environment":{{"id":"1234567"}},"attributes":{{"image":"nginx","ports":[80],"request_cpu":"0.1","request_memory":"128Mi"}}}} 56 | 57 | ---- 58 | 59 | Context: {context} 60 | User query: {query} 61 | 62 | CURRENT SERVICE: 63 | {current_service} 64 | 65 | RELATED TEMPLATE: 66 | {related_template} 67 | 68 | """ 69 | -------------------------------------------------------------------------------- /walrus/tools/manage_service/tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | import threading 3 | import time 4 | 5 | from utils import utils 6 | from i18n import text 7 | from walrus.client import WalrusClient 8 | from langchain.agents.tools import BaseTool 9 | from langchain import LLMChain 10 | from langchain.prompts import PromptTemplate 11 | from langchain.schema.language_model import BaseLanguageModel 12 | from tools.base.tools import RequireApprovalTool 13 | from walrus import context as walrus_context 14 | from walrus.tools.manage_service.prompt import ( 15 | CONSTRUCT_SERVICE_TO_CREATE_PROMPT, 16 | CONSTRUCT_SERVICE_TO_UPDATE_PROMPT, 17 | ) 18 | 19 | 20 | class ListServicesTool(BaseTool): 21 | """Tool to list services.""" 22 | 23 | name = "list_services" 24 | description = "List services in current environment." 25 | walrus_client: WalrusClient 26 | 27 | def _run(self, query: str) -> str: 28 | project_id = walrus_context.GLOBAL_CONTEXT.project_id 29 | environment_id = walrus_context.GLOBAL_CONTEXT.environment_id 30 | try: 31 | services = self.walrus_client.list_services( 32 | project_id, environment_id 33 | ) 34 | except Exception as e: 35 | return e 36 | 37 | if services is not None and len(services) > 0: 38 | return json.dumps(services) 39 | 40 | return "No services found." 
class WatchServicesTool(BaseTool):
    """Tool to watch services."""

    name = "watch_services"
    description = "Watch service changes in current environment."
    walrus_client: WalrusClient

    def _run(self, query: str) -> str:
        """Stream service change events until the user interrupts (Ctrl+C)."""
        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id

        try:
            # Blocks while the client streams change events.
            self.walrus_client.watch_services(project_id, environment_id)
        except KeyboardInterrupt:
            # Ctrl+C detected. Stop the request and end the line cleanly.
            print("")

        return text.get("watch_service_ending")


class InformServiceReadyTool(BaseTool):
    """Tool to inform user when service becomes ready."""

    name = "inform_service_ready"
    description = "Inform user when a service becomes ready. Input should be name or id of a service."
    walrus_client: WalrusClient

    def watch_service_ready(self, input: str):
        """Poll the service until it reports Ready, then notify the user.

        Runs in a background thread started by ``_run``. Polls every 3
        seconds and gives up silently after a 10-minute timeout.
        """
        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
        start_time = time.time()
        timeout = 600  # seconds; stop polling after 10 minutes
        interval = 3
        while True:
            time.sleep(interval)
            if time.time() - start_time > timeout:
                break
            service = self.walrus_client.get_service_by_name(
                project_id, environment_id, input
            )
            # Guard against a missing/None "status" field: the original
            # chained .get() calls raised AttributeError in that case.
            status = (service.get("status") or {}).get("summaryStatus")
            if status == "Ready":
                utils.print_ai_inform(
                    text.get("service_ready_message").format(input)
                )
                break

    def _run(self, input: str) -> str:
        """Start the background watcher and return immediately."""
        # Fire-and-forget: poll in a thread so the agent is not blocked.
        threading.Thread(
            target=self.watch_service_ready, args=(input,)
        ).start()
        return text.get("inform_ready_start")


class ListServicesInAllEnvironmentsTool(BaseTool):
    """Tool to list services in all environments."""

    name = "list_services_in_all_environments"
    description = "List services in all environments of current project."
    walrus_client: WalrusClient

    def _run(self, query: str) -> str:
        """Return services across all environments of the current project."""
        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        try:
            services = self.walrus_client.list_services_in_all_environments(
                project_id
            )
        except Exception as e:
            # Return the message text, not the exception object (-> str).
            return str(e)

        if services:
            return json.dumps(services)

        return "No services found."


class GetServicesTool(BaseTool):
    """Tool to get a service."""

    name = "get_service"
    description = "Get a service object. Input should be a service name."
    walrus_client: WalrusClient

    def _run(self, query: str) -> str:
        """Look up a service by name and return it as a JSON string."""
        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
        try:
            service = self.walrus_client.get_service_by_name(
                project_id, environment_id, query
            )
        except Exception as e:
            return str(e)

        return json.dumps(service)


class CreateServiceTool(RequireApprovalTool):
    """Tool to create a service (requires user approval)."""

    name = "create_service"
    description = (
        "Create a service."
        "Input should be a service object in json format."
        'Output a json string with 2 keys, "id" and "name" of the service.'
    )
    walrus_client: WalrusClient

    def _run(self, text: str) -> str:
        """Create the service described by the JSON input.

        NOTE(review): the parameter name ``text`` shadows the i18n ``text``
        module; kept unchanged for interface compatibility.
        """
        # Invalid JSON propagates to the caller, exactly as the previous
        # `except ... raise e` wrapper did.
        service = json.loads(text)

        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
        try:
            self.walrus_client.create_service(
                project_id, environment_id, service
            )
        except Exception as e:
            return str(e)

        return "Successfully created."
class UpdateServiceTool(RequireApprovalTool):
    """Tool to update a service (requires user approval)."""

    name = "update_service"
    description = (
        "Update a service."
        "Input should be a service object in json format."
        'Output a json string with 2 keys, "id" and "name" of the service.'
    )
    walrus_client: WalrusClient

    def _run(self, text: str) -> str:
        """Send the raw JSON input to the update API.

        NOTE(review): unlike CreateServiceTool, the input is passed through
        without json.loads — presumably the client expects the raw string;
        confirm against WalrusClient.update_service.
        """
        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
        try:
            self.walrus_client.update_service(project_id, environment_id, text)
        except Exception as e:
            # Return the message text, not the exception object (-> str).
            return str(e)

        return "Successfully updated."


class DeleteServicesTool(RequireApprovalTool):
    """Tool to delete one or multiple services (requires user approval)."""

    name = "delete_services"
    description = 'Delete one or multiple services. Input should be a list of object, each object contains 2 keys, "name" and "id" of a service.'
    walrus_client: WalrusClient

    def _run(self, query: str) -> str:
        """Delete the services identified by the "id" fields of the input."""
        # Invalid JSON propagates to the caller (same effect as the previous
        # `except ... raise e` wrapper).
        services = json.loads(query)

        # Entries without an "id" are silently skipped.
        ids = [service["id"] for service in services if "id" in service]
        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
        try:
            self.walrus_client.delete_services(project_id, environment_id, ids)
        except Exception as e:
            return str(e)

        # Deletion is asynchronous on the server side.
        return "Deletion started."


class GetServiceAccessEndpointsTool(BaseTool):
    """Tool to get access endpoints of a service."""

    name = "get_service_access_endpoints"
    description = (
        "Get access endpoints of a service."
        "Input should be id of a service."
        "Output service access endpoints."
    )
    walrus_client: WalrusClient

    def _run(self, text: str) -> str:
        """Return the access endpoints for the given service id."""
        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
        try:
            endpoints = self.walrus_client.get_service_access_endpoints(
                project_id, environment_id, text
            )
        except Exception as e:
            return str(e)

        return endpoints


class ListServiceResourcesTool(BaseTool):
    """Tool to get resources of a service."""

    name = "get_service_resources"
    description = (
        "Get resources of a service. "
        "Helpful to know what resources a service consists of, what status they are in. what keys they have."
        "Input should be id of a service. "
        "Output resource objects in json format."
    )
    walrus_client: WalrusClient

    def _run(self, text: str) -> str:
        """Return the resource objects belonging to the given service id."""
        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
        try:
            resources = self.walrus_client.list_service_resources(
                project_id, environment_id, text
            )
        except Exception as e:
            return str(e)

        return resources


class GetServiceDependencyGraphTool(BaseTool):
    """Tool to get service dependency graph."""

    name = "get_service_dependency_graph"
    description = (
        "Get dependency graph of a service. Input should be a service id."
        "Output is a json data wrapped in triple backticks, representing the dependency graph."
        "You can directly return the output to the user. No need to reformat. "
        "UI can use this data to render the graph."
    )
    walrus_client: WalrusClient

    def _run(self, text: str) -> str:
        """Emit a fenced payload the UI renders as a dependency graph.

        NOTE(review): the dict is interpolated with Python repr (single
        quotes), not json.dumps, despite the description promising JSON —
        confirm the UI tolerates this before changing it.
        """
        data = {
            "project_id": walrus_context.GLOBAL_CONTEXT.project_id,
            "service_id": text,
        }
        return f"```service_resource_graph\n{data}\n```"


def _fetch_service_resource_logs(walrus_client: WalrusClient, query: str) -> str:
    """Shared implementation for the two resource-log tools below.

    Parses the JSON tool input (service_id, service_resource_id, key,
    line_number), fetches the logs from the client, and wraps them in a
    localized prefix plus a fenced block. Invalid JSON propagates; API
    errors are returned as message text.
    """
    params = json.loads(query)
    service_id = params.get("service_id")
    service_resource_id = params.get("service_resource_id")
    key = params.get("key")
    line_number = params.get("line_number", 100)  # default tail length

    project_id = walrus_context.GLOBAL_CONTEXT.project_id
    environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
    try:
        log = walrus_client.get_service_resource_logs(
            project_id,
            environment_id,
            service_id,
            service_resource_id,
            key,
            line_number,
        )
    except Exception as e:
        return str(e)

    prefix = text.get("resource_log_prefix")
    return f"{prefix}\n```{log}```"


class GetServiceResourceLogsTool(BaseTool):
    """Tool to get logs of a service resource (for diagnosis)."""

    name = "get_service_resource_logs_for_diagnose"
    description = (
        "Get logs of a service resource. Use when you need to diagnose service error with logs."
        "Before using this tool, you should get keys of the resource first."
        'Input should be a json with 4 keys: "service_id", "service_resource_id", "key", "line_number".'
        '"key" is identity of a service resource\'s component. You can get available keys by listing service resources. '
        '"line_number" is the number of lines of logs to get. defaults to 100 if user does not specify. '
        "Output is log text."
    )
    walrus_client: WalrusClient

    def _run(self, query: str) -> str:
        # Identical behavior to the return-direct variant; only the tool
        # name/description (and hence how the agent uses it) differ.
        return _fetch_service_resource_logs(self.walrus_client, query)


class GetServiceResourceLogsReturnDirectTool(BaseTool):
    """Tool to get logs of a service resource (shown to users directly)."""

    name = "get_service_resource_logs_return_direct"
    description = (
        "Get logs of a service resource. The logs will be shown to users directly. Useful when users want to see logs."
        "Before using this tool, you should get keys of the resource first."
        'Input should be a json with 4 keys: "service_id", "service_resource_id", "key", "line_number".'
        '"key" is identity of a service resource\'s component. You can get available keys by listing service resources. '
        '"line_number" is the number of lines of logs to get. defaults to 100 if user does not specify. '
        "Output is log text."
    )
    walrus_client: WalrusClient

    def _run(self, query: str) -> str:
        return _fetch_service_resource_logs(self.walrus_client, query)


class ConstructServiceToCreateTool(BaseTool):
    """Construct a service for deployment in Walrus system."""

    name = "construct_service_to_create"
    description = (
        "Construct a service for creation in Walrus system."
        # Fixed: the original said "3 keys" but only two are listed/used.
        'Input to the tool should be a json with 2 keys: "user_query" and "related_template_name".'
        'The value of "user_query" should be the description of a deployment task.'
        'The value of "related_template_name" should be name of a template related to the deployment task.'
        "The output is a service object in json. It will be used in the creation of a service."
    )
    llm: BaseLanguageModel
    walrus_client: WalrusClient

    def _run(self, text: str) -> str:
        """Ask the LLM to draft a service object for a creation task."""
        data = json.loads(text)  # invalid JSON propagates to the caller

        query = data.get("user_query")
        template_name = data.get("related_template_name")

        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
        existing_services = self.walrus_client.list_services(
            project_id, environment_id
        )
        related_template = self.walrus_client.get_template_version(
            template_name
        )

        # Everything except the user query is bound up front as partials.
        prompt = PromptTemplate(
            template=CONSTRUCT_SERVICE_TO_CREATE_PROMPT,
            input_variables=["query"],
            partial_variables={
                "context": json.dumps(walrus_context.GLOBAL_CONTEXT.__dict__),
                "existing_services": json.dumps(existing_services),
                "related_template": json.dumps(related_template),
            },
        )
        chain = LLMChain(llm=self.llm, prompt=prompt)
        return chain.run(json.dumps(query)).strip()


class ConstructServiceToUpdateTool(BaseTool):
    """Construct a service for upgrade in Walrus system."""

    name = "construct_service_to_update"
    description = (
        "Construct a service for update in Walrus system."
        'Input to the tool should be a json with 3 keys: "user_query", "service_name" and "related_template_name".'
        'The value of "user_query" should be the description of a deployment task.'
        'The value of "service_name" should be name of the service about to update.'
        'The value of "related_template_name" should be name of a template related to the deployment task.'
        "The output is a service object in json. It will be used in the update of a service."
    )
    llm: BaseLanguageModel
    walrus_client: WalrusClient

    def _run(self, text: str) -> str:
        """Ask the LLM to draft a service object for an upgrade task."""
        data = json.loads(text)  # invalid JSON propagates to the caller

        query = data.get("user_query")
        service_name = data.get("service_name")
        template_name = data.get("related_template_name")

        project_id = walrus_context.GLOBAL_CONTEXT.project_id
        environment_id = walrus_context.GLOBAL_CONTEXT.environment_id
        related_template = self.walrus_client.get_template_version(
            template_name
        )
        current_service = self.walrus_client.get_service_by_name(
            project_id=project_id,
            environment_id=environment_id,
            service_name=service_name,
        )

        prompt = PromptTemplate(
            template=CONSTRUCT_SERVICE_TO_UPDATE_PROMPT,
            input_variables=["query"],
            partial_variables={
                "context": json.dumps(walrus_context.GLOBAL_CONTEXT.__dict__),
                "current_service": json.dumps(current_service),
                "related_template": json.dumps(related_template),
            },
        )
        chain = LLMChain(llm=self.llm, prompt=prompt)
        return chain.run(json.dumps(query)).strip()


# --- walrus/tools/manage_template/__init__.py ---
"""Tools for managing templates."""


# --- walrus/tools/manage_template/prompt.py ---

# Prompt used by MatchTemplateTool to pick the best template for a task.
# {templates} and {query} are filled in via PromptTemplate.
FIND_TEMPLATE_PROMPT = """
Templates are predefined configuration to create a particular type of service.

You will be provided existing templates and a user query describing a deployment task.
Find a template that most likely can be used to accomplish the user query.
If you don't find any, say why.

Output the matched template name in quoted string.

TEMPLATES:
{templates}

User query: {query}
Output:"""


# --- walrus/tools/manage_template/tool.py ---
import json
from langchain import LLMChain

from langchain.agents.tools import BaseTool
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from walrus.tools.manage_template.prompt import FIND_TEMPLATE_PROMPT
from walrus.client import WalrusClient


class MatchTemplateTool(BaseTool):
    """Find matching template useful for a deployment task.

    Input should be a deployment task; output is the matching template name.
    """

    name = "find_matching_template"
    description = (
        "Find a matching template for a deploy/upgrade task."
        "Input should be description of the task. For upgrade task, include previous template info."
        "Output matching template name, or None when no matching template found."
    )
    walrus_client: WalrusClient
    llm: BaseLanguageModel

    def _run(self, query: str) -> str:
        """Let the LLM choose a template from the full template list."""
        try:
            templates = self.walrus_client.list_templates()
        except Exception as e:
            # Return the message text, not the exception object (-> str).
            return str(e)

        prompt = PromptTemplate(
            template=FIND_TEMPLATE_PROMPT,
            input_variables=["query"],
            partial_variables={
                "templates": json.dumps(templates),
            },
        )
        chain = LLMChain(llm=self.llm, prompt=prompt)
        return chain.run(query)


class GetTemplateSchemaTool(BaseTool):
    """Tool to get template version and schema given template name."""

    name = "get_template_schema"
    description = (
        "Get template version and schema given template name."
        "Input should be a template name."
        "Output template schema."
    )
    walrus_client: WalrusClient

    def _run(self, query: str) -> str:
        """Return the template version/schema for the named template.

        NOTE(review): assumes the client already returns text/JSON-ready
        data — confirm WalrusClient.get_template_version's return type.
        """
        try:
            template_version = self.walrus_client.get_template_version(query)
        except Exception as e:
            return str(e)

        return template_version