├── sdk ├── src │ └── og_sdk │ │ ├── __init__.py │ │ ├── kernel_sdk.py │ │ └── utils.py ├── README.md ├── tests │ ├── mock_messages.json │ ├── utils_test.py │ ├── kernel_sdk_tests.py │ └── agent_sdk_tests.py └── setup.py ├── up ├── src │ └── og_up │ │ ├── __init__.py │ │ ├── utils.py │ │ ├── model_downloader.py │ │ └── kernel_up.py ├── README.md └── setup.py ├── agent ├── src │ └── og_agent │ │ ├── __init__.py │ │ ├── grammar.bnf │ │ ├── base_stream_client.py │ │ ├── agent_builder.py │ │ ├── agent_setup.py │ │ ├── llama_client.py │ │ ├── agent_llm.py │ │ ├── prompt.py │ │ ├── mock_agent.py │ │ └── agent_api_server.py ├── README.md ├── tests │ ├── tokenizer_test.py │ ├── agent_api_tests.py │ └── openai_agent_tests.py ├── setup.py └── .gitignore ├── chat ├── src │ ├── og_discord │ │ ├── __init__.py │ │ └── discord_chat.py │ └── og_terminal │ │ ├── __init__.py │ │ ├── utils.py │ │ ├── markdown.py │ │ ├── ping.py │ │ └── ui_block.py ├── README.md ├── tests │ ├── test_parse_files.py │ └── test_chat_function.py └── setup.py ├── kernel ├── src │ └── og_kernel │ │ ├── __init__.py │ │ ├── kernel │ │ ├── __init__.py │ │ ├── config.py │ │ ├── launch_kernel.py │ │ ├── kernel_mgr.py │ │ ├── kernel_app.py │ │ └── kernel_client.py │ │ └── server │ │ └── __init__.py ├── .env.sample ├── README.md ├── update_requirement.sh ├── cases │ └── pie.py ├── setup.py ├── tests │ ├── kernel_mgr_tests.py │ └── kernel_client_tests.py └── .gitignore ├── memory ├── src │ └── og_memory │ │ ├── __init__.py │ │ ├── template │ │ ├── __init__.py │ │ └── agent.jinja │ │ └── memory.py ├── README.md ├── setup.py └── tests │ └── memory_tests.py ├── roles ├── src │ └── og_roles │ │ ├── __init__.py │ │ └── code_interpreter.py ├── README.md └── setup.py ├── serving ├── src │ └── og_serving │ │ ├── __init__.py │ │ └── http_serving.py ├── README.md └── setup.py ├── examples ├── requirements.txt └── chainlit │ ├── chainlit.md │ ├── .chainlit │ └── config.toml │ └── chainlit_ui.py ├── docker ├── server ├── Dockerfile ├── add_endpoint.sh ├── Dockerfile_chrome ├── start_kernel.sh └── start_all.sh ├── format.sh ├── images └── octopus_logo.png ├── docs ├── source │ ├── _static │ │ ├── octogen_logo.png │ │ └── octogen-internal.drawio.png │ ├── index.rst │ ├── conf.py │ └── getstarted.rst └── Makefile ├── tea.yaml ├── env_sample ├── model.env ├── openai_env.sample ├── codellama_env.sample └── azure_env.sample ├── proto ├── requirements.txt ├── README.md ├── src │ └── og_proto │ │ ├── common.proto │ │ ├── prompt.proto │ │ ├── memory.proto │ │ ├── kernel_server.proto │ │ └── agent_server.proto ├── setup.py ├── Makefile └── .gitignore ├── install_package.sh ├── clean_sandbox.sh ├── .github ├── dependabot.yml └── workflows │ ├── cli_ci.yaml │ ├── ci.yaml │ ├── doc_page.yml │ └── release_libraries.yml ├── requirements.txt ├── start_sandbox.sh ├── README_zh_cn.md ├── .gitignore ├── LICENSES └── Elastic-2.0.txt └── README.md /sdk/src/og_sdk/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /up/src/og_up/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent/src/og_agent/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /chat/src/og_discord/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /chat/src/og_terminal/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kernel/src/og_kernel/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /memory/src/og_memory/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /roles/src/og_roles/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /roles/README.md: -------------------------------------------------------------------------------- 1 | # the role module 2 | -------------------------------------------------------------------------------- /serving/src/og_serving/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kernel/src/og_kernel/kernel/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kernel/src/og_kernel/server/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /memory/src/og_memory/template/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/requirements.txt: -------------------------------------------------------------------------------- 1 | chainlit 2 | aiohttp 3 | -------------------------------------------------------------------------------- /memory/README.md: -------------------------------------------------------------------------------- 1 | the memory module for octogen 2 | -------------------------------------------------------------------------------- /serving/README.md: -------------------------------------------------------------------------------- 1 | the serving module for octogen 2 | -------------------------------------------------------------------------------- /docker/server: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbpunk-labs/octogen/HEAD/docker/server -------------------------------------------------------------------------------- /format.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/sh 2 | # 3 | # format.sh 4 | pyink agent kernel chat up sdk examples serving 5 | -------------------------------------------------------------------------------- /images/octopus_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbpunk-labs/octogen/HEAD/images/octopus_logo.png -------------------------------------------------------------------------------- /kernel/.env.sample: -------------------------------------------------------------------------------- 1 | config_path=/tmp/kernel_cnn_file1.json 2 | workspace=/tmp/ws1 3 | ws_host=127.0.0.1 4 | ws_port=9527 5 | -------------------------------------------------------------------------------- /docs/source/_static/octogen_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbpunk-labs/octogen/HEAD/docs/source/_static/octogen_logo.png -------------------------------------------------------------------------------- /docs/source/_static/octogen-internal.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbpunk-labs/octogen/HEAD/docs/source/_static/octogen-internal.drawio.png -------------------------------------------------------------------------------- /tea.yaml: -------------------------------------------------------------------------------- 1 | # https://tea.xyz/what-is-this-file 2 | --- 3 | version: 1.0.0 4 | codeOwners: 5 | - '0xA54DDD5d601D0f7E66A1956225063e7B84C49002' 6 | quorum: 1 7 | -------------------------------------------------------------------------------- /env_sample/model.env: -------------------------------------------------------------------------------- 1 | model=phind-codellama-34b-v2.Q6_K.gguf 2 | model_alias=phind-codellama-34b 3 | n_ctx=4096 4 | n_gpu_layers=32 5 | chat_format=phind 6 | n_batch=2048 7 | -------------------------------------------------------------------------------- /sdk/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | the sdk module of octogen 9 | -------------------------------------------------------------------------------- /agent/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | the agent module of octogen 9 | -------------------------------------------------------------------------------- /chat/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | the terminal cli module of octogen 9 | -------------------------------------------------------------------------------- /up/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | octogen setup tool 9 | 10 | -------------------------------------------------------------------------------- /proto/requirements.txt: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | grpcio-tools==1.60.0 7 | grpc-google-iam-v1==0.13.0 8 | -------------------------------------------------------------------------------- /kernel/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | The Kernel Server for Python Execution 9 | 10 | --------------------------------------------------------------------------------
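The env_sample/model.env above maps one-to-one onto llama-cpp-python's `Llama` constructor arguments. A minimal loading sketch — the `models/` directory and the surrounding script are assumptions for illustration, not part of the repo:

```python
# Sketch: feed env_sample/model.env into llama-cpp-python (model location assumed).
from dotenv import dotenv_values
from llama_cpp import Llama

cfg = dotenv_values("env_sample/model.env")
llm = Llama(
    model_path=f"models/{cfg['model']}",    # the gguf file named by model=
    n_ctx=int(cfg["n_ctx"]),                # 4096-token context window
    n_gpu_layers=int(cfg["n_gpu_layers"]),  # offload 32 layers to the GPU
    n_batch=int(cfg["n_batch"]),            # prompt batch size
    chat_format=cfg["chat_format"],         # "phind" prompt template
)
print(llm.create_chat_completion([{"role": "user", "content": "hello"}]))
```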
/kernel/src/og_kernel/kernel/config.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | from dotenv import dotenv_values 7 | 8 | config = dotenv_values(".env") 9 | -------------------------------------------------------------------------------- /kernel/update_requirement.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | # Copyright (C) 2023 jackwang 4 | # SPDX-FileCopyrightText: 2023 imotai 5 | # SPDX-FileContributor: imotai 6 | # 7 | # SPDX-License-Identifier: Elastic-2.0 8 | 9 | pipreqs . --savepath ./requirements.txt 10 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | RUN apt update 3 | RUN apt install -y python3 python3-pip 4 | RUN pip install -U og_proto og_sdk og_agent og_kernel hapless 5 | RUN useradd -ms /bin/bash octogen 6 | ADD server /bin/ 7 | ADD start_all.sh /bin/ 8 | ADD start_kernel.sh /bin/ 9 | ADD add_endpoint.sh /bin/ 10 | -------------------------------------------------------------------------------- /kernel/src/og_kernel/kernel/launch_kernel.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | from og_kernel.kernel.kernel_app import run_app 8 | 9 | if __name__ == "__main__": 10 | run_app() 11 | -------------------------------------------------------------------------------- /env_sample/openai_env.sample: -------------------------------------------------------------------------------- 1 | rpc_host=127.0.0.1 2 | rpc_port=9528 3 | admin_key=test_key 4 | max_file_size=10240000 5 | verbose=True 6 | db_path=/tmp/octopus.db 7 | llm_key=openai 8 | # the openai api model name 9 | openai_api_model=model_name 10 | # the openai api key 11 | openai_api_key=api_key 12 | max_iterations=10 13 | log_level=debug 14 | -------------------------------------------------------------------------------- /env_sample/codellama_env.sample: -------------------------------------------------------------------------------- 1 | rpc_host=127.0.0.1 2 | rpc_port=9528 3 | admin_key=generate_admin_key 4 | max_file_size=202400000 5 | max_iterations=8 6 | # the sqlite db path 7 | db_path=/tmp/octogen.db 8 | llm_key=codellama 9 | # the llama cpp server 10 | llama_api_base=http://127.0.0.1:8080 11 | log_level=debug 12 | 13 | -------------------------------------------------------------------------------- /install_package.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | # install_package.sh 4 | 5 | WORKDIR=`pwd` 6 | cd ${WORKDIR}/proto && make && pip install . 7 | cd ${WORKDIR}/sdk && pip install . 8 | cd ${WORKDIR}/memory && pip install . 9 | cd ${WORKDIR}/kernel && pip install . 10 | cd ${WORKDIR}/agent && pip install . 11 | cd ${WORKDIR}/chat && pip install . 12 | cd ${WORKDIR}/up && pip install .
13 | -------------------------------------------------------------------------------- /proto/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | the protobuf module of octogen 9 | 10 | build the proto module 11 | ``` 12 | pip install -r requirements.txt 13 | ``` 14 | generate the python stubs and replace the module path 15 | ``` 16 | make 17 | ``` 18 | -------------------------------------------------------------------------------- /examples/chainlit/chainlit.md: -------------------------------------------------------------------------------- 1 | # Welcome to Octogen! 🚀🤖 2 | 3 | Hi there, Developer! 👋 We're excited to have you on board. Octogen is a powerful code interpreter agent service. You can use it as an API. 4 | 5 | ## Useful Links 🔗 6 | 7 | - **Documentation:** Get started with our comprehensive [Octogen Documentation](https://docs.octogen.dev) 📚 8 | 9 | We can't wait to see what you create with Octogen! Happy coding! 💻😊 10 | 11 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. octogen documentation master file, created by 2 | sphinx-quickstart on Thu Sep 7 23:35:56 2023. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Octogen's documentation! 7 | =================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | :caption: Contents: 12 | 13 | getstarted.rst 14 | 15 | -------------------------------------------------------------------------------- /env_sample/azure_env.sample: -------------------------------------------------------------------------------- 1 | rpc_host=127.0.0.1 2 | rpc_port=9528 3 | admin_key=test_key 4 | max_file_size=10240000 5 | verbose=True 6 | db_path=/tmp/octopus.db 7 | llm_key=azure_openai 8 | openai_api_type=azure 9 | openai_api_version=2023-07-01-preview 10 | # the api key 11 | openai_api_key=api_key 12 | # the azure api base 13 | openai_api_base=base_url 14 | # the deployment_name 15 | openai_api_deployment=deployment_name 16 | max_iterations=10 17 | log_level=debug 18 | -------------------------------------------------------------------------------- /proto/src/og_proto/common.proto: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2023 imotai 2 | // SPDX-FileContributor: imotai 3 | // 4 | // SPDX-License-Identifier: Elastic-2.0 5 | 6 | syntax = "proto3"; 7 | 8 | package octogen_common_proto; 9 | 10 | message FileChunk { 11 | bytes buffer = 1; 12 | string filename = 2; 13 | } 14 | 15 | message FileUploaded { 16 | int32 length = 1; 17 | } 18 | 19 | message DownloadRequest { 20 | string filename = 1; 21 | } 22 | -------------------------------------------------------------------------------- /clean_sandbox.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | # clean_sandbox.sh 4 | # Copyright (C) 2023 jackwang 5 | # 6 | # Distributed under terms of the MIT license.
7 | # 8 | 9 | WORKDIR=`pwd` 10 | ps -eu | grep python3 | grep -v grep | awk '{print $2}' | while read line; do kill -9 $line; done 11 | cd ${WORKDIR}/proto && test -e dist && rm -rf dist 12 | cd ${WORKDIR}/agent && test -e dist && rm -rf dist 13 | cd ${WORKDIR}/chat && test -e dist && rm -rf dist 14 | cd ${WORKDIR}/kernel && test -e dist && rm -rf dist 15 | -------------------------------------------------------------------------------- /chat/tests/test_parse_files.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ """ 9 | 10 | from og_terminal.utils import parse_file_path 11 | 12 | 13 | def test_parse_file_path(): 14 | prompt = "convert the file /up /home/test.pdf to text" 15 | paths = parse_file_path(prompt) 16 | assert len(paths) == 1, "bad file path count" 17 | assert paths[0] == "/home/test.pdf", "bad file path" 18 | -------------------------------------------------------------------------------- /kernel/cases/pie.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | """ """ 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | 11 | # Create a pie chart 12 | data = np.array([10, 20, 30, 40]) 13 | labels = ["Category 1", "Category 2", "Category 3", "Category 4"] 14 | 15 | plt.pie(data, labels=labels, autopct="%1.1f%%") 16 | plt.title("Pie Chart") 17 | plt.show() 18 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | version: 2 6 | updates: 7 | - package-ecosystem: "pip" # See documentation for possible values 8 | directory: "/" # Location of package manifests 9 | schedule: 10 | interval: "weekly" 11 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ipykernel==6.29.3 2 | jupyter_client==8.6.0 3 | pytest==7.4.2 4 | python-dotenv==1.0.0 5 | setuptools==69.0.2 6 | grpcio-tools==1.60.0 7 | pytest-asyncio==0.21.1 8 | grpc-google-iam-v1==0.13.0 9 | matplotlib 10 | pydantic==2.5.2 11 | aiosqlite==0.19.0 12 | rich==13.7.0 13 | prompt_toolkit==3.0.40 14 | aiofiles==23.2.1 15 | click==8.1.7 16 | discord.py==2.3.2 17 | openai==0.28.1 18 | build==1.0.3 19 | tiktoken 20 | fastapi 21 | uvicorn 22 | pytest-mock 23 | pydantic-settings 24 | sse-starlette 25 | starlette-context 26 | llama-cpp-python 27 | 28 | -------------------------------------------------------------------------------- /docker/add_endpoint.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | # add_endpoint.sh 4 | # Copyright (C) 2023 ubuntu 5 | # 6 | # Distributed under terms of the MIT license.
7 | # 8 | 9 | if [ "$#" -eq 0 ] 10 | then 11 | echo "No arguments supplied" 12 | exit 1 13 | fi 14 | 15 | ROOT_DIR=$1 16 | AGENT_RPC_KEY=$(cat ${ROOT_DIR}/agent/.env | grep admin_key | tr -d '\r' | cut -d "=" -f 2) 17 | KERNEL_RPC_KEY=$(cat ${ROOT_DIR}/kernel/.env | grep rpc_key | tr -d '\r' | cut -d "=" -f 2) 18 | og_agent_setup --kernel_endpoint=127.0.0.1:9527 --kernel_api_key=${KERNEL_RPC_KEY} --agent_endpoint=127.0.0.1:9528 --admin_key=${AGENT_RPC_KEY} 19 | -------------------------------------------------------------------------------- /agent/tests/tokenizer_test.py: -------------------------------------------------------------------------------- 1 | # vim:fenc=utf-8 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ """ 9 | 10 | import logging 11 | import io 12 | from og_agent.tokenizer import tokenize 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | def test_parse_explanation(): 18 | arguments = """{"function_call":"execute", "arguments": {"explanation":"h""" 19 | for token_state, token in tokenize(io.StringIO(arguments)): 20 | logger.info(f"token_state: {token_state}, token: {token}") 21 | -------------------------------------------------------------------------------- /docker/Dockerfile_chrome: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | RUN apt update 3 | RUN apt install -y python3 python3-pip 4 | RUN pip install -U og_proto og_sdk og_agent og_kernel hapless 5 | RUN adduser octogen 6 | ADD server /bin/ 7 | ADD start_all.sh /bin/ 8 | ADD start_kernel.sh /bin/ 9 | ADD add_endpoint.sh /bin/ 10 | RUN apt install -y wget 11 | RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - 12 | RUN sh -c 'echo "deb http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list' 13 | RUN apt-get update 14 | RUN apt --fix-broken install 15 | RUN apt-get install google-chrome-stable -y 16 | -------------------------------------------------------------------------------- /sdk/tests/mock_messages.json: -------------------------------------------------------------------------------- 1 | { 2 | "hello":[ 3 | { 4 | "explanation": "how can I help you today?" 5 | } 6 | ], 7 | "write a hello world in python":[ 8 | { 9 | "explanation": "this is a hello world code", 10 | "code":"print('hello world')" 11 | }, 12 | { 13 | "explanation": "this code prints 'hello world'" 14 | } 15 | ], 16 | "error function":[ 17 | { 18 | "explanation": "this is a hello world code", 19 | "code":"print(hello world')" 20 | }, 21 | { 22 | "explanation": "this code prints 'hello world'" 23 | } 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /agent/src/og_agent/grammar.bnf: -------------------------------------------------------------------------------- 1 | root ::= object 2 | value ::= object | array | string | number | ("true" | "false" | "null") ws 3 | 4 | object ::= 5 | "{" ws ( 6 | string ":" ws value 7 | ("," ws string ":" ws value)* 8 | )? "}" ws 9 | 10 | array ::= 11 | "[" ws ( 12 | value 13 | ("," ws value)* 14 | )? "]" ws 15 | 16 | string ::= 17 | "\"" ( 18 | [^"\\] | 19 | "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes 20 | )* "\"" ws 21 | 22 | number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? 
ws 23 | 24 | # Optional space: by convention, applied in this grammar after literal chars when allowed 25 | ws ::= ([ \t\n] ws)? 26 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /proto/src/og_proto/prompt.proto: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2023 imotai 2 | // SPDX-FileContributor: imotai 3 | // 4 | // SPDX-License-Identifier: Elastic-2.0 5 | 6 | syntax = "proto3"; 7 | package octogen_agent_prompt; 8 | 9 | message ActionDesc { 10 | // the action name 11 | string name = 1; 12 | // the action description 13 | string desc = 2; 14 | // the parameters with json schema format for the action 15 | string parameters = 3; 16 | } 17 | 18 | message AgentPrompt { 19 | // the system role of agent 20 | string role = 1; 21 | // the rules for the role 22 | repeated string rules = 2; 23 | repeated ActionDesc actions = 3; 24 | // the response format for LLM 25 | string output_format = 4; 26 | string role_name = 5; 27 | } 28 | -------------------------------------------------------------------------------- /sdk/setup.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | from setuptools import setup, find_packages 8 | 9 | setup( 10 | name="og_sdk", 11 | version="0.3.6", 12 | description="Open source code interpreter agent for LLM", 13 | author="imotai", 14 | author_email="codego.me@gmail.com", 15 | url="https://github.com/dbpunk-labs/octogen", 16 | long_description=open("README.md").read(), 17 | long_description_content_type="text/markdown", 18 | packages=[ 19 | "og_sdk", 20 | ], 21 | package_dir={ 22 | "og_sdk": "src/og_sdk", 23 | }, 24 | install_requires=[ 25 | "og_proto", 26 | "aiofiles", 27 | ], 28 | ) 29 | -------------------------------------------------------------------------------- /roles/setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | """ """ 8 | from setuptools import setup, find_packages 9 | 10 | setup( 11 | name="og_roles", 12 | version="0.3.6", 13 | description="Open source llm agent service", 14 | author="imotai", 15 | author_email="wangtaize@dbpunk.com", 16 | url="https://github.com/dbpunk-labs/octogen", 17 | long_description=open("README.md").read(), 18 | long_description_content_type="text/markdown", 19 | 
packages=[ 20 | "og_roles", 21 | ], 22 | package_dir={ 23 | "og_roles": "src/og_roles", 24 | }, 25 | package_data={}, 26 | ) 27 | -------------------------------------------------------------------------------- /docker/start_kernel.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | # start_kernel.sh 4 | # Copyright (C) 2023 imotai 5 | # 6 | # Distributed under terms of the MIT license. 7 | # 8 | 9 | if [ "$#" -eq 0 ] 10 | then 11 | echo "No arguments supplied" 12 | exit 1 13 | fi 14 | ROOT_DIR=$1 15 | 16 | mkdir -p ${ROOT_DIR}/kernel/ws 17 | mkdir -p ${ROOT_DIR}/kernel/config 18 | mkdir -p ${ROOT_DIR}/kernel/logs 19 | chown -R octogen:octogen ${ROOT_DIR}/kernel/ws 20 | chown -R octogen:octogen ${ROOT_DIR}/kernel/config 21 | chown -R octogen:octogen ${ROOT_DIR}/kernel/logs 22 | 23 | cat <<EOF > /bin/start_service.sh 24 | echo "start kernel.." 25 | cd ${ROOT_DIR}/kernel && hap run -n octopus_kernel -- og_kernel_rpc_server >>${ROOT_DIR}/kernel/logs/kernel_rpc.log 2>&1 26 | 27 | while true 28 | do 29 | hap status 30 | sleep 10 31 | done 32 | EOF 33 | su - octogen -c "bash /bin/start_service.sh" 34 | -------------------------------------------------------------------------------- /agent/src/og_agent/base_stream_client.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | import json 8 | import aiohttp 9 | import logging 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | class BaseStreamClient: 15 | 16 | def __init__(self, endpoint, key): 17 | self.endpoint = endpoint 18 | self.key = key 19 | 20 | async def arun(self, request_data): 21 | logger.debug(f"{request_data}") 22 | headers = {"Authorization": self.key} 23 | async with aiohttp.ClientSession( 24 | headers=headers, raise_for_status=True 25 | ) as session: 26 | async with session.post(self.endpoint, json=request_data) as r: 27 | async for line in r.content: 28 | if line: 29 | yield line 30 | -------------------------------------------------------------------------------- /proto/setup.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | from setuptools import setup, find_packages 8 | 9 | setup( 10 | name="og_proto", 11 | version="0.3.6", 12 | description="Open source code interpreter agent for LLM", 13 | author="imotai", 14 | author_email="codego.me@gmail.com", 15 | url="https://github.com/dbpunk-labs/octogen", 16 | long_description=open('README.md').read(), 17 | long_description_content_type='text/markdown', 18 | 19 | packages=[ 20 | "og_proto", 21 | ], 22 | 23 | package_dir={ 24 | "og_proto": "src/og_proto", 25 | }, 26 | 27 | package_data={"og_proto": ["*.pyi"]}, 28 | 29 | install_requires=[ 30 | "grpc-google-iam-v1>=0.12.0", 31 | "grpcio-tools>=1.40.0", 32 | ], 33 | 34 | entry_points={ 35 | }, 36 | 37 | ) 38 | --------------------------------------------------------------------------------
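The prompt.proto messages shown earlier compile into og_proto.prompt_pb2 via the grpc_tools invocation in proto/Makefile. A small construction sketch — all field values here are invented for illustration:

```python
# Sketch: building the generated prompt.proto messages (invented example values).
from og_proto.prompt_pb2 import ActionDesc, AgentPrompt

action = ActionDesc(
    name="execute",
    desc="execute the generated python code",
    # parameters carries a JSON-schema string, per the comment in prompt.proto
    parameters='{"type": "object", "properties": {"code": {"type": "string"}}}',
)
prompt = AgentPrompt(
    role="You are a helpful code interpreter.",
    rules=["respond in English"],
    actions=[action],
    role_name="octogen",
)
print(prompt)
```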
/docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | 9 | project = 'Octogen' 10 | copyright = '2023 octogen.dev' 11 | author = 'imotai' 12 | 13 | # -- General configuration --------------------------------------------------- 14 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 15 | 16 | extensions = [] 17 | 18 | templates_path = ['_templates'] 19 | exclude_patterns = [] 20 | 21 | 22 | 23 | # -- Options for HTML output ------------------------------------------------- 24 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 25 | 26 | html_theme = "alabaster" 27 | html_static_path = ['_static'] 28 | -------------------------------------------------------------------------------- /memory/setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | """ """ 8 | from setuptools import setup, find_packages 9 | 10 | setup( 11 | name="og_memory", 12 | version="0.3.6", 13 | description="Open source code interpreter agent", 14 | author="imotai", 15 | author_email="wangtaize@dbpunk.com", 16 | url="https://github.com/dbpunk-labs/octogen", 17 | long_description=open("README.md").read(), 18 | long_description_content_type="text/markdown", 19 | 20 | packages=[ 21 | "og_memory", 22 | "og_memory.template", 23 | ], 24 | 25 | package_dir={ 26 | "og_memory": "src/og_memory", 27 | "og_memory.template": "src/og_memory/template", 28 | }, 29 | install_requires=[ 30 | "og_proto", 31 | "Jinja2", 32 | ], 33 | package_data={"og_memory.template": ["*.jinja"]}, 34 | ) 35 | -------------------------------------------------------------------------------- /serving/setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | """ """ 8 | from setuptools import setup, find_packages 9 | 10 | setup( 11 | name="og_serving", 12 | version="0.3.6", 13 | description="Open source code interpreter agent", 14 | author="imotai", 15 | author_email="wangtaize@dbpunk.com", 16 | url="https://github.com/dbpunk-labs/octogen", 17 | long_description=open("README.md").read(), 18 | long_description_content_type="text/markdown", 19 | packages=[ 20 | "og_serving", 21 | ], 22 | package_dir={ 23 | "og_serving": "src/og_serving", 24 | }, 25 | install_requires=["fastapi", "pydantic_settings"], 26 | package_data={}, 27 | entry_points={ 28 | "console_scripts": [ 29 | "og_serving_http_server = og_serving.http_serving:run_serving", 30 | ] 31 | }, 32 | ) 33 | -------------------------------------------------------------------------------- /up/setup.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | from setuptools import setup, find_packages 8 | 9 | setup( 10 | name="og_up", 11 | version="0.3.6", 12 |
description="Open source code interpreter agent for LLM", 13 | author="imotai", 14 | author_email="codego.me@gmail.com", 15 | url="https://github.com/dbpunk-labs/octogen", 16 | long_description=open("README.md").read(), 17 | long_description_content_type="text/markdown", 18 | packages=[ 19 | "og_up", 20 | ], 21 | package_dir={ 22 | "og_up": "src/og_up", 23 | }, 24 | install_requires=["og_sdk", "requests", "huggingface_hub", "rich", "click"], 25 | entry_points={ 26 | "console_scripts": [ 27 | "og_up = og_up.up:init_octogen", 28 | "og_kernel_up = og_up.kernel_up:init_kernel", 29 | "og_download = og_up.model_downloader:download", 30 | ] 31 | }, 32 | ) 33 | -------------------------------------------------------------------------------- /memory/src/og_memory/template/agent.jinja: -------------------------------------------------------------------------------- 1 | {#the role description#}{{prompt.role}} 2 | {#the rule list#}{%if prompt.rules -%}Follow the rules 3 | {% for rule in prompt.rules if rule -%} 4 | {{loop.index}}.{{rule}} 5 | {% endfor -%} 6 | {% endif -%} 7 | {%if prompt.actions and options.show_function_instruction -%} 8 | Use the following actions to help you finish your task 9 | {% for action in prompt.actions if action -%} 10 | {% set temp_parameters = action.parameters | from_json %} 11 | {{loop.index}}.{{action.name}}: {{action.desc}}, the following are the parameters 12 | {% for key, value in temp_parameters.properties.items() -%} 13 | {{key}}({{value.type}}):{{value.description}} 14 | {%endfor-%} 15 | {% endfor -%} 16 | {% endif -%}{%if guides -%}The instructions for the tools and libraries you recently used. 17 | {% for guide in guides if guide -%}{{loop.index}}.{{guide.name}}{{guide.what_it_can_do}}{{guide.how_to_use}} 18 | {% endfor -%}{% endif -%}{%if prompt.output_format and not options.disable_output_format -%} 19 | {{prompt.output_format}} 20 | {% endif -%} 21 | -------------------------------------------------------------------------------- /up/src/og_up/utils.py: -------------------------------------------------------------------------------- 1 | #!
/usr/bin/env python3 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ """ 9 | import subprocess 10 | import os 11 | import sys 12 | import io 13 | 14 | USE_SHELL = sys.platform.startswith("win") 15 | 16 | 17 | def run_with_realtime_print( 18 | command, universal_newlines=True, useshell=USE_SHELL, env=os.environ 19 | ): 20 | try: 21 | p = subprocess.Popen( 22 | command, 23 | stdout=subprocess.PIPE, 24 | stderr=subprocess.STDOUT, 25 | shell=useshell, 26 | env=env, 27 | ) 28 | 29 | text_fd = io.TextIOWrapper( 30 | p.stdout, encoding="utf-8", newline=os.linesep, errors="replace" 31 | ) 32 | while True: 33 | chunk = text_fd.read(40) 34 | if not chunk: 35 | break 36 | yield 0, chunk 37 | p.wait() 38 | yield p.returncode, "" 39 | except Exception as ex: 40 | yield -1, str(ex) 41 | -------------------------------------------------------------------------------- /.github/workflows/cli_ci.yaml: -------------------------------------------------------------------------------- 1 | name: Cli Testing 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | jobs: 11 | build: 12 | strategy: 13 | matrix: 14 | #os: [ubuntu-latest, windows-latest, macos-latest] 15 | os: [ubuntu-latest] 16 | python-version: ["3.11"] 17 | runs-on: ${{ matrix.os }} 18 | steps: 19 | - name: Checkout repository 20 | uses: actions/checkout@v3 21 | with: 22 | submodules: recursive 23 | 24 | - name: Setup Docker on macOS 25 | uses: douglascamata/setup-docker-macos-action@v1-alpha 26 | if: startsWith(runner.os, 'macos') 27 | 28 | - name: Set up Python ${{ matrix.python-version }} 29 | uses: actions/setup-python@v4 30 | with: 31 | python-version: ${{ matrix.python-version }} 32 | - name: Install dependencies 33 | run: pip install -r requirements.txt 34 | 35 | - name: Run cli tests 36 | shell: bash 37 | run: | 38 | bash install_package.sh 39 | pytest up/tests/*.py 40 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: backend testing 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | jobs: 11 | build: 12 | #runs-on: self-hosted 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout repository 16 | uses: actions/checkout@v3 17 | with: 18 | submodules: recursive 19 | - name: Install dependencies 20 | run: pip install -r requirements.txt 21 | - name: Run tests 22 | run: | 23 | WS_DIR=`pwd` 24 | bash start_sandbox.sh 25 | cd ${WS_DIR}/memory 26 | pytest tests/*.py 27 | cd ${WS_DIR}/agent 28 | pytest tests/*.py 29 | cd ${WS_DIR}/sdk 30 | pytest tests/*.py 31 | cd ${WS_DIR}/kernel 32 | pytest tests/*.py 33 | cd ${WS_DIR}/chat 34 | pytest tests/*.py 35 | - uses: actions/upload-artifact@v3 36 | if: failure() 37 | with: 38 | name: log-artifact 39 | path: | 40 | sandbox/agent/*.log 41 | sandbox/kernel/*.log 42 | -------------------------------------------------------------------------------- /agent/src/og_agent/agent_builder.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | import json 8 | from .llama_agent import LlamaAgent 9 | from .openai_agent import OpenaiAgent 10 | from .llama_client import LlamaClient 11 | from .mock_agent import 
MockAgent 12 | 13 | 14 | def build_llama_agent(endpoint, key, sdk, grammer_path): 15 | """ 16 | build llama agent 17 | """ 18 | with open(grammer_path, "r") as fd: 19 | grammar = fd.read() 20 | client = LlamaClient(endpoint, key, grammar) 21 | # init the agent 22 | return LlamaAgent(client, sdk) 23 | 24 | 25 | def build_openai_agent(sdk, model_name, is_azure=True): 26 | """build openai function call agent""" 27 | # TODO a data dir per user 28 | # init the agent 29 | 30 | agent = OpenaiAgent(model_name, sdk, is_azure=is_azure) 31 | return agent 32 | 33 | 34 | def build_mock_agent(sdk, cases_path): 35 | """ 36 | build the mock agent for testing 37 | """ 38 | with open(cases_path, "r") as fd: 39 | messages = json.load(fd) 40 | agent = MockAgent(messages, sdk) 41 | return agent 42 | -------------------------------------------------------------------------------- /agent/src/og_agent/agent_setup.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | import click 8 | import asyncio 9 | from og_sdk.agent_sdk import AgentSDK 10 | 11 | 12 | async def add_kernel(endpoint, api_key, kernel_endpoint, kernel_api_key): 13 | sdk = AgentSDK(endpoint, api_key) 14 | sdk.connect() 15 | try: 16 | await sdk.add_kernel(kernel_api_key, kernel_endpoint) 17 | print("add kernel %s done" % kernel_endpoint) 18 | except Exception as ex: 19 | print("add kernel %s failed %s" % (kernel_endpoint, ex)) 20 | 21 | 22 | @click.command() 23 | @click.option("--kernel_endpoint", help="the endpoint of kernel") 24 | @click.option("--kernel_api_key", help="the api key of kernel") 25 | @click.option("--agent_endpoint", help="the endpoint of agent") 26 | @click.option("--admin_key", help="the admin key of agent") 27 | def setup(kernel_endpoint, kernel_api_key, agent_endpoint, admin_key): 28 | if not kernel_endpoint or not kernel_api_key or not admin_key or not agent_endpoint: 29 | print("kernel_endpoint or kernel_api_key or admin key is empty") 30 | return 31 | asyncio.run(add_kernel(agent_endpoint, admin_key, kernel_endpoint, kernel_api_key)) 32 | -------------------------------------------------------------------------------- /chat/src/og_terminal/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | """ """ 8 | 9 | 10 | def parse_file_path(real_prompt): 11 | """ 12 | parse the file path from the prompt 13 | """ 14 | filepaths = [] 15 | position = 0 16 | while position < len(real_prompt): 17 | first_pos = real_prompt.find("/up", position) 18 | # break the loop if no file to upload 19 | if first_pos == -1 or len(real_prompt) - first_pos <= 4: 20 | break 21 | # 22 | if real_prompt[first_pos + 3] != " ": 23 | position = first_pos + 4 24 | continue 25 | end_pos = real_prompt.find("\n", first_pos + 4) 26 | end_pos = end_pos if end_pos >= 0 else len(real_prompt) 27 | blank_pos = real_prompt.find(" ", first_pos + 4, end_pos) 28 | if blank_pos == -1: 29 | filepath = real_prompt[first_pos + 4 : end_pos] 30 | position = len(real_prompt) 31 | else: 32 | filepath = real_prompt[first_pos + 4 : blank_pos] 33 | position = blank_pos 34 | if filepath: 35 | filepaths.append(filepath) 36 | return filepaths 37 | 
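A usage sketch for `parse_file_path` above — the prompt string is invented for illustration; each path runs from an `/up ` marker to the next space or newline:

```python
# Sketch: how og_terminal.utils.parse_file_path extracts upload paths from a prompt.
from og_terminal.utils import parse_file_path

prompt = "summarize /up /tmp/report.pdf and /up /tmp/data.csv for me"
print(parse_file_path(prompt))  # ['/tmp/report.pdf', '/tmp/data.csv']
```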
-------------------------------------------------------------------------------- /chat/setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | """ """ 8 | from setuptools import setup, find_packages 9 | 10 | setup( 11 | name="og_chat", 12 | version="0.3.6", 13 | description="the chat client for open source code interpreter octogen", 14 | author="imotai", 15 | author_email="codego.me@gmail.com", 16 | url="https://github.com/dbpunk-labs/octogen", 17 | long_description=open("README.md").read(), 18 | long_description_content_type="text/markdown", 19 | packages=[ 20 | "og_discord", 21 | "og_terminal", 22 | ], 23 | package_dir={ 24 | "og_discord": "src/og_discord", 25 | "og_terminal": "src/og_terminal", 26 | }, 27 | install_requires=[ 28 | "og_sdk>=0.1.0", 29 | "rich>=13.5.2", 30 | "prompt_toolkit>=3.0.0", 31 | "click>=8.0.0", 32 | "discord.py>=2.3.2", 33 | "clipboard>=0.0.4", 34 | "term-image>=0.7.0", 35 | "python-dotenv", 36 | ], 37 | entry_points={ 38 | "console_scripts": [ 39 | "og = og_terminal.terminal_chat:app", 40 | "og_ping = og_terminal.ping:app", 41 | "og_discord_bot = og_discord.discord_chat:run_app", 42 | ] 43 | }, 44 | ) 45 | -------------------------------------------------------------------------------- /kernel/setup.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | from setuptools import setup, find_packages 8 | 9 | setup( 10 | name="og_kernel", 11 | version="0.3.6", 12 | description="Open source code interpreter agent for LLM", 13 | author="imotai", 14 | author_email="codego.me@gmail.com", 15 | url="https://github.com/dbpunk-labs/octogen", 16 | long_description=open("README.md").read(), 17 | long_description_content_type="text/markdown", 18 | packages=[ 19 | "og_kernel", 20 | "og_kernel.kernel", 21 | "og_kernel.server", 22 | ], 23 | package_dir={ 24 | "og_kernel": "src/og_kernel", 25 | "og_kernel.kernel": "src/og_kernel/kernel", 26 | "og_kernel.server": "src/og_kernel/server", 27 | }, 28 | install_requires=[ 29 | "og_proto", 30 | "grpc-google-iam-v1>=0.12.6", 31 | "grpcio-tools>=1.57.0", 32 | "ipykernel>=6.25.1", 33 | "jupyter_client>=8.3.0", 34 | "matplotlib>=3.7.2", 35 | "pandas", 36 | "numpy", 37 | ], 38 | entry_points={ 39 | "console_scripts": [ 40 | "og_kernel_rpc_server = og_kernel.server.kernel_rpc_server:server_main", 41 | "og_kernel_app = og_kernel.kernel.kernel_app:run_app", 42 | ] 43 | }, 44 | ) 45 | -------------------------------------------------------------------------------- /chat/src/og_terminal/markdown.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | from rich.markdown import TextElement, Markdown 8 | from rich.syntax import Syntax 9 | from rich.console import Console, ConsoleOptions, RenderResult 10 | from markdown_it.token import Token 11 | 12 | 13 | class CodeBlock(TextElement): 14 | """A code block with syntax highlighting.""" 15 | 16 | style_name = "markdown.code_block" 17 | 18 | @classmethod 19 | def create(cls, markdown: "Markdown", token: 
Token) -> "CodeBlock": 20 | node_info = token.info or "" 21 | lexer_name = node_info.partition(" ")[0] 22 | return cls(lexer_name or "default", markdown.code_theme) 23 | 24 | def __init__(self, lexer_name: str, theme: str) -> None: 25 | self.lexer_name = lexer_name 26 | self.theme = theme 27 | 28 | def __rich_console__( 29 | self, console: Console, options: ConsoleOptions 30 | ) -> RenderResult: 31 | code = str(self.text).rstrip() 32 | syntax = Syntax( 33 | code, 34 | self.lexer_name, 35 | # background_color="default", 36 | line_numbers=True, 37 | theme=self.theme, 38 | word_wrap=True, 39 | padding=1, 40 | ) 41 | yield syntax 42 | -------------------------------------------------------------------------------- /agent/setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | """ """ 8 | from setuptools import setup, find_packages 9 | 10 | setup( 11 | name="og_agent", 12 | version="0.3.6", 13 | description="Open source code interpreter agent", 14 | author="imotai", 15 | author_email="wangtaize@dbpunk.com", 16 | url="https://github.com/dbpunk-labs/octogen", 17 | long_description=open("README.md").read(), 18 | long_description_content_type="text/markdown", 19 | packages=[ 20 | "og_agent", 21 | ], 22 | package_dir={ 23 | "og_agent": "src/og_agent", 24 | }, 25 | install_requires=[ 26 | "og_proto", 27 | "og_kernel", 28 | "og_sdk", 29 | "grpcio-tools>=1.57.0", 30 | "grpc-google-iam-v1>=0.12.6", 31 | "aiofiles", 32 | "orm[sqlite]", 33 | "python-dotenv", 34 | "openai", 35 | "aiohttp>=3.8.5", 36 | "pydantic", 37 | "tiktoken", 38 | "fastapi", 39 | "uvicorn", 40 | ], 41 | package_data={"og_agent": ["*.bnf"]}, 42 | entry_points={ 43 | "console_scripts": [ 44 | "og_agent_rpc_server = og_agent.agent_server:server_main", 45 | "og_agent_setup = og_agent.agent_setup:setup", 46 | "og_agent_http_server = og_agent.agent_api_server:run_app", 47 | ] 48 | }, 49 | ) 50 | -------------------------------------------------------------------------------- /agent/src/og_agent/llama_client.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | """ """ 8 | 9 | import json 10 | import aiohttp 11 | import logging 12 | from .base_stream_client import BaseStreamClient 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | class LlamaClient(BaseStreamClient): 18 | 19 | def __init__(self, endpoint, key, grammar): 20 | super().__init__(endpoint + "/v1/chat/completions", key) 21 | self.grammar = grammar 22 | 23 | async def chat(self, messages, model, temperature=0, max_tokens=1024, stop=["\n"]): 24 | data = { 25 | "messages": messages, 26 | "temperature": temperature, 27 | "grammar": self.grammar, 28 | "stream": True, 29 | "model": model, 30 | "max_tokens": max_tokens, 31 | "top_p": 0.9, 32 | } 33 | if stop: 34 | data["stop"] = stop 35 | async for line in self.arun(data): 36 | if len(line) < 6: 37 | continue 38 | try: 39 | content = line[6:] 40 | logger.debug(f"llama response content: {content}") 41 | message = json.loads(content) 42 | yield message 43 | except Exception as e: 44 | logger.error("error: %s, content: %s", e, content) 45 | continue 46 | 
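A hedged sketch of driving `LlamaClient` above against a local llama.cpp server — the endpoint mirrors `llama_api_base` in codellama_env.sample, the key value is a placeholder, and the `choices[0]["delta"]` chunk shape assumes the server's OpenAI-compatible streaming format:

```python
# Sketch: stream a grammar-constrained chat completion (server address assumed).
import asyncio
from og_agent.llama_client import LlamaClient

async def main():
    with open("agent/src/og_agent/grammar.bnf") as fd:
        grammar = fd.read()  # constrains the model to emit valid JSON
    client = LlamaClient("http://127.0.0.1:8080", "a_placeholder_key", grammar)
    messages = [{"role": "user", "content": "write a hello world in python"}]
    async for chunk in client.chat(messages, model="codellama", stop=None):
        delta = chunk["choices"][0].get("delta", {})
        print(delta.get("content", ""), end="", flush=True)

asyncio.run(main())
```

Passing stop=None skips the stop field entirely; the default stop=["\n"] would end the stream at the first newline of generated code.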
-------------------------------------------------------------------------------- /kernel/tests/kernel_mgr_tests.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | import pytest 7 | 8 | from og_kernel.kernel.kernel_mgr import KernelManager 9 | 10 | 11 | @pytest.mark.parametrize( 12 | "config_path, workspace", 13 | [ 14 | ("", ""), 15 | ("kernel_connection_file.json", ""), 16 | ("", "/tmp/workspace1"), 17 | ], 18 | ) 19 | def test_init_with_invalid_args(config_path, workspace): 20 | with pytest.raises(ValueError): 21 | KernelManager(config_path, workspace) 22 | 23 | 24 | def test_init_with_valid_args(): 25 | km = KernelManager( 26 | config_path="/tmp/kernel_connection_file.json", 27 | workspace="/tmp/workspace1", 28 | ) 29 | assert km.config_path == "/tmp/kernel_connection_file.json" 30 | assert km.workspace == "/tmp/workspace1" 31 | assert km.process is None 32 | assert not km.is_running 33 | 34 | 35 | def test_start_kernel(): 36 | km = KernelManager( 37 | config_path="/tmp/kernel_connection_file1.json", 38 | workspace="/tmp/workspace1", 39 | ) 40 | km.start() 41 | assert km.is_running 42 | assert km.process is not None 43 | km.stop() 44 | 45 | 46 | def test_stop_kernel(): 47 | km = KernelManager( 48 | config_path="/tmp/kernel_connection_file2.json", 49 | workspace="/tmp/workspace2", 50 | ) 51 | km.start() 52 | km.stop() 53 | assert not km.is_running 54 | assert km.process is None 55 | -------------------------------------------------------------------------------- /up/src/og_up/model_downloader.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | import os 8 | import click 9 | from huggingface_hub import hf_hub_download 10 | 11 | 12 | @click.command() 13 | @click.option("--repo", help="the repo of huggingface") 14 | @click.option("--filename", help="the filename of model") 15 | @click.option( 16 | "--cache_dir", default="~/.octogen/app/cache", help="the cache_dir of huggingface" 17 | ) 18 | @click.option( 19 | "--local_dir", default="~/.octogen/app/model", help="the local dir of huggingface" 20 | ) 21 | @click.option("--socks_proxy", default="", help="the socks proxy url") 22 | def download(repo, filename, cache_dir, local_dir, socks_proxy): 23 | if local_dir.find("~") == 0: 24 | real_local_dir = local_dir.replace("~", os.path.expanduser("~")) 25 | else: 26 | real_local_dir = local_dir 27 | if cache_dir.find("~") == 0: 28 | real_cache_dir = cache_dir.replace("~", os.path.expanduser("~")) 29 | else: 30 | real_cache_dir = cache_dir 31 | 32 | os.makedirs(real_cache_dir, exist_ok=True) 33 | os.makedirs(real_local_dir, exist_ok=True) 34 | proxies = {} 35 | if socks_proxy: 36 | proxies = {"http": socks_proxy, "https": socks_proxy} 37 | hf_hub_download( 38 | repo_id=repo, 39 | filename=filename, 40 | cache_dir=real_cache_dir, 41 | local_dir=real_local_dir, 42 | proxies=proxies, 43 | resume_download=True, 44 | ) 45 | -------------------------------------------------------------------------------- /start_sandbox.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/sh 2 | # start_sandbox.sh 3 | 4 | ps -ef | grep python3 | grep og |grep -v grep | awk '{print $2}' | while read line; do kill -9 $line; done 5 | WORKDIR=`pwd` 6 | bash install_package.sh 7 | mkdir -p ${WORKDIR}/sandbox/kernel 8 | mkdir -p ${WORKDIR}/sandbox/agent 9 | cd ${WORKDIR}/sandbox/kernel 10 | KERNEL_RPC_KEY=ZCeI9cYtOCyLISoi488BgZHeBkHWuFUH 11 | echo ${KERNEL_RPC_KEY} 12 | mkdir -p /tmp/ws1 /tmp/kernel_config 13 | 14 | cat <<EOF > .env 15 | config_root_path=/tmp/kernel_config 16 | workspace=/tmp/ws1 17 | rpc_host=127.0.0.1 18 | rpc_port=9527 19 | rpc_key=${KERNEL_RPC_KEY} 20 | EOF 21 | 22 | echo "start kernel with endpoint 127.0.0.1:9527" 23 | 24 | og_kernel_rpc_server > kernel_rpc.log 2>&1 & 25 | sleep 2 26 | 27 | cd ${WORKDIR}/sandbox/agent 28 | AGENT_RPC_KEY=ZCeI9cYtOCyLISoi488BgZHeBkHWuFUH 29 | test -f /tmp/octopus_sandbox.db && rm /tmp/octopus_sandbox.db 30 | echo "start agent with endpoint 127.0.0.1:9528" 31 | 32 | cat <<EOF > .env 33 | rpc_host=127.0.0.1 34 | rpc_port=9528 35 | admin_key=${AGENT_RPC_KEY} 36 | llm_key=mock 37 | max_file_size=10240000 38 | verbose=True 39 | db_path=/tmp/octopus_sandbox.db 40 | cases_path=${WORKDIR}/sdk/tests/mock_messages.json 41 | EOF 42 | 43 | og_agent_rpc_server > agent_rpc.log 2>&1 & 44 | og_agent_http_server > agent_http.log 2>&1 & 45 | sleep 2 46 | echo "add a kernel" 47 | og_agent_setup --kernel_endpoint=127.0.0.1:9527 --kernel_api_key=${KERNEL_RPC_KEY} --agent_endpoint=127.0.0.1:9528 --admin_key=${AGENT_RPC_KEY} 48 | 49 | mkdir -p ~/.octogen 50 | cat <<EOF > ~/.octogen/config 51 | endpoint=127.0.0.1:9528 52 | api_key=${AGENT_RPC_KEY} 53 | EOF 54 | og_ping 55 | -------------------------------------------------------------------------------- /proto/Makefile: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | # 7 | # Makefile 8 | # jackwang, 2023-08-25 12:24 9 | # 10 | 11 | PROTO_PATH = ./src/og_proto/ 12 | PROTOS := $(shell find ${PROTO_PATH} -name '*.proto') 13 | generate: $(PROTOS) 14 | @python3 -m grpc_tools.protoc --python_out=./src/og_proto \ 15 | --grpc_python_out=./src/og_proto \ 16 | --pyi_out=./src/og_proto \ 17 | --proto_path $(PROTO_PATH) \ 18 | $^ 19 | @sed -i'' -e 's/import kernel_server_pb2 as kernel__server__pb2/from . import kernel_server_pb2 as kernel__server__pb2/' src/og_proto/kernel_server_pb2_grpc.py 20 | @sed -i'' -e 's/import common_pb2 as common__pb2/from . import common_pb2 as common__pb2/' src/og_proto/kernel_server_pb2_grpc.py 21 | @sed -i'' -e 's/import common_pb2 as common__pb2/from . import common_pb2 as common__pb2/' src/og_proto/kernel_server_pb2.py 22 | @sed -i'' -e 's/import agent_server_pb2 as agent__server__pb2/from . import agent_server_pb2 as agent__server__pb2/' src/og_proto/agent_server_pb2_grpc.py 23 | @sed -i'' -e 's/import common_pb2 as common__pb2/from . import common_pb2 as common__pb2/' src/og_proto/agent_server_pb2_grpc.py 24 | @sed -i'' -e 's/import common_pb2 as common__pb2/from . import common_pb2 as common__pb2/' src/og_proto/agent_server_pb2.py 25 | @sed -i'' -e 's/import prompt_pb2 as prompt__pb2/from . import prompt_pb2 as prompt__pb2/' src/og_proto/memory_pb2.py
26 | @echo "'${@}' done" 27 | 28 | clean: 29 | @rm src/og_proto/*_pb2.py 30 | @rm src/og_proto/*_pb2_grpc.py 31 | @rm src/og_proto/*.pyi 32 | -------------------------------------------------------------------------------- /proto/src/og_proto/memory.proto: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2023 imotai 2 | // SPDX-FileContributor: imotai 3 | // 4 | // SPDX-License-Identifier: Elastic-2.0 5 | 6 | syntax = "proto3"; 7 | package octogen_agent_memory; 8 | import "prompt.proto"; 9 | 10 | message GuideMemory { 11 | // the name of the tool or library 12 | string name = 1; 13 | // what kind of problem the tool or library can solve 14 | string what_it_can_do = 2; 15 | // how to use the tool or library, including how to import it and example usage 16 | string how_to_use = 3; 17 | // the timestamp in seconds 18 | int32 recall_time = 4; 19 | } 20 | 21 | message Feedback { 22 | bool is_correct = 1; 23 | // the timestamp in seconds 24 | int32 feedback_time = 2; 25 | } 26 | 27 | message ChatMessage { 28 | // the role name 29 | string role_name = 1; 30 | // the content of the chat message 31 | string content = 2; 32 | // the function name 33 | string function_name = 3; 34 | // the function call content 35 | string function_call = 4; 36 | // the timestamp in seconds 37 | int32 chat_time = 5; 38 | // the feedback of the chat message; 39 | // it will not be sent to the LLM 40 | Feedback feedback = 6; 41 | string id = 7; 42 | } 43 | 44 | // every user has their own memory 45 | message AgentMemory { 46 | octogen_agent_prompt.AgentPrompt instruction = 1; 47 | string user_id = 2; 48 | string user_name = 3; 49 | repeated GuideMemory guide_memory = 4; 50 | repeated ChatMessage chat_memory = 5; 51 | // reset the memory id to clean the memory 52 | string memory_id = 6; 53 | } 54 | -------------------------------------------------------------------------------- /.github/workflows/doc_page.yml: -------------------------------------------------------------------------------- 1 | # Simple workflow for deploying static content to GitHub Pages 2 | name: Deploy static content to Pages 3 | 4 | on: 5 | # Runs on pushes targeting the default branch 6 | push: 7 | branches: ["main"] 8 | 9 | # Allows you to run this workflow manually from the Actions tab 10 | workflow_dispatch: 11 | 12 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 13 | permissions: 14 | contents: read 15 | pages: write 16 | id-token: write 17 | 18 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 19 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
20 | concurrency: 21 | group: "pages" 22 | cancel-in-progress: false 23 | 24 | jobs: 25 | # Single deploy job since we're just deploying 26 | deploy: 27 | environment: 28 | name: github-pages 29 | url: ${{ steps.deployment.outputs.page_url }} 30 | runs-on: ubuntu-latest 31 | steps: 32 | - name: Checkout 33 | uses: actions/checkout@v3 34 | with: 35 | submodules: recursive 36 | 37 | - name: Install dependencies 38 | run: pip install -U sphinx 39 | 40 | - name: Setup Pages 41 | uses: actions/configure-pages@v3 42 | - name: build page 43 | run: | 44 | cd docs 45 | make html 46 | 47 | - name: Upload artifact 48 | uses: actions/upload-pages-artifact@v2 49 | with: 50 | path: 'docs/build/html/' 51 | - name: Deploy to GitHub Pages 52 | id: deployment 53 | uses: actions/deploy-pages@v2 54 | -------------------------------------------------------------------------------- /chat/src/og_terminal/ping.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | """ """ 8 | import sys 9 | import os 10 | import click 11 | from og_sdk.agent_sdk import AgentSyncSDK 12 | from rich.console import Console 13 | from dotenv import dotenv_values 14 | 15 | 16 | @click.command() 17 | @click.option("--octogen_dir", default="~/.octogen", help="the root path of octogen") 18 | def app(octogen_dir): 19 | console = Console() 20 | if octogen_dir.find("~") == 0: 21 | real_octogen_dir = octogen_dir.replace("~", os.path.expanduser("~")) 22 | else: 23 | real_octogen_dir = octogen_dir 24 | if not os.path.exists(real_octogen_dir): 25 | os.mkdir(real_octogen_dir) 26 | octogen_config = dotenv_values(real_octogen_dir + "/config") 27 | console = Console() 28 | try: 29 | if "api_key" not in octogen_config or "endpoint" not in octogen_config: 30 | console.print( 31 | f"❌ api key and endpoint are required! please check your config {octogen_dir}/config" 32 | ) 33 | sys.exit(1) 34 | sdk = AgentSyncSDK(octogen_config["endpoint"], octogen_config["api_key"]) 35 | sdk.connect() 36 | response = sdk.ping() 37 | if response.code == 0: 38 | console.print(f"👍 {response.msg}") 39 | sys.exit(0) 40 | else: 41 | console.print(f"❌ {response.msg}") 42 | sys.exit(1) 43 | except Exception as ex: 44 | console.print( 45 | f"❌ please check your config {octogen_dir}/config with error {ex}" 46 | ) 47 | sys.exit(1) 48 | -------------------------------------------------------------------------------- /docker/start_all.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | # start_all.sh 4 | # Copyright (C) 2023 imotai 5 | # 6 | # Distributed under terms of the MIT license. 
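# Usage: start_all.sh <root_dir> [<start_codellama: 0|1> <model_filename>]
# $1 is the data root directory; the optional $2 starts the bundled codellama
# server when set to 1, and $3 is the model filename expected under <root_dir>/model/.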
7 | # 8 | 9 | if [ "$#" -eq 0 ] 10 | then 11 | echo "No arguments supplied" 12 | exit 1 13 | fi 14 | ROOT_DIR=$1 15 | 16 | mkdir -p ${ROOT_DIR}/kernel/ws 17 | mkdir -p ${ROOT_DIR}/agent/db 18 | mkdir -p ${ROOT_DIR}/agent/logs 19 | mkdir -p ${ROOT_DIR}/kernel/config 20 | mkdir -p ${ROOT_DIR}/kernel/logs 21 | mkdir -p ${ROOT_DIR}/model_server/logs 22 | chown -R octogen:octogen ${ROOT_DIR}/kernel/ws 23 | chown -R octogen:octogen ${ROOT_DIR}/kernel/config 24 | chown -R octogen:octogen ${ROOT_DIR}/agent/db 25 | chown -R octogen:octogen ${ROOT_DIR}/agent/logs 26 | chown -R octogen:octogen ${ROOT_DIR}/kernel/logs 27 | chown -R octogen:octogen ${ROOT_DIR}/model_server/logs 28 | 29 | cat <<EOF > /bin/start_service.sh 30 | export PYTHONPATH=/home/octogen/.local/lib/python3.10/site-packages:$PYTHONPATH 31 | 32 | if [ "$2" -eq 1 ] 33 | then 34 | if [ -z "$3" ] 35 | then 36 | echo "no model name" 37 | exit 1 38 | else 39 | echo "start codellama with model name $3" 40 | mkdir -p ${ROOT_DIR}/model_server 41 | cd ${ROOT_DIR}/model_server && hap run -n codellama -- server -m ../model/$3 --alias codellama --host 127.0.0.1 --port 8080 >> ${ROOT_DIR}/model_server/logs/server.log 2>&1 42 | fi 43 | fi 44 | 45 | echo "start kernel.." 46 | cd ${ROOT_DIR}/kernel && hap run -n octogen_kernel -- og_kernel_rpc_server >> ${ROOT_DIR}/kernel/logs/kernel_rpc.log 2>&1 47 | 48 | echo "start agent.." 49 | cd ${ROOT_DIR}/agent && hap run -n octogen_agent -- og_agent_rpc_server >> ${ROOT_DIR}/agent/logs/agent_rpc.log 2>&1 50 | cd ${ROOT_DIR}/agent && hap run -n octogen_api -- og_agent_http_server >> ${ROOT_DIR}/agent/logs/agent_http.log 2>&1 51 | 52 | while true 53 | do 54 | hap status 55 | sleep 10 56 | done 57 | EOF 58 | su - octogen -c "bash /bin/start_service.sh" 59 | -------------------------------------------------------------------------------- /proto/src/og_proto/kernel_server.proto: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2023 imotai 2 | // SPDX-FileContributor: imotai 3 | // 4 | // SPDX-License-Identifier: Elastic-2.0 5 | 6 | syntax = "proto3"; 7 | package octogen_kernel_proto; 8 | 9 | import "common.proto"; 10 | 11 | // Start the kernel request 12 | message StartKernelRequest { 13 | string kernel_name = 1; 14 | } 15 | 16 | // Start the kernel response 17 | message StartKernelResponse { 18 | // the response code 19 | int32 code = 2; 20 | string msg = 3; 21 | } 22 | 23 | message StopKernelRequest { 24 | string kernel_name = 1; 25 | } 26 | 27 | message StopKernelResponse { 28 | int32 code = 1; 29 | string msg = 2; 30 | } 31 | 32 | message RestartKernelRequest { 33 | string kernel_name = 1; 34 | } 35 | 36 | message RestartKernelResponse { 37 | int32 code = 2; 38 | string msg = 3; 39 | } 40 | 41 | message ExecuteRequest { 42 | string code = 1; 43 | string kernel_name = 2; 44 | } 45 | 46 | message ExecuteResponse { 47 | enum OutputType { 48 | // the stdout stream 49 | StdoutType = 0; 50 | // the stderr stream 51 | StderrType = 1; 52 | // the result 53 | ResultType = 2; 54 | // the traceback 55 | TracebackType = 3; 56 | } 57 | OutputType output_type = 1; 58 | string output = 2; 59 | } 60 | 61 | message GetStatusRequest { 62 | string kernel_name = 1; 63 | } 64 | 65 | message GetStatusResponse { 66 | bool is_alive = 1; 67 | int32 code = 2; 68 | string msg = 3; 69 | } 70 | 71 | service KernelServerNode { 72 | rpc start(StartKernelRequest) returns (StartKernelResponse) {} 73 | rpc stop(StopKernelRequest) returns (StopKernelResponse) {} 74 | rpc 
execute(ExecuteRequest) returns (stream ExecuteResponse) {} 75 | rpc get_status(GetStatusRequest) returns (GetStatusResponse) {} 76 | rpc upload(stream octogen_common_proto.FileChunk) returns (octogen_common_proto.FileUploaded) {} 77 | rpc download(octogen_common_proto.DownloadRequest) returns (stream octogen_common_proto.FileChunk) {} 78 | } 79 | -------------------------------------------------------------------------------- /examples/chainlit/.chainlit/config.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | # Whether to enable telemetry (default: true). No personal data is collected. 3 | enable_telemetry = true 4 | 5 | # List of environment variables to be provided by each user to use the app. 6 | user_env = [] 7 | 8 | # Duration (in seconds) during which the session is saved when the connection is lost 9 | session_timeout = 3600 10 | 11 | # Enable third parties caching (e.g LangChain cache) 12 | cache = false 13 | 14 | # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317) 15 | # follow_symlink = false 16 | 17 | [features] 18 | # Show the prompt playground 19 | prompt_playground = true 20 | 21 | [UI] 22 | # Name of the app and chatbot. 23 | name = "Chatbot" 24 | 25 | # Description of the app and chatbot. This is used for HTML tags. 26 | # description = "" 27 | 28 | # Large size content are by default collapsed for a cleaner ui 29 | default_collapse_content = true 30 | 31 | # The default value for the expand messages settings. 32 | default_expand_messages = false 33 | 34 | # Hide the chain of thought details from the user in the UI. 35 | hide_cot = false 36 | 37 | # Link to your github repo. This will add a github button in the UI's header. 38 | # github = "" 39 | 40 | # Specify a CSS file that can be used to customize the user interface. 41 | # The CSS file can be served from the public directory or via an external link. 42 | # custom_css = "/public/test.css" 43 | 44 | # Override default MUI light theme. (Check theme.ts) 45 | [UI.theme.light] 46 | #background = "#FAFAFA" 47 | #paper = "#FFFFFF" 48 | 49 | [UI.theme.light.primary] 50 | #main = "#F80061" 51 | #dark = "#980039" 52 | #light = "#FFE7EB" 53 | 54 | # Override default MUI dark theme. 
(Check theme.ts) 55 | [UI.theme.dark] 56 | #background = "#FAFAFA" 57 | #paper = "#FFFFFF" 58 | 59 | [UI.theme.dark.primary] 60 | #main = "#F80061" 61 | #dark = "#980039" 62 | #light = "#FFE7EB" 63 | 64 | 65 | [meta] 66 | generated_by = "0.7.2" 67 | -------------------------------------------------------------------------------- /serving/src/og_serving/http_serving.py: -------------------------------------------------------------------------------- 1 | # vim:fenc=utf-8 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ """ 9 | import os 10 | import sys 11 | import uvicorn 12 | import logging 13 | from dotenv import dotenv_values 14 | from .server_app import create_app, Settings 15 | from llama_cpp.llama_chat_format import register_chat_format, ChatFormatterResponse, _map_roles, _format_add_colon_single 16 | from llama_cpp import llama_types 17 | from typing import Any, List 18 | 19 | config = dotenv_values(".env") 20 | 21 | settings = Settings(_env_file="model.env") 22 | LOG_LEVEL = ( 23 | logging.DEBUG if config.get("log_level", "info") == "debug" else logging.INFO 24 | ) 25 | 26 | logging.basicConfig( 27 | level=LOG_LEVEL, 28 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", 29 | handlers=[logging.StreamHandler(sys.stdout)], 30 | ) 31 | 32 | logger = logging.getLogger(__name__) 33 | 34 | 35 | @register_chat_format("phind") 36 | def format_phind( 37 | messages: List[llama_types.ChatCompletionRequestMessage], 38 | **kwargs: Any, 39 | ) -> ChatFormatterResponse: 40 | _roles = dict(user="### User Message", assistant="### Assistant") 41 | _sep = "\n\n" 42 | _system_message = "### System Prompt\nYou are an intelligent programming assistant." 43 | for message in messages: 44 | if message["role"] == "system" and message["content"]: 45 | _system_message = f"""### System Prompt\n{message['content']}""" 46 | _messages = _map_roles(messages, _roles) 47 | _messages.append((_roles["assistant"], None)) 48 | _prompt = _format_add_colon_single(_system_message, _messages, _sep) 49 | return ChatFormatterResponse(prompt=_prompt) 50 | 51 | 52 | def run_serving(): 53 | app = create_app(settings) 54 | host = config.get("host", "localhost") 55 | port = int(config.get("port", "8080")) 56 | logger.info(f"Starting serving at {host}:{port}") 57 | uvicorn.run(app, host=host, port=port) 58 | -------------------------------------------------------------------------------- /agent/src/og_agent/agent_llm.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | 8 | import logging 9 | import openai 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | class LLMManager: 15 | 16 | def __init__(self, config): 17 | """ 18 | llm_key=xxxx # one of 'mock', 'openai', 'azure_openai' or 'codellama' 19 | # the other config options are provider specific 20 | """ 21 | self.config = config 22 | self.llms = {} 23 | self.llm_key = self.config["llm_key"] 24 | if self.config["llm_key"] == "azure_openai": 25 | self._build_azure_openai() 26 | elif self.config["llm_key"] == "openai": 27 | self._build_openai() 28 | 29 | def get_llm(self): 30 | return self.llms.get(self.llm_key, None) 31 | 32 | def get_llm_by_key(self, llm_key): 33 | """ 34 | get the llm with a key, the supported keys are 'mock', 'openai', 'azure_openai', 'codellama' 35 | """ 36 | return self.llms.get(llm_key, None) 37 | 38 | def _no_empty_value_required(self, keys): 39 | for 
key in keys: 40 | if not self.config.get(key, None): 41 | raise Exception(f"the value of required {key} is empty") 42 | 43 | def _build_openai(self): 44 | self._no_empty_value_required([ 45 | "openai_api_key", 46 | "openai_api_model", 47 | ]) 48 | if self.config.get("openai_api_base", None): 49 | openai.api_base = self.config.get("openai_api_base", None) 50 | openai.api_key = self.config["openai_api_key"] 51 | 52 | def _build_azure_openai(self): 53 | """ 54 | build the azure openai client from config 55 | """ 56 | self._no_empty_value_required([ 57 | "openai_api_base", 58 | "openai_api_version", 59 | "openai_api_key", 60 | "openai_api_type", 61 | "openai_api_deployment", 62 | ]) 63 | openai.api_base = self.config["openai_api_base"] 64 | openai.api_version = self.config["openai_api_version"] 65 | openai.api_type = self.config["openai_api_type"] 66 | openai.api_key = self.config["openai_api_key"] 67 | self.config["openai_api_model"] = self.config["openai_api_deployment"] 68 | -------------------------------------------------------------------------------- /sdk/tests/utils_test.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | import pytest 8 | import json 9 | from og_sdk.utils import process_char_stream 10 | 11 | 12 | def test_process_char_stream_case2(): 13 | stream1 = "\rt: 0%| | 0/518 [00:00,.?/`~") 69 | == "ab!@#$%^&*()_+{}|:\";'<>,.?/`~" 70 | ) 71 | -------------------------------------------------------------------------------- /.github/workflows/release_libraries.yml: -------------------------------------------------------------------------------- 1 | name: Release CLI 2 | on: 3 | push: 4 | tags: 5 | - "[v]?[0-9]+.[0-9]+.[0-9]+" 6 | jobs: 7 | pypi-publish: 8 | name: Release the library to PyPI 9 | #runs-on: self-hosted 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout repository 13 | uses: actions/checkout@v3 14 | with: 15 | submodules: recursive 16 | - name: Install dependencies 17 | run: pip install -r requirements.txt 18 | - name: build packages 19 | run: | 20 | TAG=${GITHUB_REF/refs\/tags\//} 21 | VERSION=${TAG#*v} 22 | bash build_package.sh ${VERSION} 23 | - name: Publish Proto 24 | uses: pypa/gh-action-pypi-publish@release/v1 25 | with: 26 | packages-dir: proto/dist 27 | password: ${{ secrets.PYPI_TOKEN }} 28 | - name: Publish Kernel 29 | uses: pypa/gh-action-pypi-publish@release/v1 30 | with: 31 | packages-dir: kernel/dist 32 | password: ${{ secrets.PYPI_TOKEN }} 33 | - name: Publish Agent 34 | uses: pypa/gh-action-pypi-publish@release/v1 35 | with: 36 | packages-dir: agent/dist 37 | password: ${{ secrets.PYPI_TOKEN }} 38 | - name: Publish Chat 39 | uses: pypa/gh-action-pypi-publish@release/v1 40 | with: 41 | packages-dir: chat/dist 42 | password: ${{ secrets.PYPI_TOKEN }} 43 | 44 | - name: Publish Up 45 | uses: pypa/gh-action-pypi-publish@release/v1 46 | with: 47 | packages-dir: up/dist 48 | password: ${{ secrets.PYPI_TOKEN }} 49 | 50 | - name: Publish SDK 51 | uses: pypa/gh-action-pypi-publish@release/v1 52 | with: 53 | packages-dir: sdk/dist 54 | password: ${{ secrets.PYPI_TOKEN }} 55 | 56 | - name: docker login 57 | uses: docker/login-action@v3 58 | with: 59 | username: ${{ secrets.DOCKERHUB_USERNAME }} 60 | password: ${{ secrets.DOCKERHUB_TOKEN }} 61 | 62 | - name: Build Docker image 63 | run: | 64 | ROOT_DIR=`pwd` 65 | RELEASE_NAME=${GITHUB_REF/refs\/tags\//} 66 | cd ${ROOT_DIR}/docker 
&& docker build -f Dockerfile --no-cache -t dbpunk/octogen:${RELEASE_NAME} . 67 | cd ${ROOT_DIR}/docker && docker build -f Dockerfile_chrome -t dbpunk/octogen_chrome:${RELEASE_NAME} . 68 | docker push dbpunk/octogen:${RELEASE_NAME} 69 | docker push dbpunk/octogen_chrome:${RELEASE_NAME} 70 | echo "the new images version $RELEASE_NAME" 71 | 72 | 73 | -------------------------------------------------------------------------------- /agent/tests/agent_api_tests.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | 8 | import os 9 | import pytest 10 | import asyncio 11 | import logging 12 | import json 13 | import random 14 | import logging 15 | from tempfile import gettempdir 16 | from pathlib import Path 17 | from og_sdk.agent_sdk import AgentProxySDK 18 | from og_sdk.utils import random_str 19 | from og_agent import agent_api_server 20 | 21 | logger = logging.getLogger(__name__) 22 | api_base = "127.0.0.1:9528" 23 | api_key = "ZCeI9cYtOCyLISoi488BgZHeBkHWuFUH" 24 | 25 | 26 | @pytest.fixture 27 | def agent_sdk(): 28 | sdk = AgentProxySDK(api_base) 29 | sdk.connect() 30 | yield sdk 31 | 32 | 33 | @pytest.mark.asyncio 34 | async def test_helloworld_test(agent_sdk): 35 | await agent_sdk.add_kernel(api_key, "127.0.0.1:9527", api_key) 36 | agent_api_server.agent_sdk = agent_sdk 37 | request = agent_api_server.TaskRequest( 38 | prompt="hello", token_limit=0, llm_model_name="", input_files=[], context_id="" 39 | ) 40 | responds = [] 41 | async for respond in agent_api_server.run_task(request, api_key): 42 | json_data = respond[6:] 43 | responds.append(json.loads(respond[6:])) 44 | logger.debug(f"{responds}") 45 | assert len(responds) > 0, "no responds for the prompt" 46 | assert ( 47 | responds[len(responds) - 1]["step_type"] 48 | == agent_api_server.StepResponseType.OnFinalAnswer 49 | ) 50 | assert ( 51 | responds[len(responds) - 1]["final_answer"]["answer"] 52 | == "how can I help you today?" 
53 | ) 54 | 55 | 56 | @pytest.mark.asyncio 57 | async def test_run_code_test(agent_sdk): 58 | agent_api_server.agent_sdk = agent_sdk 59 | sdk = agent_sdk 60 | await sdk.add_kernel(api_key, "127.0.0.1:9527", api_key) 61 | request = agent_api_server.TaskRequest( 62 | prompt="write a hello world in python", 63 | token_limit=0, 64 | llm_model_name="", 65 | input_files=[], 66 | context_id="", 67 | ) 68 | responds = [] 69 | async for respond in agent_api_server.run_task(request, api_key): 70 | responds.append(json.loads(respond[6:])) 71 | logger.debug(f"{responds}") 72 | assert len(responds) > 0, "no responds for the prompt" 73 | assert ( 74 | responds[len(responds) - 1]["step_type"] 75 | == agent_api_server.StepResponseType.OnFinalAnswer 76 | ) 77 | assert ( 78 | responds[len(responds) - 1]["final_answer"]["answer"] 79 | == "this code prints 'hello world'" 80 | ) 81 | -------------------------------------------------------------------------------- /sdk/src/og_sdk/kernel_sdk.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | 8 | import logging 9 | import grpc 10 | from grpc import aio 11 | from og_proto import kernel_server_pb2 12 | from og_proto import common_pb2 13 | from og_proto.kernel_server_pb2_grpc import KernelServerNodeStub 14 | from typing import AsyncIterable 15 | 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | class KernelSDK: 20 | 21 | def __init__(self, endpoint, api_key): 22 | self.endpoint = endpoint 23 | self.stub = None 24 | self.metadata = aio.Metadata( 25 | ("api_key", api_key), 26 | ) 27 | 28 | def connect(self): 29 | """ 30 | Connect the remote kernel instance 31 | """ 32 | channel = aio.insecure_channel(self.endpoint) 33 | self.channel = channel 34 | self.stub = KernelServerNodeStub(channel) 35 | 36 | async def stop(self, kernel_name=None): 37 | """ 38 | Stop the kernel 39 | """ 40 | request = kernel_server_pb2.StopKernelRequest(kernel_name=kernel_name) 41 | response = await self.stub.stop(request, metadata=self.metadata) 42 | return response 43 | 44 | async def is_alive(self, kernel_name=None): 45 | request = kernel_server_pb2.GetStatusRequest(kernel_name=kernel_name) 46 | response = await self.stub.get_status(request, metadata=self.metadata) 47 | return response.is_alive 48 | 49 | async def download_file(self, filename): 50 | request = common_pb2.DownloadRequest(filename=filename) 51 | async for chunk in self.stub.download(request, metadata=self.metadata): 52 | yield chunk 53 | 54 | async def upload_binary(self, chunks: AsyncIterable[common_pb2.FileChunk]): 55 | try: 56 | return await self.stub.upload(chunks, metadata=self.metadata) 57 | except Exception as ex: 58 | logger.error("upload file ex %s" % ex) 59 | 60 | async def start(self, kernel_name=None): 61 | """ 62 | Start the kernel 63 | """ 64 | request = kernel_server_pb2.StartKernelRequest(kernel_name=kernel_name) 65 | response = await self.stub.start(request, metadata=self.metadata) 66 | return response 67 | 68 | async def execute(self, code, kernel_name=None): 69 | """ 70 | Execute the python code 71 | """ 72 | request = kernel_server_pb2.ExecuteRequest(code=code, kernel_name=kernel_name) 73 | async for respond in self.stub.execute(request, metadata=self.metadata): 74 | yield respond 75 | 76 | async def close(self): 77 | if self.channel: 78 | await self.channel.close() 79 | self.channel = None 80 | 
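# A minimal usage sketch for KernelSDK; illustrative only. It assumes a kernel
# RPC server is reachable at localhost:9527 with the api key used by the tests
# in sdk/tests/kernel_sdk_tests.py.
import asyncio
from og_sdk.kernel_sdk import KernelSDK

async def demo():
    sdk = KernelSDK("localhost:9527", "ZCeI9cYtOCyLISoi488BgZHeBkHWuFUH")
    sdk.connect()
    # start a kernel instance if none is alive yet
    if not await sdk.is_alive():
        await sdk.start()
    # execute() streams back stdout/stderr/result chunks as they are produced
    async for respond in sdk.execute("print('hello world!')"):
        print(respond.output_type, respond.output)
    await sdk.stop()
    await sdk.close()

asyncio.run(demo())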
-------------------------------------------------------------------------------- /kernel/src/og_kernel/kernel/kernel_mgr.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | import os 7 | import logging 8 | import subprocess 9 | import json 10 | import sys 11 | import pathlib 12 | from time import sleep 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | """ 17 | The Jupyter kernel manager used for 18 | - create kernel instance 19 | - start kernel instance 20 | - stop kernel instance 21 | 22 | Typical usage example: 23 | config_path = "kernel_connection_file.json" 24 | workspace = "/mnt/workspace1" 25 | km = KernelManager(config_path, workspace) 26 | # start the kernel 27 | km.start() 28 | """ 29 | 30 | 31 | class KernelManager: 32 | 33 | def __init__(self, config_path: str, workspace: str, kernel: str = "python3"): 34 | if not config_path or not workspace: 35 | raise ValueError( 36 | f"config path={config_path} or workspace={workspace} is empty" 37 | ) 38 | self.config_path = config_path 39 | self.workspace = workspace 40 | self.process = None 41 | self.is_running = False 42 | logger.info( 43 | "new kernel manager with config path %s and workspace %s", 44 | config_path, 45 | workspace, 46 | ) 47 | self.kernel = kernel 48 | 49 | def start(self): 50 | """ 51 | Start a kernel instance and generate the kernel connection file 52 | """ 53 | self.is_running = True 54 | 55 | os.makedirs(self.workspace, exist_ok=True) 56 | launch_kernel_script_path = os.path.join( 57 | pathlib.Path(__file__).parent.resolve(), "launch_kernel.py" 58 | ) 59 | self.process = subprocess.Popen( 60 | [ 61 | sys.executable, 62 | launch_kernel_script_path, 63 | "--connection_file=" + self.config_path, 64 | "--kernel=" + self.kernel, 65 | ], 66 | cwd=self.workspace, 67 | ) 68 | logger.info("Start the kernel with process id %s", str(self.process.pid)) 69 | while True: 70 | if not os.path.isfile(self.config_path): 71 | sleep(1) 72 | else: 73 | try: 74 | with open(self.config_path, "r") as fp: 75 | logger.info("connection file content %s", json.load(fp)) 76 | break 77 | except json.JSONDecodeError: 78 | pass 79 | 80 | def stop(self): 81 | """ 82 | stop the kernel instance 83 | """ 84 | self.is_running = False 85 | if self.process: 86 | logger.info("stop the kernel with process id %s", str(self.process.pid)) 87 | self.process.terminate() 88 | self.process.wait() 89 | self.process = None 90 | 91 | def __str__(self): 92 | return f'KernelManager(config_path="{self.config_path}", workspace="{self.workspace}")' 93 | -------------------------------------------------------------------------------- /memory/tests/memory_tests.py: -------------------------------------------------------------------------------- 1 | # vim:fenc=utf-8 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ 9 | 10 | """ 11 | import json 12 | from og_memory.memory import agent_memory_to_context, AgentMemoryOption 13 | from og_proto.memory_pb2 import AgentMemory, ChatMessage, GuideMemory, Feedback 14 | from og_proto.prompt_pb2 import AgentPrompt, ActionDesc 15 | # define a logger variable 16 | import logging 17 | logger = logging.getLogger(__name__) 18 | 19 | def test_agent_memory_to_context_smoke_test(): 20 | """ 21 | smoke test for agent_memory_to_context 22 | """ 23 | 24 | action = 
ActionDesc(name="execute_python_code", desc="run python code", parameters=json.dumps({ 25 | "type": "object", 26 | "properties": { 27 | "explanation": { 28 | "type": "string", 29 | "description": "the explanation about the bash code", 30 | }, 31 | "code": { 32 | "type": "string", 33 | "description": "the bash code to be executed", 34 | }, 35 | "language": { 36 | "type": "string", 37 | "description": "the language of the code", 38 | }, 39 | "saved_filenames": { 40 | "type": "array", 41 | "items": {"type": "string"}, 42 | "description": "A list of filenames that were created by the code", 43 | }, 44 | }, 45 | "required": ["explanation", "code", "language"], 46 | })) 47 | rules = ["To complete the goal, write a plan and execute it step-by-step, limiting the number of steps to five. the following are examples", "rule2"] 48 | prompt = AgentPrompt(actions=[action, action], rules=rules, 49 | role="You are the QA engineer", 50 | role_name="Kitty", output_format="") 51 | context = agent_memory_to_context(prompt, [], AgentMemoryOption(show_function_instruction=True)) 52 | expected_context="""You are the QA engineer 53 | Follow the rules 54 | 1.To complete the goal, write a plan and execute it step-by-step, limiting the number of steps to five. the following are examples 55 | 2.rule2 56 | Use the following actions to help you finishing your task 57 | 58 | 1.execute_python_code: run python code, the following are parameters 59 | explanation(string):the explanation about the bash code 60 | code(string):the bash code to be executed 61 | language(string):the language of the code 62 | saved_filenames(array):A list of filenames that were created by the code 63 | 64 | 2.execute_python_code: run python code, the following are parameters 65 | explanation(string):the explanation about the bash code 66 | code(string):the bash code to be executed 67 | language(string):the language of the code 68 | saved_filenames(array):A list of filenames that were created by the code 69 | """ 70 | assert context == expected_context, "context is not expected" 71 | -------------------------------------------------------------------------------- /agent/src/og_agent/prompt.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | import json 7 | from og_proto.prompt_pb2 import ActionDesc 8 | 9 | ROLE = """You are the Programming Copilot called Octogen, a world-class programmer to complete any goal by executing code""" 10 | 11 | RULES = [ 12 | "To complete the goal, write a plan and execute it step-by-step, limiting the number of steps to five", 13 | "Every step must include the explanation and the code block. 
if the code block has any display data, save it as a file and add it to saved_filenames field", 14 | "You have a fully controlled programming environment to execute code with internet connection but sudo is not allowed", 15 | "You must try to correct your code when you get errors from the output", 16 | "You can install new package with pip", 17 | "Use `execute` action to execute any code and `direct_message` action to send message to user", 18 | ] 19 | 20 | FUNCTION_EXECUTE = ActionDesc( 21 | name="execute", 22 | desc="This action executes code in your programming environment and returns the output", 23 | parameters=json.dumps({ 24 | "type": "object", 25 | "properties": { 26 | "explanation": { 27 | "type": "string", 28 | "description": "the explanation about the code parameters", 29 | }, 30 | "code": { 31 | "type": "string", 32 | "description": "the bash code to be executed", 33 | }, 34 | "language": { 35 | "type": "string", 36 | "description": "the language of the code, only python and bash are supported", 37 | }, 38 | "saved_filenames": { 39 | "type": "array", 40 | "items": {"type": "string"}, 41 | "description": "A list of filenames that were created by the code", 42 | }, 43 | }, 44 | "required": ["explanation", "code", "language"], 45 | }), 46 | ) 47 | 48 | FUNCTION_DIRECT_MESSAGE = ActionDesc( 49 | name="direct_message", 50 | desc="This action sends a direct message to user.", 51 | parameters=json.dumps({ 52 | "type": "object", 53 | "properties": { 54 | "message": { 55 | "type": "string", 56 | "description": "the message will be sent to user", 57 | }, 58 | }, 59 | "required": ["message"], 60 | }), 61 | ) 62 | 63 | ACTIONS = [FUNCTION_EXECUTE] 64 | 65 | OUTPUT_FORMAT = """The output format must be a JSON format with the following fields: 66 | * function_call: The name of the action 67 | * arguments: The arguments of the action 68 | """ 69 | 70 | OCTOGEN_CODELLAMA_MID_INS = """The above output of the %s determines whether the execution is successful. 71 | If successful, go to the next step. If the current step is the final step, summarize the entire plan. If not, adjust the input and try again""" 72 | 73 | OCTOGEN_CODELLAMA_MID_ERROR_INS = """Adjust the action input and try again for the above output of %s showing the error message""" 74 | -------------------------------------------------------------------------------- /README_zh_cn.md: -------------------------------------------------------------------------------- 1 |

2 | 3 | 4 | ![GitHub Workflow Status (with event)](https://img.shields.io/github/actions/workflow/status/dbpunk-labs/octogen/ci.yaml) 5 | [![PyPI - Version](https://img.shields.io/pypi/v/og_chat)](https://pypi.org/project/og-chat/) 6 | ![PyPI - Downloads](https://img.shields.io/pypi/dm/og_chat?logo=pypi) 7 | [![Gitter](https://img.shields.io/gitter/room/octogen/%20)](https://app.gitter.im/#/room/#octogen:gitter.im) 8 | 9 | [English](./README.md) 10 | > ## Octogen 11 | > An open-source code interpreter built for developers 12 | 13 | 

14 | 15 |  17 |  18 | |Supported OSs|Supported Interpreters|Supported Dev Environment| 19 | |----|-----|-----| 20 | | | | | 21 |  22 |  23 | 24 | ## Quick Start 25 | 26 | Install Octogen on your local machine; you can choose between OpenAI and CodeLlama-7B 27 | 28 | Local environment requirements 29 | * python 3.10 and above 30 | * pip 31 | * [docker](https://www.docker.com/products/docker-desktop/) 24.0.0 and above or [Podman](https://podman.io/) 32 | 33 | 34 | Install the Octogen launcher 35 | 36 | ```bash 37 | pip install og_up 38 | ``` 39 | 40 | Use the og_up launcher to initialize the local environment 41 | ``` 42 | og_up 43 | ``` 44 | 45 | Start using Octogen by running `og` in your terminal 46 | 47 | ``` 48 | Welcome to use octogen❤️ . To ask a programming question, simply type your question and press esc + enter 49 | You can use /help to look for help 50 | 51 | [1]🎧> 52 | ``` 53 | 54 | ## How Octogen Works Internally 55 | 56 | ![octogen-internal drawio](https://github.com/dbpunk-labs/octogen/assets/8623385/95dd6f84-6de8-476a-9c66-9ab591ed9b0e) 57 | 58 | * Octogen Kernel: the code execution engine, currently implemented on top of notebook kernels 59 | * Octogen Agent: handles user requests, forwards them to the LLM service API, and sends the code generated by the LLM to the Octogen Kernel for execution 60 | * Octogen CLI: sends user requests to the Agent and renders the code, text, and images returned by the Agent 61 | 62 | All components exchange data in a streaming fashion, so every character the LLM writes is displayed in the terminal in real time. 63 | 64 | ## Features 65 | 66 | * Automatically executes code in a Docker environment 67 | * Experimental: displays images in the iTerm2 and kitty terminals 68 | * Supports uploading files to the Octogen Kernel with the `/up` command; you can use it while writing your question 69 | * Experimental: assembles code snippets generated by the LLM into an application that can then be run directly with the `/run` command 70 | * Supports copying output text and code to the clipboard with the `/cc` command 71 | * Supports question history; your past questions are saved locally 72 | 73 | If you have a feature request, please open a discussion thread. 74 | 75 | ## Roadmap 76 | 77 | * [roadmap for v0.5.0](https://github.com/dbpunk-labs/octogen/issues/64) 78 | 79 | -------------------------------------------------------------------------------- /roles/src/og_roles/code_interpreter.py: -------------------------------------------------------------------------------- 1 | # vim:fenc=utf-8 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ 9 | 10 | """ 11 | import json 12 | from og_proto.prompt_pb2 import ActionDesc 13 | 14 | 15 | ROLE = f"""You are the Programming Copilot, a world-class programmer to complete any goal by executing code""" 16 | RULES = [ 17 | "To complete the goal, write a plan and execute it step-by-step, limiting the number of steps to five", 18 | "Every step must include the explanation and the code block. 
if the code block has any display data, save it as a file and add it to saved_filenames field", 19 | "You have a fully controlled programming environment to execute code with internet connection but sudo is not allowed", 20 | "You must try to correct your code when you get errors from the output", 21 | "You can install new package with pip", 22 | "Use `execute` action to execute any code and `direct_message` action to send message to user", 23 | ] 24 | FUNCTION_EXECUTE = ActionDesc( 25 | name="execute", 26 | desc="This action executes code in your programming environment and returns the output", 27 | parameters=json.dumps({ 28 | "type": "object", 29 | "properties": { 30 | "explanation": { 31 | "type": "string", 32 | "description": "the explanation about the code parameters", 33 | }, 34 | "code": { 35 | "type": "string", 36 | "description": "the bash code to be executed", 37 | }, 38 | "language": { 39 | "type": "string", 40 | "description": "the language of the code, only python and bash are supported", 41 | }, 42 | "saved_filenames": { 43 | "type": "array", 44 | "items": {"type": "string"}, 45 | "description": "A list of filenames that were created by the code", 46 | }, 47 | }, 48 | "required": ["explanation", "code", "language"], 49 | }), 50 | ) 51 | 52 | FUNCTION_DIRECT_MESSAGE = ActionDesc( 53 | name="direct_message", 54 | desc="This action sends a direct message to user.", 55 | parameters=json.dumps({ 56 | "type": "object", 57 | "properties": { 58 | "message": { 59 | "type": "string", 60 | "description": "the message will be sent to user", 61 | }, 62 | }, 63 | "required": ["message"], 64 | }), 65 | ) 66 | ACTIONS = [ 67 | FUNCTION_EXECUTE 68 | ] 69 | OUTPUT_FORMAT = """The output format must be a JSON format with the following fields: 70 | * function_call: The name of the action 71 | * arguments: The arguments of the action 72 | """ 73 | 74 | OCTOGEN_CODELLAMA_MID_INS = """The above output of the %s determines whether the execution is successful. 75 | If successful, go to the next step. If the current step is the final step, summarize the entire plan. If not, adjust the input and try again""" 76 | 77 | OCTOGEN_CODELLAMA_MID_ERROR_INS = """Adjust the action input and try again for the above output of %s showing the error message""" 78 | -------------------------------------------------------------------------------- /docs/source/getstarted.rst: -------------------------------------------------------------------------------- 1 | Getting Started 2 | =============== 3 | 4 | Octogen is an Open-Source Code Interpreter Powered by GPT 3.5/4 and CodeLlama. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Octogen works with macOS, Linux and Windows. 10 | Octogen requires the following environment 11 | 12 | - Python 3.10.0 and above. 
13 | - `Pip `_ 14 | - `Docker Desktop 24.0.0 and above `_ or `Podman `_ 15 | 16 | To use CodeLlama, your host must have at least 8 CPUs and 16 GB of RAM. 17 | 18 | Install 19 | ------- 20 | 21 | First, install the ``og_up`` tool:: 22 | 23 | $ pip install og_up 24 | 25 | Second, use ``og_up`` to set up the Octogen service and CLI:: 26 | 27 | $ og_up 28 | 29 | You have the option to select from 30 | 31 | - OpenAI, `apply `_ for an OpenAI API key 32 | - Azure OpenAI, `apply `_ for an Azure OpenAI API key 33 | - CodeLlama 34 | - Octogen (beta) agent services, `apply `_ for an Octogen agent service key 35 | 36 | Docker is the default container engine; to use Podman instead, pass the ``--use_podman`` flag 37 | 38 | If you opt for CodeLlama, Octogen will automatically download it from huggingface.co. 39 | In case the installation of the Octogen Terminal CLI is taking longer than expected, 40 | you might want to consider switching to a different pip mirror. 41 | 42 | Third, open your terminal and execute the command ``og``; you will see the following output:: 43 | 44 | Welcome to use octogen❤️ . To ask a programming question, simply type your question and press esc + enter 45 | You can use /help to look for help 46 | 47 | [1]🎧> 48 | 49 | 50 | How to use 51 | ---------- 52 | 53 | Just type your question and press ``esc + enter`` to get the answer. To see all available functions, type ``/help`` + ``esc + enter``:: 54 | 55 | [1]🎧>/help 56 | Keyboard Shortcut: 57 | 58 | • ESC + ENTER: Submit your question to Octogen or execute your command. 59 | 60 | Commands: 61 | 62 | • /clear: Clears the screen. 63 | • /cc{number}: Copies the output of Octogen to your clipboard. 64 | • /exit: Exits the Octogen CLI. 65 | • /up: Uploads files from your local machine; useful for including in your questions. 66 | • /assemble {name} {number1} {number2}: Assembles the specified code segments into an application. 67 | • /run {name}: Executes an application with the specified name. 68 | • /apps: Displays a list of all your apps. 69 | 70 | Need Help? 71 | 72 | 1 Create an issue on our GitHub page: Octogen GitHub Issues 73 | 2 Alternatively, you can email us at codego.me@gmail.com. 74 | 75 | 76 | How it works 77 | ------------ 78 | 79 | .. image:: _static/octogen-internal.drawio.png 80 | 81 | - Octogen Kernel: The code execution engine, based on notebook kernels. 82 | - Octogen Agent: Manages client requests, uses ReAct to process complex tasks, and stores user-assembled applications. 83 | - Octogen Terminal CLI: Accepts user requests, sends them to the Agent, and renders rich results. Currently supports Discord, iTerm2, and Kitty terminals. 
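Scripting the agent
-------------------

Besides the terminal client, you can drive the agent from Python with the ``og_sdk`` package that the CLI itself uses. The snippet below is a minimal sketch, assuming your ``~/.octogen/config`` already contains a valid ``endpoint`` and ``api_key`` (the same fields that ``og_ping`` reads)::

    import os
    from dotenv import dotenv_values
    from og_sdk.agent_sdk import AgentSyncSDK

    config = dotenv_values(os.path.expanduser("~/.octogen/config"))
    sdk = AgentSyncSDK(config["endpoint"], config["api_key"])
    sdk.connect()
    response = sdk.ping()
    print(response.code, response.msg)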
84 | 85 | -------------------------------------------------------------------------------- /kernel/src/og_kernel/kernel/kernel_app.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | """An application to launch a kernel by name in a local subprocess.""" 8 | import os 9 | import signal 10 | import uuid 11 | 12 | from jupyter_core.application import JupyterApp, base_flags 13 | from tornado.ioloop import IOLoop 14 | from traitlets import Unicode 15 | from jupyter_client.kernelspec import NATIVE_KERNEL_NAME, KernelSpecManager 16 | from jupyter_client.manager import KernelManager 17 | 18 | 19 | class KernelApp(JupyterApp): 20 | """Launch a kernel by name in a local subprocess.""" 21 | 22 | description = "Run a kernel locally in a subprocess" 23 | classes = [KernelManager, KernelSpecManager] 24 | aliases = { 25 | "kernel": "KernelApp.kernel_name", 26 | "ip": "KernelManager.ip", 27 | "connection_file": "KernelApp.connection_file", 28 | } 29 | flags = {"debug": base_flags["debug"]} 30 | kernel_name = Unicode( 31 | NATIVE_KERNEL_NAME, help="The name of a kernel type to start" 32 | ).tag(config=True) 33 | connection_file = Unicode("", help="The connection file path of the kernel").tag( 34 | config=True 35 | ) 36 | 37 | def initialize(self, argv=None): 38 | """Initialize the application.""" 39 | super().initialize(argv) 40 | cf_basename = ( 41 | self.connection_file 42 | if self.connection_file 43 | else "kernel-%s.json" % uuid.uuid4() 44 | ) 45 | self.config.setdefault("KernelManager", {}).setdefault( 46 | "connection_file", os.path.join(self.runtime_dir, cf_basename) 47 | ) 48 | self.km = KernelManager(kernel_name=self.kernel_name, config=self.config) 49 | 50 | self.loop = IOLoop.current() 51 | self.loop.add_callback(self._record_started) 52 | 53 | def setup_signals(self) -> None: 54 | """Shutdown on SIGTERM or SIGINT (Ctrl-C)""" 55 | if os.name == "nt": 56 | return 57 | 58 | def shutdown_handler(signo, frame): 59 | self.loop.add_callback_from_signal(self.shutdown, signo) 60 | 61 | for sig in [signal.SIGTERM, signal.SIGINT]: 62 | signal.signal(sig, shutdown_handler) 63 | 64 | def shutdown(self, signo: int) -> None: 65 | """Shut down the application.""" 66 | self.log.info("Shutting down on signal %d", signo) 67 | self.km.shutdown_kernel() 68 | self.loop.stop() 69 | 70 | def log_connection_info(self) -> None: 71 | """Log the connection info for the kernel.""" 72 | cf = self.km.connection_file 73 | self.log.info("Connection file: %s", cf) 74 | self.log.info("To connect a client: --existing %s", os.path.basename(cf)) 75 | 76 | def _record_started(self) -> None: 77 | """For tests, create a file to indicate that we've started 78 | 79 | Do not rely on this except in our own tests! 
80 | """ 81 | fn = os.environ.get("JUPYTER_CLIENT_TEST_RECORD_STARTUP_PRIVATE") 82 | if fn is not None: 83 | with open(fn, "wb"): 84 | pass 85 | 86 | def start(self) -> None: 87 | """Start the application.""" 88 | self.log.info("Starting kernel %r", self.kernel_name) 89 | try: 90 | self.km.start_kernel() 91 | self.log_connection_info() 92 | self.setup_signals() 93 | self.loop.start() 94 | finally: 95 | self.km.cleanup_resources() 96 | 97 | 98 | def run_app(): 99 | KernelApp.launch_instance() 100 | -------------------------------------------------------------------------------- /sdk/tests/kernel_sdk_tests.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | import os 8 | import asyncio 9 | import pytest 10 | import logging 11 | import json 12 | from og_sdk.kernel_sdk import KernelSDK 13 | from og_sdk.utils import generate_async_chunk 14 | from og_proto.kernel_server_pb2 import ExecuteResponse 15 | import aiofiles 16 | from typing import AsyncIterable 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | 21 | @pytest.fixture 22 | def kernel_sdk(): 23 | endpoint = ( 24 | "localhost:9527" # Replace with the actual endpoint of your test gRPC server 25 | ) 26 | return KernelSDK(endpoint, "ZCeI9cYtOCyLISoi488BgZHeBkHWuFUH") 27 | 28 | 29 | @pytest.fixture 30 | def bad_kernel_sdk(): 31 | endpoint = ( 32 | "localhost:9527" # Replace with the actual endpoint of your test gRPC server 33 | ) 34 | return KernelSDK(endpoint, "ZCeI9cYtOCyLISoi488BgZHeBkHWuFU") 35 | 36 | 37 | @pytest.mark.asyncio 38 | async def test_bad_sdk(bad_kernel_sdk): 39 | try: 40 | kernel_sdk.connect() 41 | assert kernel_sdk.stub is not None # Check that stub is initialized 42 | await kernel_sdk.start() 43 | assert False 44 | except Exception as e: 45 | assert True 46 | 47 | 48 | @pytest.mark.asyncio 49 | async def test_upload_and_download_smoke_test(kernel_sdk): 50 | kernel_sdk.connect() 51 | path = os.path.abspath(__file__) 52 | response = await kernel_sdk.upload_binary( 53 | generate_async_chunk(path, "kernel_sdk_tests.py") 54 | ) 55 | assert response 56 | file_stats = os.stat(path) 57 | assert response.length == file_stats.st_size, "bad upload file size" 58 | length = 0 59 | async for chunk in kernel_sdk.download_file("kernel_sdk_tests.py"): 60 | length += len(chunk.buffer) 61 | assert length == file_stats.st_size, "bad upload file size" 62 | 63 | 64 | @pytest.mark.asyncio 65 | async def test_stop_kernel(kernel_sdk): 66 | kernel_sdk.connect() 67 | assert kernel_sdk.stub is not None # Check that stub is initialized 68 | if not await kernel_sdk.is_alive(): 69 | await kernel_sdk.start() 70 | assert await kernel_sdk.is_alive() 71 | response = await kernel_sdk.stop() 72 | assert response.code == 0 73 | assert not await kernel_sdk.is_alive() 74 | 75 | 76 | @pytest.mark.asyncio 77 | async def test_sdk_smoke_test(kernel_sdk): 78 | kernel_sdk.connect() 79 | assert kernel_sdk.stub is not None # Check that stub is initialized 80 | if not await kernel_sdk.is_alive(): 81 | await kernel_sdk.start() 82 | code = """print('hello world!')""" 83 | responds = [] 84 | async for respond in kernel_sdk.execute(code): 85 | responds.append(respond) 86 | await kernel_sdk.stop() 87 | assert len(responds) == 1 88 | assert responds[0].output_type == ExecuteResponse.StdoutType 89 | assert json.loads(responds[0].output)["text"] == "hello world!\n" 90 | 91 | 92 | @pytest.mark.asyncio 
93 | async def test_sdk_result_test(kernel_sdk): 94 | kernel_sdk.connect() 95 | assert kernel_sdk.stub is not None # Check that stub is initialized 96 | if not await kernel_sdk.is_alive(): 97 | await kernel_sdk.start() 98 | code = """print('hello world!') 99 | 5""" 100 | responds = [] 101 | async for respond in kernel_sdk.execute(code): 102 | responds.append(respond) 103 | await kernel_sdk.stop() 104 | assert len(responds) == 2 105 | assert responds[0].output_type == ExecuteResponse.StdoutType 106 | assert responds[1].output_type == ExecuteResponse.ResultType 107 | assert json.loads(responds[0].output)["text"] == "hello world!\n" 108 | assert json.loads(responds[1].output)["text/plain"] == "5" 109 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | sandbox/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | cover/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | db.sqlite3-journal 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | .pybuilder/ 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | # For a library or package, you might want to ignore these files since the code is 88 | # intended to run in multiple environments; otherwise, check them in: 89 | # .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # poetry 99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 100 | # This is especially recommended for binary packages to ensure reproducibility, and is more 101 | # commonly ignored for libraries. 102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 103 | #poetry.lock 104 | 105 | # pdm 106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 107 | #pdm.lock 108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 109 | # in version control. 
110 | # https://pdm.fming.dev/#use-with-ide 111 | .pdm.toml 112 | 113 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 114 | __pypackages__/ 115 | 116 | # Celery stuff 117 | celerybeat-schedule 118 | celerybeat.pid 119 | 120 | # SageMath parsed files 121 | *.sage.py 122 | 123 | # Environments 124 | .env 125 | .venv 126 | env/ 127 | venv/ 128 | ENV/ 129 | env.bak/ 130 | venv.bak/ 131 | 132 | # Spyder project settings 133 | .spyderproject 134 | .spyproject 135 | 136 | # Rope project settings 137 | .ropeproject 138 | 139 | # mkdocs documentation 140 | /site 141 | 142 | # mypy 143 | .mypy_cache/ 144 | .dmypy.json 145 | dmypy.json 146 | 147 | # Pyre type checker 148 | .pyre/ 149 | 150 | # pytype static type analyzer 151 | .pytype/ 152 | 153 | # Cython debug symbols 154 | cython_debug/ 155 | 156 | # PyCharm 157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 159 | # and can be added to the global gitignore or merged into this file. For a more nuclear 160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 161 | #.idea/ 162 | 163 | .cosine -------------------------------------------------------------------------------- /sdk/src/og_sdk/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 dbpunk.com Author imotai 2 | # SPDX-FileCopyrightText: 2023 imotai 3 | # SPDX-FileContributor: imotai 4 | # 5 | # SPDX-License-Identifier: Elastic-2.0 6 | 7 | import re 8 | import string 9 | import random 10 | import aiofiles 11 | import logging 12 | from og_proto import agent_server_pb2, common_pb2 13 | from typing import AsyncIterable 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | 18 | def generate_chunk(filepath, filename) -> common_pb2.FileChunk: 19 | try: 20 | with open(filepath, "rb") as fp: 21 | while True: 22 | chunk = fp.read(1024 * 128) 23 | if not chunk: 24 | break 25 | yield common_pb2.FileChunk(buffer=chunk, filename=filename) 26 | except Exception as ex: 27 | logger.error("fail to read file %s" % ex) 28 | 29 | 30 | async def generate_async_chunk( 31 | filepath, filename 32 | ) -> AsyncIterable[common_pb2.FileChunk]: 33 | try: 34 | async with aiofiles.open(filepath, "rb") as afp: 35 | while True: 36 | chunk = await afp.read(1024 * 128) 37 | if not chunk: 38 | break 39 | yield common_pb2.FileChunk(buffer=chunk, filename=filename) 40 | except Exception as ex: 41 | logger.error("fail to read file %s", ex) 42 | 43 | 44 | def process_char_stream(stream): 45 | buffer = [] 46 | i = 0 47 | 48 | def carriage_return(buf): 49 | pop_buf = [] 50 | if "\n" in buf: 51 | for _ in range(buf[::-1].index("\n")): 52 | pop_buf.append(buf.pop()) 53 | return pop_buf[::-1] 54 | else: 55 | pop_buf.extend(buf) 56 | buf.clear() 57 | return pop_buf 58 | 59 | last_pop_buf = [] 60 | while i < len(stream): 61 | c = stream[i] 62 | if c == "\b": 63 | if buffer: 64 | buffer.pop() 65 | last_pop_buf = [] 66 | elif c == "\r": 67 | last_pop_buf = carriage_return(buffer) 68 | elif c == "\n": 69 | if last_pop_buf: 70 | buffer.extend(last_pop_buf) 71 | last_pop_buf = [] 72 | buffer.append(c) 73 | else: 74 | last_pop_buf = [] 75 | buffer.append(c) 76 | i += 1 77 | if last_pop_buf: 78 | buffer.extend(last_pop_buf) 79 | return "".join(buffer) 80 | 81 | 82 | def clean_code(code: str): 83 | start_tag = "```" 84 | end_tag = "```" 85 | index = code.find(start_tag) 
86 | if index >= 0: 87 | last = code.rfind(end_tag) 88 | return code[index + len(start_tag) : last] 89 | return code 90 | 91 | 92 | def parse_link(text): 93 | """Parses a link from markdown text. 94 | 95 | Args: 96 | text: The markdown text. 97 | 98 | Returns: 99 | The link text and href, or None if no link is found. 100 | """ 101 | link_regex = r"\[(.+?)\]\((.+?)\)" 102 | match = re.search(link_regex, text) 103 | if match: 104 | return match.groups() 105 | else: 106 | return None, None 107 | 108 | 109 | def parse_image_filename(string): 110 | """Parses the image filename from a string. 111 | 112 | Args: 113 | string: A string containing the image filename. 114 | 115 | Returns: 116 | The image filename, or None if the filename is not valid. 117 | """ 118 | 119 | pattern = r"octopus_\w+\.(jpg|png|gif)" 120 | match = re.search(pattern, string) 121 | if match: 122 | return match.group() 123 | else: 124 | return None 125 | 126 | 127 | def random_str(n): 128 | # using random.choices() 129 | # generating random strings 130 | res = "".join(random.choices(string.ascii_uppercase + string.digits, k=n)) 131 | return str(res) 132 | -------------------------------------------------------------------------------- /proto/.gitignore: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | src/og_proto/*_pb2.py 18 | src/og_proto/*_pb2.pyi 19 | src/og_proto/*_pb2_grpc.py 20 | develop-eggs/ 21 | dist/ 22 | downloads/ 23 | eggs/ 24 | .eggs/ 25 | lib/ 26 | lib64/ 27 | parts/ 28 | sdist/ 29 | var/ 30 | wheels/ 31 | share/python-wheels/ 32 | *.egg-info/ 33 | .installed.cfg 34 | *.egg 35 | MANIFEST 36 | 37 | # PyInstaller 38 | # Usually these files are written by a python script from a template 39 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 40 | *.manifest 41 | *.spec 42 | 43 | # Installer logs 44 | pip-log.txt 45 | pip-delete-this-directory.txt 46 | 47 | # Unit test / coverage reports 48 | htmlcov/ 49 | .tox/ 50 | .nox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *.cover 57 | *.py,cover 58 | .hypothesis/ 59 | .pytest_cache/ 60 | cover/ 61 | 62 | # Translations 63 | *.mo 64 | *.pot 65 | 66 | # Django stuff: 67 | *.log 68 | local_settings.py 69 | db.sqlite3 70 | db.sqlite3-journal 71 | 72 | # Flask stuff: 73 | instance/ 74 | .webassets-cache 75 | 76 | # Scrapy stuff: 77 | .scrapy 78 | 79 | # Sphinx documentation 80 | docs/_build/ 81 | 82 | # PyBuilder 83 | .pybuilder/ 84 | target/ 85 | 86 | # Jupyter Notebook 87 | .ipynb_checkpoints 88 | 89 | # IPython 90 | profile_default/ 91 | ipython_config.py 92 | 93 | # pyenv 94 | # For a library or package, you might want to ignore these files since the code is 95 | # intended to run in multiple environments; otherwise, check them in: 96 | # .python-version 97 | 98 | # pipenv 99 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 100 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 101 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 102 | # install all needed dependencies. 
103 | #Pipfile.lock 104 | 105 | # poetry 106 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 107 | # This is especially recommended for binary packages to ensure reproducibility, and is more 108 | # commonly ignored for libraries. 109 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 110 | #poetry.lock 111 | 112 | # pdm 113 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 114 | #pdm.lock 115 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 116 | # in version control. 117 | # https://pdm.fming.dev/#use-with-ide 118 | .pdm.toml 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
168 | #.idea/ 169 | -------------------------------------------------------------------------------- /proto/src/og_proto/agent_server.proto: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2023 imotai 2 | // SPDX-FileContributor: imotai 3 | // 4 | // SPDX-License-Identifier: Elastic-2.0 5 | 6 | syntax = "proto3"; 7 | package octogen_agent_proto; 8 | 9 | import "common.proto"; 10 | 11 | message ProcessOptions { 12 | // respond all output in stream 13 | bool streaming = 1; 14 | // the llm name eg gpt3.5, gpt4 or codellama 15 | string llm_name = 2; 16 | // the token limit for input sent to model 17 | int32 input_token_limit = 3; 18 | // the token limit for output generated by model 19 | int32 output_token_limit = 4; 20 | // the max time(s) for processing task 21 | int32 timeout = 5; 22 | } 23 | 24 | message ProcessTaskRequest { 25 | // the input_files will be uploaded to the server 26 | repeated string input_files = 1; 27 | // the task description 28 | string task = 2; 29 | // the context id for task 30 | string context_id = 3; 31 | ProcessOptions options = 4; 32 | } 33 | 34 | message OnStepActionStart { 35 | // the input for the tool 36 | string input = 1; 37 | // the tool name 38 | string tool = 2; 39 | } 40 | 41 | message OnStepActionEnd { 42 | string output = 1; 43 | repeated string output_files = 2; 44 | bool has_error = 3; 45 | } 46 | 47 | message FinalAnswer { 48 | string answer = 1; 49 | } 50 | 51 | message ContextState { 52 | int32 output_token_count = 1; 53 | int32 input_token_count = 2; 54 | string llm_name = 3; 55 | // the time(ms) of code execution and text generation 56 | int32 total_duration = 4; 57 | // the prompt token count 58 | // the time(ms) that the model has been used 59 | int32 llm_response_duration = 5; 60 | } 61 | 62 | message TypingContent { 63 | string content = 1; 64 | string language = 2; 65 | } 66 | 67 | message TaskResponse { 68 | ContextState state = 1; 69 | enum ResponseType { 70 | OnStepActionStart = 0; 71 | OnStepActionStreamStdout = 1; 72 | OnStepActionStreamStderr = 2; 73 | OnStepActionEnd = 3; 74 | OnFinalAnswer = 4; 75 | OnModelTypeText = 5; 76 | OnModelTypeCode = 6; 77 | OnModelOutputError = 7; 78 | OnInputTokenLimitExceed = 8; 79 | OnOutputTokenLimitExceed = 9; 80 | OnSystemError = 10; 81 | } 82 | ResponseType response_type = 4; 83 | oneof body { 84 | OnStepActionStart on_step_action_start = 5; 85 | OnStepActionEnd on_step_action_end = 6; 86 | FinalAnswer final_answer = 7; 87 | string console_stdout = 8; 88 | string console_stderr = 9; 89 | string error_msg = 10; 90 | TypingContent typing_content = 11; 91 | } 92 | string context_id = 20; 93 | } 94 | 95 | message AddKernelRequest { 96 | string endpoint = 1; 97 | string key = 3; 98 | } 99 | 100 | message AddKernelResponse { 101 | int32 code = 1; 102 | string msg = 2; 103 | } 104 | 105 | message AssembleAppRequest { 106 | string name = 1; 107 | string language = 2; 108 | string code = 3; 109 | repeated string saved_filenames = 4; 110 | string desc = 5; 111 | } 112 | 113 | message AssembleAppResponse { 114 | int32 code = 1; 115 | string msg = 2; 116 | } 117 | 118 | message RunAppRequest { 119 | string name = 1; 120 | } 121 | 122 | message AppInfo { 123 | string name = 1; 124 | string language = 2; 125 | // second 126 | int32 ctime = 3; 127 | string desc = 4; 128 | } 129 | 130 | message QueryAppsRequest {} 131 | 132 | message QueryAppsResponse { 133 | repeated AppInfo apps = 1; 134 | } 135 | 136 | message PingRequest {} 137 | 138 | message 
PongResponse { 139 | int32 code = 1; 140 | string msg = 2; 141 | } 142 | 143 | service AgentServer { 144 | // check the connection is ok 145 | rpc ping(PingRequest) returns (PongResponse) {} 146 | // upload the file 147 | rpc upload(stream octogen_common_proto.FileChunk) returns (octogen_common_proto.FileUploaded) {} 148 | rpc download(octogen_common_proto.DownloadRequest) returns (stream octogen_common_proto.FileChunk) {} 149 | rpc process_task(ProcessTaskRequest) returns (stream TaskResponse) {} 150 | rpc add_kernel(AddKernelRequest) returns (AddKernelResponse) {} 151 | } 152 | -------------------------------------------------------------------------------- /agent/.gitignore: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | src/octopus_agent/proto/*_pb2.py 18 | src/octopus_agent/proto/*_pb2.pyi 19 | src/octopus_agent/proto/*_pb2_grpc.py 20 | develop-eggs/ 21 | dist/ 22 | downloads/ 23 | eggs/ 24 | .eggs/ 25 | lib/ 26 | lib64/ 27 | parts/ 28 | sdist/ 29 | var/ 30 | wheels/ 31 | share/python-wheels/ 32 | *.egg-info/ 33 | .installed.cfg 34 | *.egg 35 | MANIFEST 36 | 37 | # PyInstaller 38 | # Usually these files are written by a python script from a template 39 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 40 | *.manifest 41 | *.spec 42 | 43 | # Installer logs 44 | pip-log.txt 45 | pip-delete-this-directory.txt 46 | 47 | # Unit test / coverage reports 48 | htmlcov/ 49 | .tox/ 50 | .nox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *.cover 57 | *.py,cover 58 | .hypothesis/ 59 | .pytest_cache/ 60 | cover/ 61 | 62 | # Translations 63 | *.mo 64 | *.pot 65 | 66 | # Django stuff: 67 | *.log 68 | local_settings.py 69 | db.sqlite3 70 | db.sqlite3-journal 71 | 72 | # Flask stuff: 73 | instance/ 74 | .webassets-cache 75 | 76 | # Scrapy stuff: 77 | .scrapy 78 | 79 | # Sphinx documentation 80 | docs/_build/ 81 | 82 | # PyBuilder 83 | .pybuilder/ 84 | target/ 85 | 86 | # Jupyter Notebook 87 | .ipynb_checkpoints 88 | 89 | # IPython 90 | profile_default/ 91 | ipython_config.py 92 | 93 | # pyenv 94 | # For a library or package, you might want to ignore these files since the code is 95 | # intended to run in multiple environments; otherwise, check them in: 96 | # .python-version 97 | 98 | # pipenv 99 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 100 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 101 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 102 | # install all needed dependencies. 103 | #Pipfile.lock 104 | 105 | # poetry 106 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 107 | # This is especially recommended for binary packages to ensure reproducibility, and is more 108 | # commonly ignored for libraries. 109 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 110 | #poetry.lock 111 | 112 | # pdm 113 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
114 | #pdm.lock 115 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 116 | # in version control. 117 | # https://pdm.fming.dev/#use-with-ide 118 | .pdm.toml 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | -------------------------------------------------------------------------------- /LICENSES/Elastic-2.0.txt: -------------------------------------------------------------------------------- 1 | Elastic License 2.0 2 | 3 | URL: https://www.elastic.co/licensing/elastic-license 4 | 5 | ## Acceptance 6 | 7 | By using the software, you agree to all of the terms and conditions below. 8 | 9 | ## Copyright License 10 | 11 | The licensor grants you a non-exclusive, royalty-free, worldwide, 12 | non-sublicensable, non-transferable license to use, copy, distribute, make 13 | available, and prepare derivative works of the software, in each case subject to 14 | the limitations and conditions below. 15 | 16 | ## Limitations 17 | 18 | You may not provide the software to third parties as a hosted or managed 19 | service, where the service provides users with access to any substantial set of 20 | the features or functionality of the software. 21 | 22 | You may not move, change, disable, or circumvent the license key functionality 23 | in the software, and you may not remove or obscure any functionality in the 24 | software that is protected by the license key. 25 | 26 | You may not alter, remove, or obscure any licensing, copyright, or other notices 27 | of the licensor in the software. Any use of the licensor’s trademarks is subject 28 | to applicable law. 29 | 30 | ## Patents 31 | 32 | The licensor grants you a license, under any patent claims the licensor can 33 | license, or becomes able to license, to make, have made, use, sell, offer for 34 | sale, import and have imported the software, in each case subject to the 35 | limitations and conditions in this license. This license does not cover any 36 | patent claims that you cause to be infringed by modifications or additions to 37 | the software. If you or your company make any written claim that the software 38 | infringes or contributes to infringement of any patent, your patent license for 39 | the software granted under these terms ends immediately. If your company makes 40 | such a claim, your patent license ends immediately for work on behalf of your 41 | company. 
42 | 43 | ## Notices 44 | 45 | You must ensure that anyone who gets a copy of any part of the software from you 46 | also gets a copy of these terms. 47 | 48 | If you modify the software, you must include in any modified copies of the 49 | software prominent notices stating that you have modified the software. 50 | 51 | ## No Other Rights 52 | 53 | These terms do not imply any licenses other than those expressly granted in 54 | these terms. 55 | 56 | ## Termination 57 | 58 | If you use the software in violation of these terms, such use is not licensed, 59 | and your licenses will automatically terminate. If the licensor provides you 60 | with a notice of your violation, and you cease all violation of this license no 61 | later than 30 days after you receive that notice, your licenses will be 62 | reinstated retroactively. However, if you violate these terms after such 63 | reinstatement, any additional violation of these terms will cause your licenses 64 | to terminate automatically and permanently. 65 | 66 | ## No Liability 67 | 68 | *As far as the law allows, the software comes as is, without any warranty or 69 | condition, and the licensor will not be liable to you for any damages arising 70 | out of these terms or the use or nature of the software, under any kind of 71 | legal claim.* 72 | 73 | ## Definitions 74 | 75 | The **licensor** is the entity offering these terms, and the **software** is the 76 | software the licensor makes available under these terms, including any portion 77 | of it. 78 | 79 | **you** refers to the individual or entity agreeing to these terms. 80 | 81 | **your company** is any legal entity, sole proprietorship, or other kind of 82 | organization that you work for, plus all organizations that have control over, 83 | are under the control of, or are under common control with that 84 | organization. **control** means ownership of substantially all the assets of an 85 | entity, or the power to direct its management and policies by vote, contract, or 86 | otherwise. Control can be direct or indirect. 87 | 88 | **your licenses** are all the licenses granted to you for the software under 89 | these terms. 90 | 91 | **use** means anything you do with the software requiring one of your licenses. 92 | 93 | **trademark** means trademarks, service marks, and similar rights. 94 | -------------------------------------------------------------------------------- /kernel/.gitignore: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | src/octopus_kernel/proto/*_pb2.py 18 | src/octopus_kernel/proto/*_pb2.pyi 19 | src/octopus_kernel/proto/*_pb2_grpc.py 20 | develop-eggs/ 21 | dist/ 22 | downloads/ 23 | eggs/ 24 | .eggs/ 25 | lib/ 26 | lib64/ 27 | parts/ 28 | sdist/ 29 | var/ 30 | wheels/ 31 | share/python-wheels/ 32 | *.egg-info/ 33 | .installed.cfg 34 | *.egg 35 | MANIFEST 36 | 37 | # PyInstaller 38 | # Usually these files are written by a python script from a template 39 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
40 | *.manifest 41 | *.spec 42 | 43 | # Installer logs 44 | pip-log.txt 45 | pip-delete-this-directory.txt 46 | 47 | # Unit test / coverage reports 48 | htmlcov/ 49 | .tox/ 50 | .nox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *.cover 57 | *.py,cover 58 | .hypothesis/ 59 | .pytest_cache/ 60 | cover/ 61 | 62 | # Translations 63 | *.mo 64 | *.pot 65 | 66 | # Django stuff: 67 | *.log 68 | local_settings.py 69 | db.sqlite3 70 | db.sqlite3-journal 71 | 72 | # Flask stuff: 73 | instance/ 74 | .webassets-cache 75 | 76 | # Scrapy stuff: 77 | .scrapy 78 | 79 | # Sphinx documentation 80 | docs/_build/ 81 | 82 | # PyBuilder 83 | .pybuilder/ 84 | target/ 85 | 86 | # Jupyter Notebook 87 | .ipynb_checkpoints 88 | 89 | # IPython 90 | profile_default/ 91 | ipython_config.py 92 | 93 | # pyenv 94 | # For a library or package, you might want to ignore these files since the code is 95 | # intended to run in multiple environments; otherwise, check them in: 96 | # .python-version 97 | 98 | # pipenv 99 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 100 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 101 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 102 | # install all needed dependencies. 103 | #Pipfile.lock 104 | 105 | # poetry 106 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 107 | # This is especially recommended for binary packages to ensure reproducibility, and is more 108 | # commonly ignored for libraries. 109 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 110 | #poetry.lock 111 | 112 | # pdm 113 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 114 | #pdm.lock 115 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 116 | # in version control. 117 | # https://pdm.fming.dev/#use-with-ide 118 | .pdm.toml 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
168 | #.idea/ 169 | -------------------------------------------------------------------------------- /memory/src/og_memory/memory.py: -------------------------------------------------------------------------------- 1 | # vim:fenc=utf-8 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ 9 | 10 | """ 11 | import json 12 | import logging 13 | from abc import ABC, abstractmethod 14 | from pydantic import BaseModel, Field 15 | from og_proto.memory_pb2 import AgentMemory as AgentMemoryProto 16 | from jinja2 import Environment 17 | from jinja2.loaders import PackageLoader 18 | import tiktoken 19 | logger = logging.getLogger(__name__) 20 | 21 | env = Environment(loader=PackageLoader("og_memory", "template")) 22 | env.filters['from_json'] = lambda s: json.loads(s) 23 | context_tpl = env.get_template("agent.jinja") 24 | encoding = tiktoken.encoding_for_model("gpt-3.5-turbo") 25 | 26 | 27 | def agent_memory_to_context(instruction, guide_memory, options): 28 | """ 29 | Convert the agent memory to context 30 | :param instruction: the instruction 31 | :param guide_memory: the guide memory 32 | :return: string context for llm 33 | """ 34 | return context_tpl.render(prompt=instruction, guides=guide_memory, options=options) 35 | 36 | class BaseAgentMemory(ABC): 37 | """ 38 | Base class for agent memory 39 | """ 40 | @abstractmethod 41 | def append_chat_message(self, message): 42 | """ 43 | Append chat message to the memory 44 | """ 45 | pass 46 | 47 | @abstractmethod 48 | def append_guide(self, guide): 49 | """ 50 | Append guide to the memory 51 | """ 52 | pass 53 | 54 | @abstractmethod 55 | def update_options(self, options): 56 | """ 57 | Update the options 58 | """ 59 | pass 60 | 61 | @abstractmethod 62 | def swap_instruction(self, instruction): 63 | """ 64 | Swap the instruction 65 | """ 66 | pass 67 | 68 | @abstractmethod 69 | def to_messages(self): 70 | """ 71 | Convert the memory to messages 72 | """ 73 | pass 74 | 75 | @abstractmethod 76 | def reset_memory(self): 77 | """ 78 | Reset the memory 79 | """ 80 | pass 81 | 82 | @abstractmethod 83 | def get_functions(self): 84 | """ 85 | return the function definitions for model that supports the function_call 86 | """ 87 | pass 88 | 89 | 90 | class AgentMemoryOption(BaseModel): 91 | """ 92 | The agent memory option 93 | """ 94 | show_function_instruction: bool = Field(False, description="Show the function instruction") 95 | disable_output_format: bool = Field(False, description="Disable the output format") 96 | 97 | class MemoryAgentMemory(BaseAgentMemory): 98 | """ 99 | The agent memory based on memory 100 | """ 101 | def __init__(self, memory_id, user_name, user_id): 102 | self.memory_id = memory_id 103 | self.user_name = user_name 104 | self.user_id = user_id 105 | self.guide_memory = [] 106 | self.chat_memory = [] 107 | self.instruction = None 108 | self.options = AgentMemoryOption(show_function_instruction=True) 109 | 110 | def update_options(self, options): 111 | self.options = options 112 | 113 | def reset_memory(self): 114 | self.guide_memory = [] 115 | self.chat_memory = [] 116 | 117 | def append_guide(self, guide): 118 | self.guide_memory.append(guide) 119 | 120 | def append_chat_message(self, message): 121 | self.chat_memory.append(message) 122 | 123 | def swap_instruction(self, instruction): 124 | self.instruction = instruction 125 | 126 | def get_functions(self): 127 | return [{"name": action.name, "description": action.desc, "parameters": 128 | 
json.loads(action.parameters)} for action in self.instruction.actions] 129 | 130 | def to_messages(self): 131 | system_message = { 132 | "role":"system", 133 | "content":agent_memory_to_context(self.instruction, self.guide_memory, options = self.options) 134 | } 135 | logging.debug(f"system message: {system_message}") 136 | return [system_message] + self.chat_memory 137 | 138 | 139 | -------------------------------------------------------------------------------- /agent/src/og_agent/mock_agent.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | import json 8 | import time 9 | import logging 10 | from .base_agent import BaseAgent, TypingState, TaskContext 11 | from og_proto.agent_server_pb2 import OnStepActionStart, TaskResponse, OnStepActionEnd, FinalAnswer, TypingContent 12 | from .tokenizer import tokenize 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | class MockAgent(BaseAgent): 18 | """ 19 | a test agent for octogen 20 | """ 21 | 22 | def __init__(self, messages, sdk): 23 | """ 24 | the messages are the cases 25 | """ 26 | super().__init__(sdk) 27 | self.messages = messages 28 | 29 | async def call_ai(self, prompt, queue, iteration, task_context): 30 | message = self.messages.get(prompt)[iteration] 31 | if message.get("explanation", None): 32 | await queue.put( 33 | TaskResponse( 34 | state=task_context.to_context_state_proto(), 35 | response_type=TaskResponse.OnModelTypeText, 36 | typing_content=TypingContent( 37 | content=message["explanation"], language="text" 38 | ), 39 | ) 40 | ) 41 | if message.get("code", None): 42 | await queue.put( 43 | TaskResponse( 44 | state=task_context.to_context_state_proto(), 45 | response_type=TaskResponse.OnModelTypeCode, 46 | typing_content=TypingContent( 47 | content=message["code"], language="python" 48 | ), 49 | ) 50 | ) 51 | return message 52 | 53 | async def handle_call_function( 54 | self, code, queue, explanation, context, task_context, saved_filenames=[] 55 | ): 56 | tool_input = json.dumps({ 57 | "code": code, 58 | "explanation": explanation, 59 | "saved_filenames": saved_filenames, 60 | "language": "python", 61 | }) 62 | await queue.put( 63 | TaskResponse( 64 | state=task_context.to_context_state_proto(), 65 | response_type=TaskResponse.OnStepActionStart, 66 | on_step_action_start=OnStepActionStart( 67 | input=tool_input, tool="execute" 68 | ), 69 | ) 70 | ) 71 | function_result = None 72 | async for (result, respond) in self.call_function(code, context, task_context): 73 | function_result = result 74 | if respond: 75 | await queue.put(respond) 76 | return function_result 77 | 78 | async def arun(self, request, queue, context, task_opt): 79 | """ 80 | run the agent 81 | 82 | """ 83 | task = request.task 84 | task_context = TaskContext( 85 | start_time=time.time(), 86 | output_token_count=10, 87 | input_token_count=10, 88 | llm_name="mock", 89 | llm_respond_duration=1000, 90 | ) 91 | iteration = 0 92 | try: 93 | while iteration <= 10: 94 | message = await self.call_ai(task, queue, iteration, task_context) 95 | iteration = iteration + 1 96 | if message.get("code", None): 97 | function_result = await self.handle_call_function( 98 | message["code"], 99 | queue, 100 | message["explanation"], 101 | context, 102 | task_context, 103 | message.get("saved_filenames", []), 104 | ) 105 | await queue.put( 106 | TaskResponse( 107 | 
state=task_context.to_context_state_proto(),
108 |                             response_type=TaskResponse.OnStepActionEnd,
109 |                             on_step_action_end=OnStepActionEnd(
110 |                                 output="",
111 |                                 output_files=function_result.saved_filenames,
112 |                                 has_error=function_result.has_error,
113 |                             ),
114 |                         )
115 |                     )
116 |                 else:
117 |                     await queue.put(
118 |                         TaskResponse(
119 |                             state=task_context.to_context_state_proto(),
120 |                             response_type=TaskResponse.OnFinalAnswer,
121 |                             final_answer=FinalAnswer(answer=message["explanation"]),
122 |                         )
123 |                     )
124 |                     break
125 |         finally:
126 |             await queue.put(None)
127 |
-------------------------------------------------------------------------------- /kernel/src/og_kernel/kernel/kernel_client.py: --------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2023 imotai
2 | # SPDX-FileContributor: imotai
3 | #
4 | # SPDX-License-Identifier: Elastic-2.0
5 |
6 | import os
7 | import logging
8 | import inspect
9 | import asyncio
10 | import queue
11 | import json
12 | from jupyter_client import AsyncKernelClient
13 |
14 | logger = logging.getLogger(__name__)
15 |
16 | """
17 | the kernel client for watching the output of the kernel
18 | """
19 |
20 |
21 | class KernelClient:
22 |
23 |     def __init__(self, connection_file):
24 |         if not connection_file:
25 |             raise ValueError(f"connection_file={connection_file} is empty")
26 |         if not os.path.exists(connection_file):
27 |             raise ValueError(f"connection_file={connection_file} does not exist")
28 |         logger.info(
29 |             "create a new kernel client with connection_file %s", connection_file
30 |         )
31 |         self.client = None
32 |         self.is_running = False
33 |         self.task = None
34 |         self.connection_file = connection_file
35 |
36 |     async def is_alive(self):
37 |         return await self.client.is_alive()
38 |
39 |     async def start_client(self):
40 |         self.client = AsyncKernelClient(connection_file=self.connection_file)
41 |         self.client.load_connection_file()
42 |         self.client.start_channels()
43 |         await self.client.wait_for_ready()
44 |
45 |     async def _loop(self, on_message_fn):
46 |         logger.debug("start loop the kernel message")
47 |         try:
48 |             while self.is_running and self.client:
49 |                 try:
50 |                     logger.debug("start wait message")
51 |                     msg = await self.client.get_iopub_msg(timeout=1)
52 |                     logger.debug("msg %s", msg)
53 |                     try:
54 |                         await on_message_fn(msg)
55 |                     except Exception as e:
56 |                         logger.error("fail to call on_message function for error %s", e)
57 |                         continue
58 |                 except queue.Empty:
59 |                     logger.debug("empty message")
60 |                     continue
61 |                 except (ValueError, IndexError):
62 |                     # get_iopub_msg suffers from message fetch errors
63 |                     logger.error("fail to get message")
64 |                     break
65 |                 except Exception as e:
66 |                     logger.error("fail to wait for message %s", e)
67 |                     break
68 |         except Exception as e:
69 |             logger.error("loop exception %s", e)
70 |
71 |     async def watching(self, on_message_fn):
72 |         """
73 |         Watch the messages from the kernel; when a new message arrives, `on_message_fn` will be
74 |         called
75 |
76 |         Arguments
77 |         on_message_fn - the function called whenever a new message arrives
78 |         """
79 |         if self.is_running:
80 |             raise ValueError("the watcher is already running, do not start it again")
81 |         if not on_message_fn or not self.client:
82 |             raise ValueError("on_message_fn or client is None")
83 |         if not inspect.iscoroutinefunction(on_message_fn):
84 |             raise ValueError("on_message_fn must be an async function")
85 |         self.is_running = True
86 |         self.task = asyncio.create_task(self._loop(on_message_fn))
87 |
88 |     async def read_response(self, context, tries=1):
89 |         try:
90 |             hit_empty = 0
91 |             while self.client:
92 |                 try:
93 |                     msg = await self.client.get_iopub_msg(timeout=1)
94 |                     if context.done():
95 |                         logger.debug("the client has cancelled the request")
96 |                         break
97 |                     logger.debug(f"{msg}")
98 |                     yield msg
99 |                 except queue.Empty:
100 |                     hit_empty += 1
101 |                     if hit_empty >= tries:
102 |                         break
103 |                 except (ValueError, IndexError):
104 |                     # get_iopub_msg suffers from message fetch errors
105 |                     logger.error("fail to get message")
106 |                     break
107 |                 except Exception as e:
108 |                     logger.error("fail to wait for message %s", e)
109 |                     break
110 |             yield None
111 |         except Exception as e:
112 |             logger.error("loop exception %s", e)
113 |             yield None
114 |
115 |     def execute(self, code):
116 |         """
117 |         Execute the Python code
118 |         """
119 |         if not self.client:
120 |             raise ValueError("no client is available")
121 |         msg_id = self.client.execute(code)
122 |         logger.debug("the execute msg id %s", msg_id)
123 |         return msg_id
124 |
125 |     async def stop_watch(self):
126 |         if self.task and self.is_running:
127 |             self.is_running = False
128 |             logger.info(
129 |                 "stop the kernel client for connection_file %s", self.connection_file
130 |             )
131 |             try:
132 |                 self.task.cancel()
133 |                 await self.task
134 |             except:
135 |                 pass
136 |
137 |     def stop_client(self):
138 |         if self.client:
139 |             self.client.stop_channels()
140 |             self.client = None
141 |
-------------------------------------------------------------------------------- /up/src/og_up/kernel_up.py: --------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 |
3 | # SPDX-FileCopyrightText: 2023 imotai
4 | # SPDX-FileContributor: imotai
5 | #
6 | # SPDX-License-Identifier: Elastic-2.0
7 |
8 | """ """
9 | import click
10 | import os
11 | import time
12 | from .utils import run_with_realtime_print
13 | from .up import check_the_env
14 | from .up import load_docker_image
15 | from .up import get_latest_release_version
16 | from .up import ping_agent_service
17 | from .up import stop_service
18 | from .up import generate_kernel_env
19 | from .up import random_str
20 | from .up import refresh
21 | from .up import add_kernel_endpoint
22 | from rich.live import Live
23 | from rich.spinner import Spinner
24 | from rich.console import Group
25 | from rich.console import Console
26 | from rich.markdown import Markdown
27 | from rich.prompt import Prompt
28 | from dotenv import dotenv_values
29 | from og_sdk.agent_sdk import AgentSyncSDK
30 |
31 | Welcome = "Welcome to og_kernel_up"
32 |
33 |
34 | def get_config(console):
35 |     key = Prompt.ask("Agent Key", password=True)
36 |     endpoint = Prompt.ask("Agent Endpoint")
37 |     name = Prompt.ask("Kernel Name")
38 |     port = Prompt.ask("Kernel Port")
39 |     return key, endpoint, name, port
40 |
41 |
42 | def start_kernel_service(
43 |     live, segments, install_dir, image_name, version, kernel_name, kernel_port
44 | ):
45 |     spinner = Spinner("dots", style="status.spinner", speed=1.0, text="")
46 |     step = "Start kernel service"
47 |     output = ""
48 |     vendor = "docker"
49 |     segments.append((spinner, step, ""))
50 |     refresh(live, segments)
51 |     stop_service(kernel_name)
52 |     full_name = f"{image_name}:{version}"
53 |     command = [
54 |         vendor,
55 |         "run",
56 |         "--name",
57 |         kernel_name,
58 |         "-p",
59 |         f"127.0.0.1:{kernel_port}:{kernel_port}",
60 |         "-v",
61 |         f"{install_dir}:/app",
62 |         "-dt",
63 |         f"{full_name}",
64 |         "bash",
65 |         "/bin/start_kernel.sh",
66 |         "/app",
67 |     ]
68 |     result_code = 0
69 |     output = ""
70 |     for code, chunk in run_with_realtime_print(command=command):
71 |         result_code = code
72 |         output += chunk
73 |
74 |     time.sleep(6)
75 |     segments.pop()
76 |     if result_code == 0:
77 |         segments.append(("✅", "Start kernel service", ""))
78 |     else:
79 |         segments.append(("❌", "Start kernel service", output))
80 |     refresh(live, segments)
81 |     return result_code
82 |
83 |
84 | @click.command("init")
85 | @click.option("--image_name", default="dbpunk/octogen", help="the octogen image name")
86 | @click.option(
87 |     "--install_dir", default="~/kernel/apps", help="the install dir of kernel"
88 | )
89 | @click.option("--octogen_version", default="", help="the version of octogen")
90 | def init_kernel(
91 |     image_name,
92 |     install_dir,
93 |     octogen_version,
94 | ):
95 |     if install_dir.find("~") == 0:
96 |         real_install_dir = install_dir.replace("~", os.path.expanduser("~"))
97 |     else:
98 |         real_install_dir = install_dir
99 |     os.makedirs(real_install_dir, exist_ok=True)
100 |     console = Console()
101 |     console.print(Welcome)
102 |     key, agent_endpoint, kernel_name, kernel_port = get_config(console)
103 |     kernel_dir = "/".join([real_install_dir, kernel_name])
104 |     os.makedirs(kernel_dir, exist_ok=True)
105 |     segments = []
106 |     with Live(Group(*segments), console=console) as live:
107 |         if octogen_version:
108 |             version = octogen_version
109 |         else:
110 |             version = get_latest_release_version("dbpunk-labs/octogen", live, segments)  # assumption: releases come from the dbpunk-labs/octogen repository
111 |         check_result, _ = check_the_env(live, segments)
112 |         if not check_result:
113 |             segments.append(("❌", "Setup kernel service failed", ""))
114 |             refresh(live, segments)
115 |             return
116 |         code = load_docker_image(version, image_name, live, segments)
117 |         if code != 0:
118 |             return
119 |         kernel_key = random_str(32)
120 |         env_path = kernel_dir + "/kernel/" + ".env"
121 |         if not os.path.exists(env_path):
122 |             generate_kernel_env(
123 |                 live,
124 |                 segments,
125 |                 kernel_dir,
126 |                 kernel_key,
127 |                 rpc_port=kernel_port,
128 |                 rpc_host="0.0.0.0",
129 |             )
130 |         else:
131 |             config = dotenv_values(env_path)
132 |             kernel_key = config.get("rpc_key", "")
133 |             if kernel_key:
134 |                 segments.append(("✅", "Use the existing kernel config", ""))
135 |                 refresh(live, segments)
136 |                 kernel_port = config.get("rpc_port")
137 |             else:
138 |                 segments.append(("❌", "Bad kernel config", ""))
139 |                 refresh(live, segments)
140 |                 return
141 |         code = start_kernel_service(
142 |             live, segments, kernel_dir, image_name, version, kernel_name, kernel_port
143 |         )
144 |         if code != 0:
145 |             return
146 |         if not add_kernel_endpoint(
147 |             live, segments, key, f"127.0.0.1:{kernel_port}", kernel_key, agent_endpoint
148 |         ):
149 |             segments.append(("❌", "Setup kernel service failed", ""))
150 |             refresh(live, segments)
151 |             return
152 |
153 |         if ping_agent_service(live, segments, kernel_key, api_base=agent_endpoint):
154 |             segments.append(("👍", "Setup kernel service done", ""))
155 |             refresh(live, segments)
156 |         else:
157 |             segments.append(("❌", "Setup kernel service failed", ""))
158 |             refresh(live, segments)
159 |
-------------------------------------------------------------------------------- /sdk/tests/agent_sdk_tests.py: --------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2023 imotai
2 | # SPDX-FileContributor: imotai
3 | #
4 | # SPDX-License-Identifier: Elastic-2.0
5 |
6 | """ """
7 |
8 | import os
9 | import pytest
10 | import asyncio
11 | import logging
12 | import json
13 | import random
14 |
15 | from tempfile import gettempdir
16 | from pathlib import Path
17 | from og_sdk.agent_sdk import AgentSDK, AgentSyncSDK
18 | from og_sdk.utils import random_str
19 | from og_proto.agent_server_pb2 import TaskResponse
20 | import pytest_asyncio
21 |
22 | logger = logging.getLogger(__name__)
23 | api_base = "127.0.0.1:9528"
24 | api_key = "ZCeI9cYtOCyLISoi488BgZHeBkHWuFUH"
25 |
26 |
27 | @pytest.fixture
28 | def agent_sync_sdk():
29 |     sdk = AgentSyncSDK(api_base, api_key)
30 |     sdk.connect()
31 |     yield sdk
32 |     sdk.close()
33 |
34 |
35 | @pytest_asyncio.fixture
36 | async def agent_sdk():
37 |     sdk = AgentSDK(api_base, api_key)
38 |     sdk.connect()
39 |     yield sdk
40 |     await sdk.close()
41 |
42 |
43 | def test_connect_bad_endpoint():
44 |     try:
45 |         sdk = AgentSDK("xxx", api_key)
46 |         sdk.connect()
47 |         assert 0, "should not go here"
48 |     except Exception as ex:
49 |         assert 1
50 |
51 |
52 | def test_connect_bad_endpoint_for_sync_sdk():
53 |     try:
54 |         sdk = AgentSyncSDK("xxx", api_key)
55 |         sdk.connect()
56 |         assert 0, "should not go here"
57 |     except Exception as ex:
58 |         assert 1
59 |
60 |
61 | def test_connect_bad_kernel_api_key_for_sync_sdk(agent_sync_sdk):
62 |     try:
63 |         agent_sync_sdk.add_kernel("bad_kernel_api_key", "127.0.0.1:9527")
64 |         agent_sync_sdk.ping()
65 |         assert 0, "should not go here"
66 |     except Exception as ex:
67 |         assert 1
68 |
69 |
70 | def test_ping_test_for_sync_sdk(agent_sync_sdk):
71 |     try:
72 |         agent_sync_sdk.add_kernel(api_key, "127.0.0.1:9527")
73 |         response = agent_sync_sdk.ping()
74 |         assert response.code == 0
75 |     except Exception as ex:
76 |         assert 0, str(ex)
77 |
78 |
79 | @pytest.mark.asyncio
80 | async def test_ping_test_with_bad_kernel_api_key(agent_sdk):
81 |     """
82 |     the ping method will throw an exception if the kernel api key is not valid
83 |     """
84 |     try:
85 |         await agent_sdk.add_kernel("bad_kernel_api_key", "127.0.0.1:9527")
86 |         response = await agent_sdk.ping()
87 |         assert 0, "should not go here"
88 |     except Exception as ex:
89 |         assert 1
90 |
91 |
92 | @pytest.mark.asyncio
93 | async def test_ping_test(agent_sdk):
94 |     try:
95 |         await agent_sdk.add_kernel(api_key, "127.0.0.1:9527")
96 |         response = await agent_sdk.ping()
97 |         assert response.code == 0
98 |     except Exception as ex:
99 |         assert 0, str(ex)
100 |
101 |
102 | @pytest.mark.asyncio
103 | async def test_upload_and_download_test(agent_sdk):
104 |     sdk = agent_sdk
105 |     await sdk.add_kernel(api_key, "127.0.0.1:9527")
106 |     path = os.path.abspath(__file__)
107 |     # upload file
108 |     uploaded = await sdk.upload_file(path, "agent_sdk_tests.py")
109 |     assert uploaded
110 |     file_stats = os.stat(path)
111 |     assert file_stats.st_size == uploaded.length, "bad upload_file size"
112 |     # download file
113 |     tmp_dir = gettempdir()
114 |     fullpath = "%s%s%s" % (tmp_dir, os.sep, "agent_sdk_tests.py")
115 |     await sdk.download_file("agent_sdk_tests.py", tmp_dir)
116 |     file_stats2 = os.stat(fullpath)
117 |     assert file_stats.st_size == file_stats2.st_size, "bad download_file size"
118 |
119 |
120 | @pytest.mark.asyncio
121 | async def test_prompt_smoke_test(agent_sdk):
122 |     sdk = agent_sdk
123 |     await sdk.add_kernel(api_key, "127.0.0.1:9527")
124 |     try:
125 |         responds = []
126 |         async for respond in sdk.prompt("hello"):
127 |             responds.append(respond)
128 |         logger.debug(f"{responds}")
129 |         assert len(responds) > 0, "no responds for the prompt"
130 |         assert responds[len(responds) - 1].response_type == TaskResponse.OnFinalAnswer
131 |         assert (
132 |             responds[len(responds) - 1].final_answer.answer
133 |             == "how can I help you today?"
134 | ) 135 | except Exception as ex: 136 | assert 0, str(ex) 137 | 138 | 139 | @pytest.mark.asyncio 140 | async def test_run_code_test(agent_sdk): 141 | sdk = agent_sdk 142 | await sdk.add_kernel(api_key, "127.0.0.1:9527") 143 | try: 144 | responds = [] 145 | async for respond in sdk.prompt("write a hello world in python"): 146 | responds.append(respond) 147 | logger.debug(f"{responds}") 148 | assert len(responds) > 0, "no responds for the prompt" 149 | assert responds[len(responds) - 1].response_type == TaskResponse.OnFinalAnswer 150 | assert ( 151 | responds[len(responds) - 1].final_answer.answer 152 | == "this code prints 'hello world'" 153 | ) 154 | except Exception as ex: 155 | assert 0, str(ex) 156 | 157 | 158 | @pytest.mark.asyncio 159 | async def test_run_code_with_error(agent_sdk): 160 | sdk = agent_sdk 161 | await sdk.add_kernel(api_key, "127.0.0.1:9527") 162 | try: 163 | responds = [] 164 | async for respond in sdk.prompt("error function"): 165 | responds.append(respond) 166 | logger.debug(f"{responds}") 167 | assert len(responds) > 0, "no responds for the prompt" 168 | assert responds[len(responds) - 3].response_type == TaskResponse.OnStepActionEnd 169 | assert responds[ 170 | len(responds) - 3 171 | ].on_step_action_end.has_error, "bad has error result" 172 | assert responds[len(responds) - 1].response_type == TaskResponse.OnFinalAnswer 173 | assert ( 174 | responds[len(responds) - 1].final_answer.answer 175 | == "this code prints 'hello world'" 176 | ) 177 | except Exception as ex: 178 | assert 0, str(ex) 179 | -------------------------------------------------------------------------------- /examples/chainlit/chainlit_ui.py: -------------------------------------------------------------------------------- 1 | # vim:fenc=utf-8 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ """ 9 | import json 10 | import chainlit as cl 11 | import aiohttp 12 | from chainlit.input_widget import TextInput 13 | from og_sdk.utils import process_char_stream 14 | 15 | 16 | @cl.on_chat_start 17 | async def start(): 18 | settings = await cl.ChatSettings([ 19 | TextInput( 20 | id="Endpoint", label="Octogen Endpoint", initial="http://127.0.0.1:9529" 21 | ), 22 | TextInput(id="API_KEY", label="Octogen KEY", initial=""), 23 | ]).send() 24 | await setup_agent(settings) 25 | 26 | 27 | @cl.on_settings_update 28 | async def setup_agent(settings): 29 | cl.user_session.set("Endpoint", settings["Endpoint"]) 30 | cl.user_session.set("Key", settings["API_KEY"]) 31 | 32 | 33 | @cl.on_message 34 | async def main(message): 35 | last_msg = cl.Message( 36 | content="", 37 | author="Octogen", 38 | ) 39 | request = { 40 | "prompt": message, 41 | "token_limit": 0, 42 | "llm_model_name": "string", 43 | "input_files": [], 44 | "context_id": "string", 45 | } 46 | headers = {"api-token": cl.user_session.get("Key")} 47 | last_type = None 48 | async with aiohttp.ClientSession(headers=headers, raise_for_status=True) as session: 49 | async with session.post( 50 | cl.user_session.get("Endpoint") + "/process", json=request 51 | ) as r: 52 | async for line in r.content: 53 | if line: 54 | text = str(line, encoding="utf-8") 55 | response = json.loads(text) 56 | if response["step_type"] == "OnStepTextTyping": 57 | if last_type and last_type != "OnStepTextTyping": 58 | if last_msg: 59 | await last_msg.send() 60 | new_msg = cl.Message(author="Octogen", content="") 61 | last_msg = new_msg 62 | await 
last_msg.stream_token(response["typing_content"])
63 |                         last_type = "OnStepTextTyping"
64 |                     elif response["step_type"] == "OnStepCodeTyping":
65 |                         if last_type != "OnStepCodeTyping":
66 |                             await last_msg.send()
67 |                             new_msg = cl.Message(
68 |                                 author="Octogen", language="text", content=""
69 |                             )
70 |                             last_msg = new_msg
71 |                         await last_msg.stream_token(response["typing_content"])
72 |                         last_type = "OnStepCodeTyping"
73 |                     elif response["step_type"] == "OnStepActionStart":
74 |                         parent_id = last_msg.parent_id
75 |                         await last_msg.remove()
76 |                         tool = response["step_action_start"]["tool"]
77 |                         if tool in ["execute_python_code", "show_sample_code"]:
78 |                             tool_input = json.loads(
79 |                                 response["step_action_start"]["input"]
80 |                             )
81 |                             new_msg = cl.Message(
82 |                                 author="Octogen",
83 |                                 language=tool_input.get("language", "text"),
84 |                                 content="",
85 |                             )
86 |                             last_msg = new_msg
87 |                             await last_msg.stream_token(tool_input["code"])
88 |                         last_type = "OnStepActionStart"
89 |                     elif response["step_type"] == "OnStepActionStdout":
90 |                         if last_type not in [
91 |                             "OnStepActionStdout",
92 |                             "OnStepActionStderr",
93 |                         ]:
94 |                             await last_msg.send()
95 |                             new_msg = cl.Message(
96 |                                 author="Octogen",
97 |                                 language="text",
98 |                                 content="",
99 |                             )
100 |                             last_msg = new_msg
101 |                             last_type = "OnStepActionStdout"
102 |                         temp_content = last_msg.content + response["step_action_stdout"]
103 |                         new_content = process_char_stream(temp_content)
104 |                         last_msg.content = new_content
105 |                         await last_msg.update()
106 |                     elif response["step_type"] == "OnStepActionStderr":
107 |                         if last_type not in [
108 |                             "OnStepActionStdout",
109 |                             "OnStepActionStderr",
110 |                         ]:
111 |                             await last_msg.send()
112 |                             new_msg = cl.Message(
113 |                                 author="Octogen", language="text", content=""
114 |                             )
115 |                             last_msg = new_msg
116 |                             last_type = "OnStepActionStderr"
117 |                         temp_content = last_msg.content + response["step_action_stderr"]
118 |                         new_content = process_char_stream(temp_content)
119 |                         last_msg.content = new_content
120 |                         await last_msg.update()
121 |                     elif response["step_type"] == "OnStepActionEnd":
122 |                         await last_msg.send()
123 |                         last_msg = None
124 |                         last_type = "OnStepActionEnd"
125 |                     elif response["step_type"] == "OnFinalAnswer":
126 |                         await last_msg.send()
127 |                         last_msg = None
128 |                         last_type = "OnFinalAnswer"
129 |
-------------------------------------------------------------------------------- /kernel/tests/kernel_client_tests.py: --------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2023 imotai
2 | # SPDX-FileContributor: imotai
3 | #
4 | # SPDX-License-Identifier: Elastic-2.0
5 |
6 | """ """
7 | import json
8 | import os
9 | import asyncio
10 | import random
11 | import pytest
12 | import logging
13 | from og_kernel.kernel.kernel_mgr import KernelManager
14 | from og_kernel.kernel.kernel_client import KernelClient
15 |
16 | logger = logging.getLogger(__name__)
17 |
18 |
19 | class MockContext:
20 |     """
21 |     Mock the grpc request context
22 |     """
23 |
24 |     def done(self):
25 |         return False
26 |
27 |
28 | @pytest.fixture
29 | def kernel_manager():
30 |     config_path = os.path.join("/tmp", str(random.randint(1, 100000)))
31 |     workspace = os.path.join("/tmp", str(random.randint(1, 100000)))
32 |     kernel_manager = KernelManager(config_path, workspace)
33 |     kernel_manager.start()
34 |     yield kernel_manager
35 |     kernel_manager.stop()
36 |
37 |
38 | @pytest.fixture
39 | def ts_kernel_manager():
40 |     config_path = os.path.join("/tmp", str(random.randint(1, 100000)))
41 |     workspace =
os.path.join("/tmp", str(random.randint(1, 100000))) 42 | kernel_manager = KernelManager(config_path, workspace, "tslab") 43 | kernel_manager.start() 44 | yield kernel_manager 45 | kernel_manager.stop() 46 | 47 | 48 | @pytest.mark.asyncio 49 | async def test_watching(kernel_manager): 50 | kernel_client = KernelClient(kernel_manager.config_path) 51 | await kernel_client.start_client() 52 | logger.info("is alive %s", await kernel_client.is_alive()) 53 | 54 | async def on_message_fn(msg): 55 | if "text" in msg: 56 | assert msg["text"] == "Hello, world!" 57 | 58 | await kernel_client.watching(on_message_fn) 59 | for i in range(1): 60 | kernel_client.execute("print('Hello, world!')") 61 | await asyncio.sleep(1) 62 | await asyncio.sleep(10) 63 | await kernel_client.stop_watch() 64 | kernel_client.stop_client() 65 | 66 | 67 | @pytest.mark.asyncio 68 | async def test_result_occurs(kernel_manager): 69 | """Test stdout occurs""" 70 | kernel_client = KernelClient(kernel_manager.config_path) 71 | await kernel_client.start_client() 72 | logger.info("is alive %s", await kernel_client.is_alive()) 73 | code = """ 74 | 5 75 | """ 76 | kernel_client.execute(code) 77 | messages = [] 78 | context = MockContext() 79 | async for msg in kernel_client.read_response(context): 80 | if not msg: 81 | break 82 | messages.append(msg) 83 | logger.info(f"{messages}") 84 | filtered = list(filter(lambda x: x["msg_type"] == "execute_result", messages)) 85 | assert len(filtered) > 0 86 | await asyncio.sleep(2) 87 | await kernel_client.stop_watch() 88 | kernel_client.stop_client() 89 | 90 | 91 | @pytest.mark.asyncio 92 | async def test_stderr_occurs(kernel_manager): 93 | """Test stderr occurs""" 94 | kernel_client = KernelClient(kernel_manager.config_path) 95 | await kernel_client.start_client() 96 | logger.info("is alive %s", await kernel_client.is_alive()) 97 | code = """ 98 | import sys 99 | print('Hello world', file=sys.stderr) 100 | """ 101 | kernel_client.execute(code) 102 | messages = [] 103 | 104 | context = MockContext() 105 | async for msg in kernel_client.read_response(context): 106 | if not msg: 107 | break 108 | messages.append(msg) 109 | filtered = list(filter(lambda x: x["msg_type"] == "stream", messages)) 110 | assert len(filtered) > 0 111 | assert filtered[0]["content"]["name"] == "stderr" 112 | await asyncio.sleep(2) 113 | await kernel_client.stop_watch() 114 | kernel_client.stop_client() 115 | 116 | 117 | @pytest.mark.asyncio 118 | async def test_stdout_occurs(kernel_manager): 119 | """Test stdout occurs""" 120 | kernel_client = KernelClient(kernel_manager.config_path) 121 | await kernel_client.start_client() 122 | logger.info("is alive %s", await kernel_client.is_alive()) 123 | code = """ 124 | print("hello world!") 125 | """ 126 | kernel_client.execute(code) 127 | messages = [] 128 | 129 | context = MockContext() 130 | async for msg in kernel_client.read_response(context): 131 | if not msg: 132 | break 133 | messages.append(msg) 134 | filtered = list(filter(lambda x: x["msg_type"] == "stream", messages)) 135 | assert len(filtered) > 0 136 | assert filtered[0]["content"]["name"] == "stdout" 137 | await asyncio.sleep(2) 138 | await kernel_client.stop_watch() 139 | kernel_client.stop_client() 140 | 141 | 142 | @pytest.mark.asyncio 143 | async def test_syntax_exception_occurs(kernel_manager): 144 | """Test exception occurs""" 145 | kernel_client = KernelClient(kernel_manager.config_path) 146 | await kernel_client.start_client() 147 | logger.info("is alive %s", await kernel_client.is_alive()) 148 | code = """ 
149 | a = 10 150 | b = 20 151 | if (a < b) 152 | print('a is less than b') 153 | """ 154 | kernel_client.execute(code) 155 | messages = [] 156 | 157 | context = MockContext() 158 | async for msg in kernel_client.read_response(context): 159 | if not msg: 160 | break 161 | messages.append(msg) 162 | assert len(list(filter(lambda x: x["msg_type"] == "error", messages))) > 0 163 | await asyncio.sleep(2) 164 | await kernel_client.stop_watch() 165 | kernel_client.stop_client() 166 | 167 | 168 | @pytest.mark.asyncio 169 | async def test_generate_pie_chart(kernel_manager): 170 | """Test generate pie output""" 171 | kernel_client = KernelClient(kernel_manager.config_path) 172 | await kernel_client.start_client() 173 | logger.info("is alive %s", await kernel_client.is_alive()) 174 | 175 | code = """ 176 | import matplotlib.pyplot as plt 177 | import numpy as np 178 | 179 | # Create a pie chart 180 | data = np.array([10, 20, 30, 40]) 181 | labels = ['Category 1', 'Category 2', 'Category 3', 'Category 4'] 182 | 183 | plt.pie(data, labels=labels, autopct='%1.1f%%') 184 | plt.title('Pie Chart') 185 | plt.show() 186 | """ 187 | kernel_client.execute(code) 188 | messages = [] 189 | 190 | context = MockContext() 191 | async for msg in kernel_client.read_response(context): 192 | if msg: 193 | logger.debug(f"{msg}") 194 | messages.append(msg) 195 | 196 | logger.info(f"{messages}") 197 | assert len(list(filter(lambda x: x["msg_type"] == "display_data", messages))) > 0 198 | await asyncio.sleep(2) 199 | await kernel_client.stop_watch() 200 | kernel_client.stop_client() 201 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 |
3 |
4 |
5 | ![GitHub Workflow Status (with event)](https://img.shields.io/github/actions/workflow/status/dbpunk-labs/octogen/ci.yaml)
6 | [![PyPI - Version](https://img.shields.io/pypi/v/og_chat)](https://pypi.org/project/og-chat/)
7 | ![PyPI - Downloads](https://img.shields.io/pypi/dm/og_chat?logo=pypi)
8 | [![Gitter](https://img.shields.io/gitter/room/octogen/%20)](https://app.gitter.im/#/room/#octogen:gitter.im)
9 |
10 | [中文](./README_zh_cn.md)
11 |
12 | > ## Octogen
13 | > an open-source code interpreter
14 | > 一款开源可本地部署的代码解释器
15 |
16 | ## News
17 |
18 | * 2023-10-24 🎉 Octogen [v0.5.0](https://github.com/dbpunk-labs/octogen/discussions) has been released 🎉
19 |
20 | https://github.com/dbpunk-labs/octogen/assets/8623385/7445cc4d-567e-4d1a-bedc-b5b566329c41
21 |
22 |
23 | |Supported OSs|Supported Interpreters|Supported Dev Environment|
24 | |----|-----|-----|
25 | | | | |
26 |
27 |
28 | ## Getting Started
29 |
30 | Requirements
31 | * python 3.10 and above
32 | * pip
33 | * [docker](https://www.docker.com/products/docker-desktop/) 24.0.0 and above, or [podman](https://podman.io/)
34 |
35 | > To deploy Octogen, the user needs permission to run Docker commands.
36 | > To use codellama, your host must have at least 8 CPUs and 16 GB of RAM.
37 |
38 | Install Octogen on your local computer
39 |
40 | 1. Install og_up
41 |
42 | ```bash
43 | pip install og_up
44 | ```
45 |
46 | 2. Set up the Octogen service
47 |
48 | ```
49 | og_up
50 | ```
51 | You can choose from the following options
52 | * OpenAI, recommended for daily use
53 | * Azure OpenAI
54 | * CodeLlama
55 | * Octogen agent services powered by GPT4 and Codellama 34B
56 |
57 | Docker is the default container engine; to use podman instead, pass the `--use_podman` flag
58 |
59 | 3. Execute the command `og`, and you will see the following output
60 |
61 | ```
62 | Welcome to use octogen❤️ . To ask a programming question, simply type your question and press esc + enter
63 | You can use /help to look for help
64 |
65 | [1]🎧>
66 | ```
67 | ## Development
68 |
69 |
70 | Prepare the environment
71 |
72 | ```
73 | git clone https://github.com/dbpunk-labs/octogen.git
74 | cd octogen
75 | python3 -m venv octogen_venv
76 | source octogen_venv/bin/activate
77 | pip install -r requirements.txt
78 | ```
79 |
80 | Run the sandbox, which includes the Agent with a mock model and the Kernel
81 |
82 | ```
83 | $ bash start_sandbox.sh
84 | $ og
85 |
86 | Welcome to use octogen❤️ . To ask a programming question, simply type your question and press esc +
87 | enter
88 | Use /help for help
89 |
90 | [1]🎧>hello
91 | ╭─ 🐙Octogen ──────────────────────────────────────────────────────────────────────────────────────╮
92 | │                                                                                                  │
93 | │  0 🧠 how can I help you today?                                                                  │
94 | │                                                                                                  │
95 | ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
96 | [2]🎧>
97 |
98 | ```
99 |
100 | * To use OpenAI for development, update the config in `start_sandbox.sh` following the example in [openai-env.example](./env_sample/openai_env.sample)
101 | * To use Azure OpenAI for development, update the config in `start_sandbox.sh` following the example in [azure-env.example](./env_sample/azure_env.sample)
102 | * To use codellama for development, update the config in `start_sandbox.sh` following the example in [codellama-env.example](./env_sample/codellama_env.sample)
103 |
104 | ## Supported API Services
105 |
106 | |name|type|status| installation|
107 | |----|-----|----------------|---|
108 | |[Openai GPT 3.5/4](https://openai.com/product#made-for-developers) |LLM| ✅ fully supported|use `og_up`, then choose `OpenAI`|
109 | |[Azure Openai GPT 3.5/4](https://azure.microsoft.com/en-us/products/ai-services/openai-service) |LLM| ✅ fully supported|use `og_up`, then choose `Azure OpenAI`|
110 | |[LLama.cpp Server](https://github.com/ggerganov/llama.cpp/tree/master/examples/server) |LLM| ✔️ supported | use `og_up`, then choose `CodeLlama` |
111 | |[Octopus Agent Service](https://octogen.dev) |Code Interpreter| ✅ supported | apply for an API key at [octogen.dev](https://www.octogen.dev/), then use `og_up` and choose `Octogen` |
112 |
113 |
114 | ## The internals of a local deployment
115 |
116 |
117 | ![octogen-internal](https://github.com/dbpunk-labs/octogen/assets/8623385/986f6805-44cf-4bc7-868f-1f6a987ca254)
118 |
119 | * Octogen Kernel: The code execution engine, based on notebook kernels.
120 | * Octogen Agent: Manages client requests, uses ReAct to process complex tasks, and stores user-assembled applications.
121 | * Octogen Terminal CLI: Accepts user requests, sends them to the Agent, and renders rich results. Currently supports Discord, iTerm2, and Kitty terminals.
122 |
123 | ## Features
124 |
125 | * Automatically execute AI-generated code in a Docker environment.
126 | * Experimental feature: render images in iTerm2 and kitty.
127 | * Upload files with the `/up` command and reference them in your prompt.
128 | * Experimental feature: assemble code blocks into an application and run the code directly with the `/run` command.
129 | * Copy output to the clipboard with the `/cc` command.
130 | * Store prompt history in the Octogen CLI.
131 |
132 | If you have a feature suggestion, please create a discussion to talk about it.
133 |
134 | ## Roadmap
135 |
136 | * [roadmap for v0.5.0](https://github.com/dbpunk-labs/octogen/issues/64)
137 |
138 |
-------------------------------------------------------------------------------- /chat/tests/test_chat_function.py: --------------------------------------------------------------------------------
1 | #!
/usr/bin/env python3 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ """ 9 | from og_terminal.terminal_chat import parse_numbers 10 | from og_terminal.terminal_chat import handle_action_end 11 | from og_terminal.terminal_chat import handle_action_output 12 | from og_terminal.terminal_chat import handle_final_answer 13 | from og_terminal.terminal_chat import handle_typing 14 | from og_terminal.ui_block import TaskBlocks 15 | from og_proto import agent_server_pb2 16 | 17 | 18 | def test_parse_number(): 19 | test_text = "/cc0" 20 | numbers = parse_numbers(test_text) 21 | assert numbers 22 | assert numbers[0] == "0" 23 | 24 | 25 | def test_handle_final_answer_smoke_test(): 26 | images = [] 27 | values = [] 28 | task_state = agent_server_pb2.ContextState( 29 | output_token_count=10, 30 | llm_name="mock", 31 | total_duration=1, 32 | input_token_count=10, 33 | llm_response_duration=1000, 34 | ) 35 | respond_content = agent_server_pb2.TaskResponse( 36 | state=task_state, 37 | response_type=agent_server_pb2.TaskResponse.OnModelTypeText, 38 | typing_content=agent_server_pb2.TypingContent( 39 | content="hello world!", language="text" 40 | ), 41 | ) 42 | respond_final = agent_server_pb2.TaskResponse( 43 | state=task_state, 44 | response_type=agent_server_pb2.TaskResponse.OnFinalAnswer, 45 | final_answer=agent_server_pb2.FinalAnswer(answer=""), 46 | ) 47 | task_blocks = TaskBlocks(values) 48 | task_blocks.begin() 49 | handle_typing(task_blocks, respond_content) 50 | handle_final_answer(task_blocks, respond_final) 51 | segments = list(task_blocks.render()) 52 | assert len(segments) == 1, "bad segment count" 53 | assert segments[0][1] == "🧠" 54 | assert values[0] == "hello world!" 
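
# --- Added illustrative sketch (not part of the original test suite) ---
# Mirrors test_handle_final_answer_smoke_test above, but drives the typing
# handler with an OnModelTypeCode response instead of text. The assumption
# (not confirmed by the original suite) is that code typing fills `values`
# with the typed content the same way text typing does.
def test_handle_typing_code_sketch():
    values = []
    task_state = agent_server_pb2.ContextState(
        output_token_count=10,
        llm_name="mock",
        total_duration=1,
        input_token_count=10,
        llm_response_duration=1000,
    )
    respond_code = agent_server_pb2.TaskResponse(
        state=task_state,
        response_type=agent_server_pb2.TaskResponse.OnModelTypeCode,
        typing_content=agent_server_pb2.TypingContent(
            content="print('hello world!')", language="python"
        ),
    )
    task_blocks = TaskBlocks(values)
    task_blocks.begin()
    handle_typing(task_blocks, respond_code)
    segments = list(task_blocks.render())
    # one code segment is expected, by analogy with the text smoke test
    assert len(segments) == 1, "bad segment count"
    assert values[0] == "print('hello world!')"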
55 | 56 | 57 | def test_handle_action_end_boundary_test(): 58 | # Setup 59 | images = [] 60 | values = [] 61 | task_state = agent_server_pb2.ContextState( 62 | output_token_count=10, 63 | llm_name="mock", 64 | total_duration=1, 65 | input_token_count=10, 66 | llm_response_duration=1000, 67 | ) 68 | task_blocks = TaskBlocks(values) 69 | task_blocks.begin() 70 | 71 | # Create a response with a large number of output files 72 | respond = agent_server_pb2.TaskResponse( 73 | state=task_state, 74 | response_type=agent_server_pb2.TaskResponse.OnStepActionEnd, 75 | on_step_action_end=agent_server_pb2.OnStepActionEnd( 76 | output="", output_files=["test.png"] * 1000, has_error=False 77 | ), 78 | ) 79 | 80 | # Call the function 81 | handle_action_end(task_blocks, respond, images) 82 | 83 | # Check the results 84 | assert len(images) == 1000 85 | assert all(image == "test.png" for image in images) 86 | 87 | 88 | def test_handle_action_end_smoke_test(): 89 | images = [] 90 | values = [] 91 | task_state = agent_server_pb2.ContextState( 92 | output_token_count=10, 93 | llm_name="mock", 94 | total_duration=1, 95 | input_token_count=10, 96 | llm_response_duration=1000, 97 | ) 98 | 99 | respond_stdout = agent_server_pb2.TaskResponse( 100 | state=task_state, 101 | response_type=agent_server_pb2.TaskResponse.OnStepActionStreamStdout, 102 | console_stdout="hello world!", 103 | ) 104 | 105 | respond = agent_server_pb2.TaskResponse( 106 | state=task_state, 107 | response_type=agent_server_pb2.TaskResponse.OnStepActionEnd, 108 | on_step_action_end=agent_server_pb2.OnStepActionEnd( 109 | output="", output_files=["test.png"], has_error=False 110 | ), 111 | ) 112 | 113 | task_blocks = TaskBlocks(values) 114 | task_blocks.begin() 115 | handle_action_output(task_blocks, respond_stdout) 116 | handle_action_end(task_blocks, respond, images) 117 | segments = list(task_blocks.render()) 118 | assert len(segments) == 2, "bad segment count" 119 | assert segments[0][1] == "✅" 120 | assert images[0] == "test.png" 121 | assert values[0] == "hello world!" 
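

# Note on the flow exercised above: as the assertions show, handle_action_output
# feeds console_stdout (or console_stderr) into a TerminalBlock (see
# chat/src/og_terminal/ui_block.py), whose content is mirrored into `values`;
# handle_action_end then finishes that block, which renders "✅" on success or
# "❌" on error, and collects any output_files into `images`. The error-path
# test below exercises the stderr variant of the same flow.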
122 | 123 | 124 | def test_error_handle_action_end(): 125 | images = [] 126 | values = [] 127 | task_state = agent_server_pb2.ContextState( 128 | output_token_count=10, 129 | llm_name="mock", 130 | total_duration=1, 131 | input_token_count=10, 132 | llm_response_duration=1000, 133 | ) 134 | task_blocks = TaskBlocks(values) 135 | task_blocks.begin() 136 | 137 | respond_stderr = agent_server_pb2.TaskResponse( 138 | state=task_state, 139 | response_type=agent_server_pb2.TaskResponse.OnStepActionStreamStderr, 140 | console_stderr="error", 141 | ) 142 | 143 | respond = agent_server_pb2.TaskResponse( 144 | state=task_state, 145 | response_type=agent_server_pb2.TaskResponse.OnStepActionEnd, 146 | on_step_action_end=agent_server_pb2.OnStepActionEnd( 147 | output="", output_files=["test.png"], has_error=True 148 | ), 149 | ) 150 | handle_action_output(task_blocks, respond_stderr) 151 | handle_action_end(task_blocks, respond, images) 152 | segments = list(task_blocks.render()) 153 | assert len(segments) == 2, "bad segment count" 154 | assert segments[0][1] == "❌" 155 | assert len(images) == 0 156 | assert values[0] == "\nerror" 157 | 158 | 159 | def test_handle_action_end_performance_test(): 160 | # Setup 161 | images = [] 162 | values = [] 163 | task_state = agent_server_pb2.ContextState( 164 | output_token_count=10, 165 | llm_name="mock", 166 | total_duration=1, 167 | input_token_count=10, 168 | llm_response_duration=1000, 169 | ) 170 | task_blocks = TaskBlocks(values) 171 | task_blocks.begin() 172 | 173 | # Create a large number of responses 174 | responses = [ 175 | agent_server_pb2.TaskResponse( 176 | state=task_state, 177 | response_type=agent_server_pb2.TaskResponse.OnStepActionEnd, 178 | on_step_action_end=agent_server_pb2.OnStepActionEnd( 179 | output="", 180 | output_files=[ 181 | f"test{i}.png" 182 | ], # Modify this line to create unique filenames 183 | has_error=False, 184 | ), 185 | ) 186 | for i in range(1000) 187 | ] 188 | 189 | # Call the function with each response 190 | for respond in responses: 191 | handle_action_end(task_blocks, respond, images) 192 | 193 | # Check the results 194 | assert len(images) == 1000 195 | assert all(image == f"test{i}.png" for i, image in enumerate(images)) 196 | -------------------------------------------------------------------------------- /agent/src/og_agent/agent_api_server.py: -------------------------------------------------------------------------------- 1 | # vim:fenc=utf-8 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | import sys 9 | import asyncio 10 | import uvicorn 11 | import json 12 | import logging 13 | from typing import List 14 | from enum import Enum 15 | from pydantic import BaseModel 16 | from fastapi import FastAPI, status, Response 17 | from og_sdk.agent_sdk import AgentProxySDK 18 | from og_proto import agent_server_pb2 19 | from fastapi.responses import StreamingResponse 20 | from fastapi.param_functions import Header, Annotated 21 | from dotenv import dotenv_values 22 | 23 | # the api server config 24 | config = dotenv_values(".env") 25 | 26 | LOG_LEVEL = ( 27 | logging.DEBUG if config.get("log_level", "info") == "debug" else logging.INFO 28 | ) 29 | logging.basicConfig( 30 | level=LOG_LEVEL, 31 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", 32 | handlers=[logging.StreamHandler(sys.stdout)], 33 | ) 34 | logger = logging.getLogger(__name__) 35 | 36 | app = FastAPI() 37 | # the agent endpoint 38 | listen_addr = "%s:%s" % 
( 39 | config.get("rpc_host", "127.0.0.1"),
40 | config.get("rpc_port", "9528"),
41 | )
42 | if config.get("rpc_host", "") == "0.0.0.0":
43 | listen_addr = "127.0.0.1:%s" % config.get("rpc_port", "9528")
44 | agent_sdk = AgentProxySDK(listen_addr)
45 |
46 |
47 | class StepResponseType(str, Enum):
48 | OnStepActionStart = "OnStepActionStart"
49 | OnStepTextTyping = "OnStepTextTyping"
50 | OnStepCodeTyping = "OnStepCodeTyping"
51 | OnStepActionStdout = "OnStepActionStdout"
52 | OnStepActionStderr = "OnStepActionStderr"
53 | OnStepActionEnd = "OnStepActionEnd"
54 | OnFinalAnswer = "OnFinalAnswer"
55 |
56 |
57 | class ContextState(BaseModel):
58 | output_token_count: int
59 | llm_name: str
60 | total_duration: int
61 | input_token_count: int
62 | llm_response_duration: int
63 | context_id: str | None = None
64 |
65 | @classmethod
66 | def new_from(cls, state):
67 | return cls(
68 | output_token_count=state.output_token_count,
69 | llm_name=state.llm_name,
70 | total_duration=state.total_duration,
71 | input_token_count=state.input_token_count,
72 | llm_response_duration=state.llm_response_duration,
73 | )
74 |
75 |
76 | class StepActionEnd(BaseModel):
77 | output: str
78 | output_files: List[str]
79 | has_error: bool
80 |
81 | @classmethod
82 | def new_from(cls, step_action_end: agent_server_pb2.OnStepActionEnd):
83 | return cls(
84 | output=step_action_end.output,
85 | output_files=step_action_end.output_files,
86 | has_error=step_action_end.has_error,
87 | )
88 |
89 |
90 | class FinalAnswer(BaseModel):
91 | answer: str
92 |
93 | @classmethod
94 | def new_from(cls, final_answer: agent_server_pb2.FinalAnswer):
95 | return cls(answer=final_answer.answer)
96 |
97 |
98 | class StepActionStart(BaseModel):
99 | input: str
100 | tool: str
101 |
102 | @classmethod
103 | def new_from(cls, step_action_start: agent_server_pb2.OnStepActionStart):
104 | return cls(input=step_action_start.input, tool=step_action_start.tool)
105 |
106 |
107 | class StepResponse(BaseModel):
108 | step_type: StepResponseType
109 | step_state: ContextState
110 | typing_content: str | None = None
111 | step_action_stdout: str | None = None
112 | step_action_stderr: str | None = None
113 | step_action_start: StepActionStart | None = None
114 | step_action_end: StepActionEnd | None = None
115 | final_answer: FinalAnswer | None = None
116 |
117 | @classmethod
118 | def new_from(cls, response: agent_server_pb2.TaskResponse):
119 | if response.response_type == agent_server_pb2.TaskResponse.OnStepActionStart:
120 | return cls(
121 | step_type=StepResponseType.OnStepActionStart,
122 | step_state=ContextState.new_from(response.state),
123 | step_action_start=StepActionStart.new_from(
124 | response.on_step_action_start
125 | ),
126 | )
127 | elif response.response_type == agent_server_pb2.TaskResponse.OnModelTypeCode:
128 | return cls(
129 | step_type=StepResponseType.OnStepCodeTyping,
130 | step_state=ContextState.new_from(response.state),
131 | typing_content=response.typing_content.content,
132 | )
133 |
134 | elif response.response_type == agent_server_pb2.TaskResponse.OnModelTypeText:
135 | return cls(
136 | step_type=StepResponseType.OnStepTextTyping,
137 | step_state=ContextState.new_from(response.state),
138 | typing_content=response.typing_content.content,
139 | )
140 | elif (
141 | response.response_type
142 | == agent_server_pb2.TaskResponse.OnStepActionStreamStdout
143 | ):
144 | return cls(
145 | step_type=StepResponseType.OnStepActionStdout,
146 | step_state=ContextState.new_from(response.state),
147 | 
step_action_stdout=response.console_stdout,
148 | )
149 | elif (
150 | response.response_type
151 | == agent_server_pb2.TaskResponse.OnStepActionStreamStderr
152 | ):
153 | return cls(
154 | step_type=StepResponseType.OnStepActionStderr,
155 | step_state=ContextState.new_from(response.state),
156 | step_action_stderr=response.console_stderr,
157 | )
158 | elif response.response_type == agent_server_pb2.TaskResponse.OnStepActionEnd:
159 | return cls(
160 | step_type=StepResponseType.OnStepActionEnd,
161 | step_state=ContextState.new_from(response.state),
162 | step_action_end=StepActionEnd.new_from(response.on_step_action_end),
163 | )
164 | elif response.response_type == agent_server_pb2.TaskResponse.OnFinalAnswer:
165 | return cls(
166 | step_type=StepResponseType.OnFinalAnswer,
167 | step_state=ContextState.new_from(response.state),
168 | final_answer=FinalAnswer.new_from(response.final_answer),
169 | )
170 |
171 |
172 | class TaskRequest(BaseModel):
173 | prompt: str
174 | token_limit: int
175 | llm_model_name: str
176 | input_files: List[str]
177 | context_id: str
178 |
179 |
180 | async def run_task(task: TaskRequest, key):
181 | async for respond in agent_sdk.prompt(
182 | task.prompt, key, files=task.input_files, context_id=task.context_id
183 | ):
184 | response = StepResponse.new_from(respond).model_dump(exclude_none=True)
185 | yield "data: %s\n\n" % json.dumps(response)
186 |
187 |
188 | @app.post("/process")
189 | async def process_task(
190 | task: TaskRequest,
191 | response: Response,
192 | api_token: Annotated[str | None, Header()] = None,
193 | ):
194 | if api_token is None:
195 | response.status_code = status.HTTP_401_UNAUTHORIZED
196 | return
197 | response.status_code = status.HTTP_200_OK
198 | # the media type must be set on the StreamingResponse itself, not on the injected Response
199 | agent_sdk.connect()
200 | return StreamingResponse(run_task(task, api_token), media_type="text/event-stream")
201 |
202 |
203 | async def run_server():
204 | logger.info(f"connecting to the agent server at {listen_addr}")
205 | port = int(config.get("rpc_port", "9528")) + 1
206 | server_config = uvicorn.Config(
207 | app, host=config.get("rpc_host", "127.0.0.1"), port=port
208 | )
209 | server = uvicorn.Server(server_config)
210 | await server.serve()
211 |
212 |
213 | def run_app():
214 | asyncio.run(run_server())
215 |
--------------------------------------------------------------------------------
/chat/src/og_terminal/ui_block.py:
--------------------------------------------------------------------------------
1 | # vim:fenc=utf-8
2 |
3 | # SPDX-FileCopyrightText: 2023 imotai
4 | # SPDX-FileContributor: imotai
5 |
6 | # SPDX-License-Identifier: Elastic-2.0
7 |
8 | """ """
9 |
10 | from rich.markdown import Markdown
11 | from og_sdk.utils import process_char_stream
12 | from rich.spinner import Spinner
13 | from rich.syntax import Syntax
14 |
15 |
16 | class BaseBlock:
17 |
18 | def __init__(self, index):
19 | self.index = index
20 | self.finished = False
21 | self.has_error = False
22 | self.emoji = ""
23 |
24 | def is_finished(self):
25 | return self.finished
26 |
27 | def get_index(self):
28 | return self.index
29 |
30 | def finish(self, has_error=False):
31 | self.finished = True
32 | self.has_error = has_error
33 |
34 | def get_status(self):
35 | if self.has_error:
36 | return "❌"
37 | if self.finished:
38 | return self.emoji
39 | else:
40 | return Spinner("dots", style="status.spinner", speed=1.0, text="")
41 |
42 | def set_emoji(self, emoji):
43 | self.emoji = emoji
44 |
45 |
46 | class StreamingBlock(BaseBlock):
47 |
48 | def __init__(self, index, content):
49 | super().__init__(index) 50 | self.content = content 51 | 52 | def append(self, new_content): 53 | if self.finished: 54 | return 55 | tmp_content = self.content + new_content 56 | self.content = process_char_stream(tmp_content) 57 | 58 | 59 | class MarkdownBlock(StreamingBlock): 60 | 61 | def __init__(self, index, content): 62 | super().__init__(index, content) 63 | self.set_emoji("🧠") 64 | 65 | def render(self): 66 | if self.finished: 67 | return Markdown(self.content) 68 | else: 69 | return Markdown(self.content + "█") 70 | 71 | 72 | class TerminalBlock(StreamingBlock): 73 | 74 | def __init__(self, index): 75 | super().__init__(index, "") 76 | self.set_emoji("✅") 77 | self.terminal_stdout = "" 78 | self.terminal_stderr = "" 79 | 80 | def render(self): 81 | output = self.terminal_stdout 82 | if self.terminal_stderr: 83 | output += "\n" + self.terminal_stderr 84 | if self.finished: 85 | return Syntax(output, "text", line_numbers=True) 86 | else: 87 | return Syntax(output + "█", "text", line_numbers=True) 88 | 89 | def write(self, terminal_stdout, terminal_stderr): 90 | if self.finished: 91 | return 92 | if terminal_stdout: 93 | tmp_content = self.terminal_stdout + terminal_stdout 94 | self.terminal_stdout = process_char_stream(tmp_content) 95 | if terminal_stderr: 96 | tmp_content = self.terminal_stderr + terminal_stderr 97 | self.terminal_stderr = process_char_stream(tmp_content) 98 | output = self.terminal_stdout 99 | if self.terminal_stderr: 100 | output += "\n" + self.terminal_stderr 101 | self.content = output 102 | 103 | 104 | class CodeBlock(StreamingBlock): 105 | 106 | def __init__(self, index, content, language): 107 | super().__init__(index, content) 108 | self.language = language 109 | self.set_emoji("📖") 110 | 111 | def render(self): 112 | if self.finished: 113 | return Syntax(self.content, self.language, line_numbers=True) 114 | else: 115 | return Syntax(self.content + "█", self.language, line_numbers=True) 116 | 117 | 118 | class LoadingBlock(BaseBlock): 119 | 120 | def __init__(self, index): 121 | super().__init__(index) 122 | 123 | def render(self): 124 | return "" 125 | 126 | 127 | class UploadFilesBlock(BaseBlock): 128 | 129 | def __init__(self, index, filenames): 130 | super().__init__(index) 131 | self.filenames = filenames 132 | self.file_states = {} 133 | 134 | def update_progress(self, filename, uploaded, total): 135 | self.file_states[filename] = (uploaded, total) 136 | 137 | 138 | class TaskBlocks: 139 | 140 | def __init__(self, values): 141 | self.blocks = [] 142 | self.values = values 143 | 144 | def begin(self): 145 | self.blocks.append(LoadingBlock(0)) 146 | 147 | def add_terminal(self, terminal_stdout, terminal_stderr): 148 | last_block = self.blocks[-1] 149 | if isinstance(last_block, LoadingBlock): 150 | self.blocks.pop() 151 | block = TerminalBlock(len(self.values)) 152 | block.write(terminal_stdout, terminal_stderr) 153 | self.blocks.append(block) 154 | self.values.append(block.content) 155 | elif isinstance(last_block, TerminalBlock): 156 | if last_block.is_finished(): 157 | block = TerminalBlock(len(self.values)) 158 | block.write(terminal_stdout, terminal_stderr) 159 | self.blocks.append(block) 160 | self.values.append(block.content) 161 | else: 162 | last_block.write(terminal_stdout, terminal_stderr) 163 | self.values[last_block.get_index()] = last_block.content 164 | else: 165 | last_block.finish() 166 | block = TerminalBlock(len(self.values)) 167 | block.write(terminal_stdout, terminal_stderr) 168 | self.blocks.append(block) 169 | 
self.values.append(block.content) 170 | 171 | def add_markdown(self, content): 172 | last_block = self.blocks[-1] 173 | if isinstance(last_block, LoadingBlock): 174 | self.blocks.pop() 175 | self.blocks.append(MarkdownBlock(len(self.values), content)) 176 | self.values.append(content) 177 | 178 | elif isinstance(last_block, MarkdownBlock): 179 | if last_block.is_finished(): 180 | self.blocks.append(MarkdownBlock(len(self.values), content)) 181 | self.values.append(content) 182 | else: 183 | last_block.append(content) 184 | self.values[last_block.get_index()] = last_block.content 185 | else: 186 | last_block.finish() 187 | self.blocks.append(MarkdownBlock(len(self.values), content)) 188 | self.values.append(content) 189 | 190 | def add_loading(self): 191 | last_block = self.blocks[-1] 192 | if isinstance(last_block, LoadingBlock) and not last_block.is_finished(): 193 | return 194 | self.blocks.append(LoadingBlock(0)) 195 | 196 | def finish_current_all_blocks(self): 197 | for block in self.blocks: 198 | if block.is_finished(): 199 | continue 200 | block.finish() 201 | 202 | def get_last_block(self): 203 | return self.blocks[-1] 204 | 205 | def add_code(self, code, language): 206 | last_block = self.blocks[-1] 207 | if isinstance(last_block, LoadingBlock): 208 | self.blocks.pop() 209 | self.blocks.append(CodeBlock(len(self.values), code, language)) 210 | self.values.append(code) 211 | elif isinstance(last_block, CodeBlock): 212 | if last_block.is_finished(): 213 | self.blocks.append(CodeBlock(len(self.values), code, language)) 214 | self.values.append(code) 215 | else: 216 | last_block.append(code) 217 | last_block.language = language 218 | self.values[last_block.get_index()] = last_block.content 219 | else: 220 | last_block.finish() 221 | self.blocks.append(CodeBlock(len(self.values), code, language)) 222 | self.values.append(code) 223 | 224 | def render(self): 225 | for block in self.blocks: 226 | if isinstance(block, LoadingBlock) and block.is_finished(): 227 | continue 228 | yield (block.get_index(), block.get_status(), block.render()) 229 | -------------------------------------------------------------------------------- /chat/src/og_discord/discord_chat.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 imotai 2 | # SPDX-FileContributor: imotai 3 | # 4 | # SPDX-License-Identifier: Elastic-2.0 5 | 6 | """ """ 7 | 8 | import discord 9 | import asyncio 10 | import logging 11 | import json 12 | import sys 13 | import os 14 | import click 15 | from datetime import datetime 16 | from dotenv import dotenv_values 17 | from og_proto import common_pb2 18 | from og_sdk.agent_sdk import AgentSDK 19 | 20 | LOG_LEVEL = logging.INFO 21 | logging.basicConfig( 22 | level=LOG_LEVEL, 23 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", 24 | handlers=[logging.StreamHandler(sys.stdout)], 25 | ) 26 | logger = logging.getLogger(__name__) 27 | 28 | 29 | class OctogenDiscordBot(discord.Client): 30 | 31 | def __init__(self, octogen_sdk, filedir, **kwargs): 32 | discord.Client.__init__(self, **kwargs) 33 | self.octogen_sdk = octogen_sdk 34 | self.filedir = filedir 35 | 36 | def handle_action_start(self, respond, saved_images): 37 | """Run on agent action.""" 38 | segments = [] 39 | if not respond.on_agent_action: 40 | return segments 41 | action = respond.on_agent_action 42 | if not action.input: 43 | return segments 44 | logger.info("handle action start return") 45 | arguments = json.loads(action.input) 46 | if action.tool == 
"execute_python_code" and action.input: 47 | explanation = arguments["explanation"] 48 | code = arguments["code"] 49 | saved_images.extend(arguments.get("saved_filenames", [])) 50 | mk = f"""{explanation}\n 51 | ```python 52 | {code} 53 | ```""" 54 | segments.append(mk) 55 | return segments 56 | 57 | def handle_final_answer(self, respond): 58 | segments = [] 59 | if not respond.final_respond: 60 | return segments 61 | answer = respond.final_respond.answer 62 | if not answer: 63 | return segments 64 | state = "token:%s iteration:%s model:%s" % ( 65 | respond.token_usage, 66 | respond.iteration, 67 | respond.model_name, 68 | ) 69 | segments.append("%s\n%s" % (answer, state)) 70 | return segments 71 | 72 | def handle_action_output(self, respond, saved_images): 73 | segments = [] 74 | if not respond.on_agent_action_end: 75 | return segments 76 | mk = respond.on_agent_action_end.output 77 | if not mk: 78 | return segments 79 | saved_images.extend(respond.on_agent_action_end.output_files) 80 | segments.append(mk) 81 | return segments 82 | 83 | async def download_files(self, images): 84 | for image in images: 85 | await self.octogen_sdk.download_file(image, self.filedir) 86 | 87 | async def on_ready(self): 88 | logger.info(f"Logged in as {self.user} (ID: {self.user.id})") 89 | 90 | async def run_app(self, name, message): 91 | saved_images = [] 92 | async for respond in self.octogen_sdk.run(name): 93 | if not respond: 94 | break 95 | if respond.on_agent_action_end: 96 | segments = self.handle_action_output(respond, saved_images) 97 | msg = "".join(segments) 98 | if msg: 99 | await message.channel.send(msg) 100 | if respond.on_agent_action: 101 | segments = self.handle_action_start(respond, saved_images) 102 | msg = "".join(segments) 103 | if msg: 104 | await message.channel.send(msg) 105 | saved_images = list(set(saved_images)) 106 | if saved_images: 107 | await self.download_files(saved_images) 108 | for filename in saved_images: 109 | fullpath = "%s/%s" % (self.filedir, filename) 110 | await message.channel.send("", file=discord.File(fullpath)) 111 | break 112 | 113 | async def show_apps(self): 114 | header = """Apps 115 | """ 116 | rows = [] 117 | apps = await self.octogen_sdk.query_apps() 118 | for index, app in enumerate(apps.apps): 119 | ctime = datetime.fromtimestamp(app.ctime).strftime("%m/%d/%Y") 120 | rows.append(f"{index+1}.{app.name}") 121 | table = header + "\n".join(rows) 122 | return table 123 | 124 | async def on_message(self, message): 125 | # we do not want the bot to reply to itself 126 | try: 127 | if message.author.id == self.user.id: 128 | return 129 | if message.content.find("/apps") >= 0: 130 | apps = await self.show_apps() 131 | await message.channel.send(apps) 132 | return 133 | content = message.content 134 | if content.find("/run") >= 0: 135 | name = content.split(" ")[1] 136 | await self.run_app(name, message) 137 | return 138 | await message.channel.send("working...") 139 | files = [] 140 | for att in message.attachments: 141 | 142 | async def generate_chunk(att): 143 | # TODO split 144 | chunk = await att.read() 145 | yield common_pb2.FileChunk(buffer=chunk, filename=att.filename) 146 | 147 | await sdk.upload_binary(generate_chunk(att), att.filename) 148 | files.append("uploaded " + att.filename) 149 | if files: 150 | prompt = message.content + "\n" + "\n".join(files) 151 | else: 152 | prompt = message.content 153 | try: 154 | async for respond in self.octogen_sdk.prompt(prompt): 155 | if not respond: 156 | break 157 | logger.info(f"{respond}") 158 | if 
respond.on_agent_action_end: 159 | saved_images = [] 160 | segments = self.handle_action_output(respond, saved_images) 161 | msg = "".join(segments) 162 | logger.info(f"action output {msg}") 163 | if msg: 164 | if saved_images: 165 | await self.download_files(saved_images) 166 | for filename in saved_images: 167 | fullpath = "%s/%s" % (self.filedir, filename) 168 | await message.channel.send( 169 | msg, file=discord.File(fullpath) 170 | ) 171 | break 172 | else: 173 | await message.channel.send(msg) 174 | if respond.on_agent_action: 175 | saved_images = [] 176 | segments = self.handle_action_start(respond, saved_images) 177 | msg = "".join(segments) 178 | logger.info(f"action start {msg}") 179 | if msg: 180 | await message.channel.send(msg) 181 | if respond.final_respond: 182 | segments = self.handle_final_answer(respond) 183 | msg = "".join(segments) 184 | logger.info(f"final answer {msg}") 185 | if msg: 186 | await message.channel.send(msg) 187 | except Exception as ex: 188 | logger.error(f"fail to get file {ex}") 189 | await message.channel.send("I am sorry for the internal error") 190 | except Exception as ex: 191 | logging.exception(ex) 192 | 193 | 194 | async def app(): 195 | octogen_discord_bot_dir = "~/.octogen_discord_bot" 196 | if octogen_discord_bot_dir.find("~") == 0: 197 | real_octogen_dir = octogen_discord_bot_dir.replace("~", os.path.expanduser("~")) 198 | else: 199 | real_octogen_dir = octogen_discord_bot_dir 200 | if not os.path.exists(real_octogen_dir): 201 | os.mkdir(real_octogen_dir) 202 | octogen_config = dotenv_values(real_octogen_dir + "/config") 203 | filedir = real_octogen_dir + "/data" 204 | if not os.path.exists(filedir): 205 | os.mkdir(filedir) 206 | sdk = AgentSDK(octogen_config["endpoint"], octogen_config["api_key"]) 207 | sdk.connect() 208 | intents = discord.Intents.default() 209 | intents.message_content = True 210 | client = OctogenDiscordBot(sdk, filedir, intents=intents) 211 | await client.start(octogen_config["discord_bot_token"]) 212 | 213 | 214 | def run_app(): 215 | asyncio.run(app()) 216 | -------------------------------------------------------------------------------- /agent/tests/openai_agent_tests.py: -------------------------------------------------------------------------------- 1 | # vim:fenc=utf-8 2 | 3 | # SPDX-FileCopyrightText: 2023 imotai 4 | # SPDX-FileContributor: imotai 5 | # 6 | # SPDX-License-Identifier: Elastic-2.0 7 | 8 | """ """ 9 | 10 | import json 11 | import logging 12 | import pytest 13 | from og_sdk.kernel_sdk import KernelSDK 14 | from og_agent import openai_agent 15 | from og_proto.agent_server_pb2 import ProcessOptions, TaskResponse, ProcessTaskRequest 16 | from openai.openai_object import OpenAIObject 17 | import asyncio 18 | import pytest_asyncio 19 | 20 | api_base = "127.0.0.1:9528" 21 | api_key = "ZCeI9cYtOCyLISoi488BgZHeBkHWuFUH" 22 | 23 | logger = logging.getLogger(__name__) 24 | 25 | 26 | class PayloadStream: 27 | 28 | def __init__(self, payload): 29 | self.payload = payload 30 | 31 | def __aiter__(self): 32 | # create an iterator of the input keys 33 | self.iter_keys = iter(self.payload) 34 | return self 35 | 36 | async def __anext__(self): 37 | try: 38 | k = next(self.iter_keys) 39 | obj = OpenAIObject() 40 | delta = OpenAIObject() 41 | content = OpenAIObject() 42 | content.content = k 43 | delta.delta = content 44 | obj.choices = [delta] 45 | return obj 46 | except StopIteration: 47 | # raise stopasynciteration at the end of iterator 48 | raise StopAsyncIteration 49 | 50 | 51 | class FunctionCallPayloadStream: 52 
| 53 | def __init__(self, name, arguments): 54 | self.name = name 55 | self.arguments = arguments 56 | 57 | def __aiter__(self): 58 | # create an iterator of the input keys 59 | self.iter_keys = iter(self.arguments) 60 | return self 61 | 62 | async def __anext__(self): 63 | try: 64 | k = next(self.iter_keys) 65 | obj = OpenAIObject() 66 | delta = OpenAIObject() 67 | function_para = OpenAIObject() 68 | function_para.name = self.name 69 | function_para.arguments = k 70 | function_call = OpenAIObject() 71 | function_call.function_call = function_para 72 | delta.delta = function_call 73 | obj.choices = [delta] 74 | return obj 75 | except StopIteration: 76 | # raise stopasynciteration at the end of iterator 77 | raise StopAsyncIteration 78 | 79 | 80 | class MockContext: 81 | 82 | def done(self): 83 | return False 84 | 85 | 86 | class MultiCallMock: 87 | 88 | def __init__(self, responses): 89 | self.responses = responses 90 | self.index = 0 91 | 92 | def call(self, *args, **kwargs): 93 | if self.index >= len(self.responses): 94 | raise Exception("no more response") 95 | self.index += 1 96 | logger.debug("call index %d", self.index) 97 | return self.responses[self.index - 1] 98 | 99 | 100 | @pytest.fixture 101 | def kernel_sdk(): 102 | endpoint = ( 103 | "localhost:9527" # Replace with the actual endpoint of your test gRPC server 104 | ) 105 | return KernelSDK(endpoint, "ZCeI9cYtOCyLISoi488BgZHeBkHWuFUH") 106 | 107 | 108 | @pytest.mark.asyncio 109 | async def test_openai_agent_call_execute_bash_code(mocker, kernel_sdk): 110 | kernel_sdk.connect() 111 | arguments = { 112 | "explanation": "the hello world in bash", 113 | "code": "echo 'hello world'", 114 | "saved_filenames": [], 115 | "language": "bash", 116 | } 117 | stream1 = FunctionCallPayloadStream("execute", json.dumps(arguments)) 118 | sentence = "The output 'hello world' is the result" 119 | stream2 = PayloadStream(sentence) 120 | call_mock = MultiCallMock([stream1, stream2]) 121 | with mocker.patch( 122 | "og_agent.openai_agent.openai.ChatCompletion.acreate", 123 | side_effect=call_mock.call, 124 | ) as mock_openai: 125 | agent = openai_agent.OpenaiAgent("gpt4", kernel_sdk, is_azure=False) 126 | queue = asyncio.Queue() 127 | task_opt = ProcessOptions( 128 | streaming=True, 129 | llm_name="gpt4", 130 | input_token_limit=100000, 131 | output_token_limit=100000, 132 | timeout=5, 133 | ) 134 | request = ProcessTaskRequest( 135 | input_files=[], 136 | task="write a hello world in bash", 137 | context_id="", 138 | options=task_opt, 139 | ) 140 | await agent.arun(request, queue, MockContext(), task_opt) 141 | responses = [] 142 | while True: 143 | try: 144 | response = await queue.get() 145 | if not response: 146 | break 147 | responses.append(response) 148 | except asyncio.QueueEmpty: 149 | break 150 | logger.info(responses) 151 | console_output = list( 152 | filter( 153 | lambda x: x.response_type == TaskResponse.OnStepActionStreamStdout, 154 | responses, 155 | ) 156 | ) 157 | assert len(console_output) == 1, "bad console output count" 158 | assert console_output[0].console_stdout == "hello world\n", "bad console output" 159 | 160 | 161 | @pytest.mark.asyncio 162 | async def test_openai_agent_call_execute_python_code(mocker, kernel_sdk): 163 | kernel_sdk.connect() 164 | arguments = { 165 | "explanation": "the hello world in python", 166 | "code": "print('hello world')", 167 | "language": "python", 168 | "saved_filenames": [], 169 | } 170 | stream1 = FunctionCallPayloadStream("execute", json.dumps(arguments)) 171 | sentence = "The output 'hello 
world' is the result" 172 | stream2 = PayloadStream(sentence) 173 | call_mock = MultiCallMock([stream1, stream2]) 174 | with mocker.patch( 175 | "og_agent.openai_agent.openai.ChatCompletion.acreate", 176 | side_effect=call_mock.call, 177 | ) as mock_openai: 178 | agent = openai_agent.OpenaiAgent("gpt4", kernel_sdk, is_azure=False) 179 | queue = asyncio.Queue() 180 | task_opt = ProcessOptions( 181 | streaming=True, 182 | llm_name="gpt4", 183 | input_token_limit=100000, 184 | output_token_limit=100000, 185 | timeout=5, 186 | ) 187 | request = ProcessTaskRequest( 188 | input_files=[], 189 | task="write a hello world in python", 190 | context_id="", 191 | options=task_opt, 192 | ) 193 | await agent.arun(request, queue, MockContext(), task_opt) 194 | responses = [] 195 | while True: 196 | try: 197 | response = await queue.get() 198 | if not response: 199 | break 200 | responses.append(response) 201 | except asyncio.QueueEmpty: 202 | break 203 | logger.info(responses) 204 | console_output = list( 205 | filter( 206 | lambda x: x.response_type == TaskResponse.OnStepActionStreamStdout, 207 | responses, 208 | ) 209 | ) 210 | assert len(console_output) == 1, "bad console output count" 211 | assert console_output[0].console_stdout == "hello world\n", "bad console output" 212 | 213 | 214 | @pytest.mark.asyncio 215 | async def test_openai_agent_smoke_test(mocker, kernel_sdk): 216 | sentence = "Hello, how can I help you?" 217 | stream = PayloadStream(sentence) 218 | with mocker.patch( 219 | "og_agent.openai_agent.openai.ChatCompletion.acreate", return_value=stream 220 | ) as mock_openai: 221 | agent = openai_agent.OpenaiAgent("gpt4", kernel_sdk, is_azure=False) 222 | queue = asyncio.Queue() 223 | task_opt = ProcessOptions( 224 | streaming=True, 225 | llm_name="gpt4", 226 | input_token_limit=100000, 227 | output_token_limit=100000, 228 | timeout=5, 229 | ) 230 | request = ProcessTaskRequest( 231 | input_files=[], task="hello", context_id="", options=task_opt 232 | ) 233 | await agent.arun(request, queue, MockContext(), task_opt) 234 | responses = [] 235 | while True: 236 | try: 237 | response = await queue.get() 238 | if not response: 239 | break 240 | responses.append(response) 241 | except asyncio.QueueEmpty: 242 | break 243 | logger.info(responses) 244 | assert len(responses) == len(sentence) + 1, "bad response count" 245 | assert ( 246 | responses[-1].response_type == TaskResponse.OnFinalAnswer 247 | ), "bad response type" 248 | assert responses[-1].state.input_token_count == 153 249 | assert responses[-1].state.output_token_count == 8 250 | --------------------------------------------------------------------------------