├── .github └── workflows │ ├── docs.yaml │ ├── python-vuln.yaml │ └── release.yml ├── .gitignore ├── LICENSE ├── README.md ├── bandit.yaml ├── docs ├── function-indexing │ └── overview.md ├── index.md └── python-functions │ └── python-function-calling.md ├── func_ai ├── __init__.py ├── function_indexer.py ├── ui_demos │ ├── __init__.py │ └── api_qa.py ├── utils │ ├── __init__.py │ ├── api_qa_system_prompt.txt │ ├── common.py │ ├── jinja_template_functions.py │ ├── llm_tools.py │ ├── openapi_function_parser.py │ └── py_function_parser.py └── workflow_creator.py ├── mkdocs.yml ├── poetry.lock ├── pyproject.toml ├── pytest.ini └── tests ├── __init__.py ├── jupyter └── doc-example-testing.ipynb ├── local_inf.py ├── template.xml ├── template2.txt ├── test_decorators.py ├── test_function_indexer.py ├── test_function_indexing.py ├── test_openapi_function_parser.py ├── test_parser.py └── test_template.py /.github/workflows/docs.yaml: -------------------------------------------------------------------------------- 1 | name: docs 2 | on: 3 | push: 4 | branches: 5 | - develop 6 | - main 7 | permissions: 8 | contents: write 9 | jobs: 10 | deploy: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v3 14 | - uses: actions/setup-python@v4 15 | with: 16 | python-version: 3.x 17 | - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV 18 | - uses: actions/cache@v3 19 | with: 20 | key: mkdocs-material-${{ env.cache_id }} 21 | path: .cache 22 | restore-keys: | 23 | mkdocs-material- 24 | - run: pip install mkdocs-material 25 | - run: mkdocs gh-deploy --force 26 | -------------------------------------------------------------------------------- /.github/workflows/python-vuln.yaml: -------------------------------------------------------------------------------- 1 | name: Python Vulnerability Scan 2 | on: 3 | push: 4 | branches: 5 | - '*' 6 | - '*/**' 7 | paths: 8 | - func_ai/** 9 | 10 | jobs: 11 | bandit-scan: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: 
Checkout 15 | uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0 16 | - uses: amikos-tech/py-vulnerability-scanner@main 17 | with: 18 | input-dir: './func_ai' 19 | format: 'json' 20 | bandit-config: 'bandit.yaml' 21 | - name: Upload Bandit Report 22 | uses: actions/upload-artifact@v3 23 | with: 24 | name: bandit-report 25 | path: | 26 | ./bandit-*.json -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python Package 2 | 3 | on: 4 | release: 5 | types: [created] 6 | permissions: 7 | actions: write 8 | jobs: 9 | build-n-publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v2 13 | 14 | - name: Set up Python 3.10 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: '3.10' 18 | 19 | - name: Install dependencies 20 | run: | 21 | set -e 22 | python -m pip install --upgrade pip 23 | curl -sSL https://install.python-poetry.org | python3 - 24 | 25 | shell: bash 26 | # - name: Version bump 27 | # run: | 28 | # set -e 29 | # poetry version ${{ github.event.release.tag_name }} 30 | # git add ./pyproject.toml 31 | # git config --global user.name "Release Bot" 32 | # git config --global user.email "opensource@amikos.tech" 33 | # git commit -m "Change version to ${{ github.event.release.tag_name }}" --allow-empty 34 | # git push origin HEAD:main 35 | # shell: bash 36 | 37 | - name: Publish package to PyPI 38 | run: | 39 | set -e 40 | poetry config pypi-token.pypi ${{ secrets.PYPI_API_TOKEN }} 41 | poetry publish -n --build 42 | shell: bash 43 | # poetry config repositories.publish ${{ inputs.PUBLISH_REGISTRY }} 44 | # poetry publish -p ${{ secrets.PYPI_API_TOKEN }} -u ${{ inputs.PUBLISH_REGISTRY_USERNAME }} -r publish --build 45 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | .idea/ 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | develop-eggs/ 9 | dist/ 10 | downloads/ 11 | eggs/ 12 | .eggs/ 13 | lib/ 14 | lib64/ 15 | parts/ 16 | sdist/ 17 | var/ 18 | wheels/ 19 | share/python-wheels/ 20 | *.egg-info/ 21 | .installed.cfg 22 | *.egg 23 | MANIFEST 24 | *.manifest 25 | *.spec 26 | pip-log.txt 27 | pip-delete-this-directory.txt 28 | htmlcov/ 29 | .tox/ 30 | .nox/ 31 | .coverage 32 | .coverage.* 33 | .cache 34 | nosetests.xml 35 | coverage.xml 36 | *.cover 37 | *.py,cover 38 | .hypothesis/ 39 | .pytest_cache/ 40 | cover/ 41 | *.mo 42 | *.pot 43 | *.log 44 | local_settings.py 45 | db.sqlite3 46 | db.sqlite3-journal 47 | instance/ 48 | .webassets-cache 49 | .scrapy 50 | docs/_build/ 51 | .pybuilder/ 52 | target/ 53 | .ipynb_checkpoints 54 | profile_default/ 55 | ipython_config.py 56 | .pdm.toml 57 | __pypackages__/ 58 | celerybeat-schedule 59 | celerybeat.pid 60 | *.sage.py 61 | .env 62 | .venv 63 | env/ 64 | venv/ 65 | ENV/ 66 | env.bak/ 67 | venv.bak/ 68 | .spyderproject 69 | .spyproject 70 | .ropeproject 71 | /site 72 | .mypy_cache/ 73 | .dmypy.json 74 | dmypy.json 75 | .pyre/ 76 | .pytype/ 77 | cython_debug/ 78 | report.* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2023 Amikos Tech Ltd. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI Functional Catalog 2 | 3 | Your OpenAI function calling on steroids 4 | 5 | Features: 6 | 7 | - Index any python function and use it in your AI workflows 8 | - Index any CLI command and use it in your AI workflows 9 | - Index any API endpoint and use it in your AI workflows 10 | 11 | ## Installation 12 | 13 | With pip: 14 | 15 | ```bash 16 | pip install func-ai 17 | ``` 18 | 19 | With poetry: 20 | 21 | ```bash 22 | poetry add func-ai 23 | ``` 24 | 25 | ## Usage 26 | 27 | ### Pydantic Class Mapping 28 | 29 | ```python 30 | from pydantic import Field 31 | 32 | from func_ai.utils.llm_tools import OpenAIInterface, OpenAISchema 33 | 34 | 35 | class User(OpenAISchema): 36 | """ 37 | This is a user 38 | """ 39 | id: int = Field(None, description="The user's id") 40 | name: str = Field(..., description="The user's name") 41 | 42 | 43 | def test_user_openai_schema(): 44 | print(User.from_prompt(prompt="Create a user with id 100 and name Jimmy", llm_interface=OpenAIInterface()).json()) 45 | """ 46 | Returns: {"id": 100, "name": "Jimmy"} 47 | """ 48 | 49 | ``` 50 | 51 | ### OpenAPI Mapping 52 | 53 | ```python 54 | from dotenv import load_dotenv 55 | 56 | from func_ai.utils.llm_tools import OpenAIInterface 57 | from func_ai.utils.openapi_function_parser import OpenAPISpecOpenAIWrapper 58 | 59 | load_dotenv() 60 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 61 | llm_interface=OpenAIInterface()) 62 | print(_spec.from_prompt("Get pet with id 10", "getPetById").last_call) 63 | """ 64 | 2023-07-03 10:43:04 DEBUG Starting new HTTP connection (1): petstore.swagger.io:80 65 | 2023-07-03 10:43:04 DEBUG http://petstore.swagger.io:80 "GET /v2/swagger.json HTTP/1.1" 301 134 66 | 2023-07-03 10:43:04 DEBUG Starting new HTTPS connection (1): petstore.swagger.io:443 67 
| 2023-07-03 10:43:04 DEBUG https://petstore.swagger.io:443 "GET /v2/swagger.json HTTP/1.1" 200 None 68 | 2023-07-03 10:43:04 DEBUG Prompt: Get pet with id 10 69 | 2023-07-03 10:43:04 DEBUG message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions 70 | 2023-07-03 10:43:04 DEBUG api_version=None data='{"model": "gpt-3.5-turbo-0613", "messages": [{"role": "user", "content": "Get pet with id 10"}], "functions": [{"name": "getPetById", "description": "Find pet by IDReturns a single pet", "parameters": {"type": "object", "properties": {"petId": {"description": "ID of pet to return", "type": "string", "in": "path"}}, "required": ["petId"]}}], "function_call": "auto", "temperature": 0.0, "top_p": 1.0, "frequency_penalty": 0.0, "presence_penalty": 0.0, "max_tokens": 256}' message='Post details' 71 | 2023-07-03 10:43:04 DEBUG Converted retries value: 2 -> Retry(total=2, connect=None, read=None, redirect=None, status=None) 72 | 2023-07-03 10:43:05 DEBUG Starting new HTTPS connection (1): api.openai.com:443 73 | 2023-07-03 10:43:06 DEBUG https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None 74 | 2023-07-03 10:43:06 DEBUG message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=876 request_id=f38d1625ae785681b53686492fd1d7e3 response_code=200 75 | 2023-07-03 10:43:06 DEBUG Starting new HTTPS connection (1): petstore.swagger.io:443 76 | 2023-07-03 10:43:07 DEBUG https://petstore.swagger.io:443 "GET /v2/pet/10 HTTP/1.1" 200 None 77 | PASSED [100%]{'function_call': JSON: { 78 | "name": "getPetById", 79 | "arguments": "{\n \"petId\": \"10\"\n}" 80 | }, 'function_response': {'role': 'function', 'name': 'getPetById', 'content': '{\'status_code\': 200, \'response\': \'{"id":10,"category":{"id":10,"name":"sample string"},"name":"doggie","photoUrls":["sample 1","sample 2","sample 3"],"tags":[{"id":10,"name":"sample string"},{"id":10,"name":"sample string"}],"status":"available"}\'}'}} 81 | 82 | """ 
83 | ``` 84 | 85 | > Note: The above example is still in beta and is not production ready. 86 | 87 | ### Jinja2 Templating 88 | 89 | ```python 90 | from dotenv import load_dotenv 91 | from func_ai.utils.jinja_template_functions import JinjaOpenAITemplateFunction 92 | from func_ai.utils.llm_tools import OpenAIInterface 93 | load_dotenv() 94 | ji = JinjaOpenAITemplateFunction.from_string_template("Name: {{ NAME }} \n Age: {{ AGE }}", OpenAIInterface()) 95 | resp = ji.render_from_prompt("John is 20 years old") 96 | assert "Name: John" in resp 97 | assert "Age: 20" in resp 98 | # prints 99 | """ 100 | Name: John 101 | Age: 20 102 | """ 103 | ``` 104 | 122 | ### OpenAPI Spec Chat Bot 123 | 124 | This example starts a `gradio` server that allows you to interact with the OpenAPI spec.
125 | 126 | ```python 127 | import gradio as gr 128 | from dotenv import load_dotenv 129 | 130 | from func_ai.utils.llm_tools import OpenAIInterface 131 | from func_ai.utils.openapi_function_parser import OpenAPISpecOpenAIWrapper 132 | 133 | _chat_message = [] 134 | 135 | _spec = None 136 | 137 | 138 | def add_text(history, text): 139 | global _chat_message 140 | history = history + [(text, None)] 141 | _chat_message.append(_spec.api_qa(text, max_tokens=500)) 142 | return history, "" 143 | 144 | 145 | def add_file(history, file): 146 | history = history + [((file.name,), None)] 147 | return history 148 | 149 | 150 | def bot(history): 151 | global _chat_message 152 | # print(temp_callback_handler.get_output()) 153 | # response = temp_callback_handler.get_output()['output'] 154 | history[-1][1] = _chat_message[-1] 155 | return history 156 | 157 | 158 | with gr.Blocks() as demo: 159 | chatbot = gr.Chatbot([], elem_id="chatbot").style(height=1500) 160 | 161 | with gr.Row(): 162 | with gr.Column(scale=1): 163 | txt = gr.Textbox( 164 | show_label=False, 165 | placeholder="Enter text and press enter", 166 | ).style(container=False) 167 | txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then( 168 | bot, chatbot, chatbot 169 | ) 170 | 171 | if __name__ == "__main__": 172 | load_dotenv() 173 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 174 | llm_interface=OpenAIInterface(), index=True) 175 | demo.launch() 176 | ``` 177 | 178 | ## Inspiration 179 | 180 | - https://github.com/jxnl/openai_function_call 181 | - https://github.com/rizerphe/openai-functions 182 | - https://github.com/aurelio-labs/funkagent -------------------------------------------------------------------------------- /bandit.yaml: -------------------------------------------------------------------------------- 1 | # FILE: bandit.yaml 2 | exclude_dirs: ['tests', 'docs', 'build', 'dist', '.git', '.tox', '.venv', 'venv', 'env'] 3 | tests: [] 4 | skips: [] 
-------------------------------------------------------------------------------- /docs/function-indexing/overview.md: -------------------------------------------------------------------------------- 1 | # Function Indexing 2 | 3 | The library supports function indexing (with some limitations). This means that you can index your functions and then 4 | query them using the `func-ai` library. This is useful if you want to query your functions using natural language and 5 | especially when you have a lot of functions which cannot fit in the LLM context. 6 | 7 | The Function Indexer (FI) relies on a chromadb vector store to store function descriptions and then perform semantic 8 | search on those descriptions to find the most relevant functions. 9 | 10 | Limitations: 11 | 12 | - Partials, while supported for indexing and function wrapping using `OpenAIFunctionWrapper`, cannot be rehydrated in the 13 | index once it is reloaded (e.g. app restart). The suggested workaround is, at app/script startup, to reindex the 14 | partials, which will not re-add them to the index but will only rehydrate them in the index map.
15 | 16 | ## Usage 17 | 18 | ```python 19 | 20 | import chromadb 21 | from chromadb import Settings 22 | from dotenv import load_dotenv 23 | 24 | from func_ai.function_indexer import FunctionIndexer 25 | 26 | 27 | def function_to_index(a: int, b: int) -> int: 28 | """ 29 | This is a function that adds two numbers 30 | 31 | :param a: First number 32 | :param b: Second number 33 | :return: Sum of a and b 34 | """ 35 | return a + b 36 | 37 | 38 | def another_function_to_index() -> str: 39 | """ 40 | This is a function returns hello world 41 | 42 | :return: Hello World 43 | """ 44 | 45 | return "Hello World" 46 | 47 | 48 | def test_function_indexer_init_no_args_find_function_enhanced_summary(): 49 | load_dotenv() 50 | _indexer = FunctionIndexer(chroma_client=chromadb.PersistentClient(settings=Settings(allow_reset=True))) 51 | _indexer.reset_function_index() 52 | _indexer.index_functions([function_to_index, another_function_to_index], enhanced_summary=True) 53 | _results = _indexer.find_functions("Add two numbers", max_results=10, similarity_threshold=0.2) 54 | assert len(_results) == 1 55 | assert _results[0].function(1, 2) == 3 56 | 57 | 58 | if __name__ == "__main__": 59 | test_function_indexer_init_no_args_find_function_enhanced_summary() 60 | ``` 61 | 62 | The above code shows how to use the two main functions of the Function Indexer: 63 | 64 | - `index_functions` which indexes a list of functions 65 | - `find_functions` which finds functions based on a query string 66 | 67 | ## API Docs 68 | 69 | ### FunctionIndexer 70 | 71 | Init args: 72 | 73 | - `chroma_client`: A chromadb client to use for storing the function index. If not provided a new client will be created 74 | using the default settings (e.g. `chromadb.PersistentClient(settings=Settings(allow_reset=True))`). 75 | - `llm_interface`: An LLM interface to use for function wrapping. If not provided a new LLM interface will be created 76 | using the default settings (e.g. `OpenAIInterface()`). 
77 | - `embedding_function`: A function that takes a string and returns an embedding. If not provided the default embedding 78 | function will be used (e.g. `embedding_functions.OpenAIEmbeddingFunction()`). 79 | - `collection_name`: The name of the collection to use for storing the function index. If not provided this defaults 80 | to `function_index`. 81 | 82 | > Note: You should always initialize your FunctionIndexer with the same embedding function 83 | 84 | #### `index_functions` 85 | 86 | Args: 87 | 88 | - `functions`: A list of functions to index 89 | - `enhanced_summary`: If True the function summary will be enhanced with the function docstring. Defaults to False. 90 | - `llm_interface`: An LLM interface to use for function wrapping. If not provided the one used in Indexer init will be 91 | used 92 | 93 | #### `find_functions` 94 | 95 | Args: 96 | 97 | - `query`: The query string to use for finding functions 98 | - `max_results`: The maximum number of results to return. Defaults to 2. 99 | - `similarity_threshold`: The similarity threshold to use for filtering results. Defaults to 1.0. 100 | 101 | Returns a named tuple `SearchResult` with the following fields: 102 | 103 | - `function`: The actual function that can be directly called 104 | - `name`: The function name 105 | - `wrapper`: The `OpenAIFunctionWrapper` function wrapper 106 | - `distance`: The distance of the function from the query string 107 | 108 | > Note: The returned list is sorted by distance in ascending order (i.e. the first result is the closest to the query) 109 | 110 | #### `functions_summary` 111 | 112 | Returns: A dictionary containing function names and their descriptions. 113 | 114 | #### `index_wrapper_functions` 115 | 116 | This is identical to `index_functions` but the list of functions is a list of `OpenAIFunctionWrapper` objects.
-------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Welcome to AI Functional Catalog 2 | 3 | AI FunCat (or func-ai) is a library to help you build a catalog of reusable functions and interact with them using LLMs 4 | (for now only OpenAI is supported) 5 | -------------------------------------------------------------------------------- /docs/python-functions/python-function-calling.md: -------------------------------------------------------------------------------- 1 | # Python Function Calling 2 | 3 | In this article we'll cover how to call Python functions using `func-ai`. 4 | 5 | ## Pre-requisites 6 | 7 | Before you begin, make sure you have the following: 8 | 9 | - `func-ai` installed (`pip install func-ai`) 10 | - An OpenAI API key set in the `OPENAI_API_KEY` environment variable (you can have a `.env` file in the current 11 | directory with `OPENAI_API_KEY=<your-api-key>` and then use `load_dotenv()` to load the environment variables from the 12 | file) 13 | 14 | ## Calling Python functions using OpenAI API 15 | 16 | First let's define a Python function we want to call using an LLM: 17 | 18 | ```python 19 | def add_two_numbers(a: int, b: int) -> int: 20 | """ 21 | Adds two numbers 22 | 23 | :param a: The first number 24 | :param b: The second number 25 | :return: The sum of the two numbers 26 | """ 27 | return a + b 28 | ``` 29 | 30 | A few key points about how functions we want to expose to LLMs should be defined: 31 | 32 | - The function MUST have type-hints for all parameters and the return value. This helps LLMs understand what the 33 | function does and how to call it. 34 | - The function MUST have a docstring. The docstring and in particular the description is used by the LLM to identify the 35 | function to call. 36 | - The function docstring MUST contain parameters and their descriptions.
This helps LLMs understand what parameters the 37 | function takes and what they are used for. 38 | 39 | Now let's convert the above function so that it can be called using OpenAI function calling capability: 40 | 41 | ```python 42 | from func_ai.utils.py_function_parser import func_to_json 43 | 44 | _json_fun = func_to_json(add_two_numbers) 45 | ``` 46 | 47 | In the above snippet we use `func_to_json` to convert the python function to a dictionary that can be passed to OpenAI 48 | API. 49 | 50 | Now let's do some prompting to see how the function can be called: 51 | 52 | ```python 53 | import openai 54 | import json 55 | from dotenv import load_dotenv 56 | 57 | load_dotenv() 58 | 59 | 60 | def call_openai(_messages, _functions: list = None): 61 | if _functions: 62 | _open_ai_resp = openai.ChatCompletion.create( 63 | model="gpt-3.5-turbo-0613", 64 | messages=_messages, 65 | functions=_functions, 66 | function_call="auto", 67 | temperature=0.0, 68 | top_p=1.0, 69 | frequency_penalty=0.0, 70 | presence_penalty=0.0, 71 | max_tokens=256, 72 | ) 73 | else: 74 | _open_ai_resp = openai.ChatCompletion.create( 75 | model="gpt-3.5-turbo-0613", 76 | messages=_messages, 77 | temperature=0.5, 78 | top_p=1.0, 79 | frequency_penalty=0.0, 80 | presence_penalty=0.0, 81 | max_tokens=256, 82 | ) 83 | return _open_ai_resp["choices"][0]["message"] 84 | 85 | 86 | _messages = [{"role": "system", 87 | "content": "You are a helpful automation system that helps users to perform a variety of supported tasks."}, 88 | {"role": "user", "content": "I want to add 5 and 10"}] 89 | _functions = [_json_fun] 90 | response = call_openai(_messages, _functions) 91 | if "function_call" in response: 92 | _result = add_two_numbers(**json.loads(response["function_call"]["arguments"])) 93 | print(f"Result: {_result}") 94 | _function_call_llm_response = { 95 | "role": "function", 96 | "name": response["function_call"]["name"], 97 | "content": f"Result: {_result}", 98 | } 99 | 
_messages.append(_function_call_llm_response) 100 | print(call_openai(_messages)) 101 | ``` 102 | 103 | The above snippet will print the following: 104 | 105 | ```text 106 | Result: 15 107 | { 108 | "role": "assistant", 109 | "content": "The sum of 5 and 10 is 15." 110 | } 111 | ``` 112 | 113 | Let's break down the above snippet: 114 | 115 | - First we define a function `call_openai` that takes a list of messages and a list of functions to call. The function 116 | uses the `openai.ChatCompletion.create` API to call OpenAI and get a response. 117 | - Next we define a list of messages that we want to send to OpenAI. The first message is a system message that describes 118 | what the system does. The second message is a user message that tells the system what the user wants to do. 119 | - Next we define a list of functions that we want to expose to OpenAI. In this case we only have one function. 120 | - Next we call the `call_openai` function with the messages and functions. The response from OpenAI is stored in the 121 | `response` variable. 122 | - Next we check if the response contains a `function_call` key. If it does then we know that OpenAI has called our 123 | function and we can get the result from the `function_call` key. 124 | - Next we print the result of the function call. 125 | - Next we create a new message that contains the result of the function call and append it to the list of messages. 126 | - Finally we call the `call_openai` function again with the updated list of messages. This time OpenAI will respond with 127 | a message that contains the result of the function call. 128 | 129 | !!! note "Non-Production Example" 130 | 131 | The above is a naive example of how you can use the `func-ai` library to convert your python functions and use them 132 | with OpenAI. `func-ai` offer much more advanced mechanisms to help you build a production ready code. 
Please check 133 | other articles in the documentation to learn more or get in touch [with us](mailto:info@amikos.tech) if you need help. 134 | 135 | ## Working with `functools.partial` 136 | 137 | Python `functools` library offers the ability to create partial functions with some of the parameters already set. This 138 | is particularly useful in cases where you have either static parameter you want to configure, sensitive parameter such a 139 | secret or a state object (e.g. DB connection) in which case you either cannot or do not want to send that info to 140 | OpenAI. `partial` to the rescue! 141 | 142 | Let's create a new function called `query_db` where we want our DB driver to be a fixed parameter and not passed to the 143 | LLM: 144 | 145 | > Note: We make the assumption that `call_openai` function is already defined as per the previous example. 146 | 147 | ```python 148 | from functools import partial 149 | from func_ai.utils.py_function_parser import func_to_json 150 | import json 151 | 152 | 153 | def query_db(db_driver: object, query: str) -> str: 154 | """ 155 | Queries the database 156 | 157 | :param db_driver: The database driver to use 158 | :param query: The query to execute 159 | :return: The result of the query 160 | """ 161 | return f"Querying {db_driver} with query {query}" 162 | 163 | 164 | _partial_fun = partial(query_db, db_driver="MySQL") 165 | _json_fun = func_to_json(_partial_fun) 166 | _messages = [{"role": "system", 167 | "content": "You are a helpful automation system that helps users to perform a variety of supported tasks."}, 168 | {"role": "user", "content": "Query the db for quarterly sales."}] 169 | _functions = [_json_fun] 170 | response = call_openai(_messages, _functions) 171 | if "function_call" in response: 172 | _result = _partial_fun(**json.loads(response["function_call"]["arguments"])) 173 | print(f"Result: {_result}") 174 | _function_call_llm_response = { 175 | "role": "function", 176 | "name": 
response["function_call"]["name"], 177 | "content": f"Result: {_result}", 178 | } 179 | _messages.append(_function_call_llm_response) 180 | print(call_openai(_messages)) 181 | ``` 182 | 183 | The above snippet will print the following: 184 | 185 | ```text 186 | Result: Querying MySQL with query SELECT * FROM sales WHERE date >= '2021-01-01' AND date <= '2021-12-31' 187 | { 188 | "role": "assistant", 189 | "content": "Here are the quarterly sales for the year 2021:\n\n1st Quarter: $XXX\n2nd Quarter: $XXX\n3rd Quarter: $XXX\n4th Quarter: $XXX\n\nPlease let me know if there's anything else I can assist you with!" 190 | } 191 | ``` 192 | 193 | The example above is very similar to our previous example except that this time we have fixed the `db_driver` parameter 194 | which gives you that very important security and privacy aspect especially when playing around with LLMs on the open 195 | internet. 196 | 197 | ## Function Wrapper 198 | 199 | `func-ai` also offers a function wrapper that you can use to wrap your functions and expose them to OpenAI. The wrapper 200 | takes care of all the heavy lifting for you. Here is a very short example of how you can use the wrapper: 201 | 202 | ```python 203 | from dotenv import load_dotenv 204 | from func_ai.utils import OpenAIFunctionWrapper, OpenAIInterface 205 | 206 | load_dotenv() 207 | 208 | 209 | def say_hello(name: str): 210 | """ 211 | This is a function that says hello to the user 212 | 213 | :param name: Name of the person to say hello to 214 | :return: 215 | """ 216 | print(f"Hello {name}!") 217 | 218 | 219 | _func_wrap = OpenAIFunctionWrapper.from_python_function(say_hello, OpenAIInterface()) 220 | 221 | _func_wrap.from_prompt("Say hello to John") 222 | ``` 223 | 224 | The above snippet will print the following: 225 | 226 | ```text 227 | Hello John! 228 | ``` 229 | 230 | Let's break down the above snippet: 231 | 232 | - First we import the `load_dotenv` function from the `dotenv` library. 
This is used to load the environment variables 233 | from the `.env` file. 234 | - Next we import the `OpenAIFunctionWrapper` and `OpenAIInterface` classes from the `func_ai.utils` module. 235 | - Next we define a function called `say_hello` that takes a `name` parameter and prints `Hello {name}!` to the console. 236 | - Next we create an instance of the `OpenAIFunctionWrapper` class by calling the `from_python_function` method and 237 | passing in the `say_hello` function and an instance of the `OpenAIInterface` class. 238 | - Finally we call the `from_prompt` method on the `OpenAIFunctionWrapper` instance and pass in the prompt that we want to 239 | send to OpenAI. 240 | 241 | It is also possible to use partials with the wrapper like so: 242 | 243 | ```python 244 | from functools import partial 245 | _func_wrap = OpenAIFunctionWrapper.from_python_function(partial(say_hello,name="World"), OpenAIInterface()) 246 | 247 | _func_wrap.from_prompt("Say hello") 248 | ``` 249 | 250 | The above snippet will print the following: 251 | 252 | ```text 253 | Hello World! 254 | ``` 255 | 256 | !!! note "Further Examples" 257 | 258 | For more examples check jupyter notebooks in the `tests/jupyter/` folder. 
-------------------------------------------------------------------------------- /func_ai/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amikos-tech/func-ai/6d86446e1010c280d98fce7d865283c2486cde5d/func_ai/__init__.py -------------------------------------------------------------------------------- /func_ai/function_indexer.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function indexer module is responsible for making functions searchable 3 | """ 4 | import importlib 5 | import inspect 6 | import logging 7 | import os 8 | from collections import namedtuple 9 | 10 | import chromadb 11 | import openai 12 | from chromadb import Settings 13 | from chromadb.api import EmbeddingFunction 14 | from chromadb.utils import embedding_functions 15 | 16 | from func_ai.utils.llm_tools import OpenAIFunctionWrapper, OpenAIInterface, LLMInterface 17 | from func_ai.utils.py_function_parser import func_to_json 18 | 19 | logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG) 20 | logger = logging.getLogger(__name__) 21 | 22 | SearchResult = namedtuple('SearchResult', ['name', 'wrapper', 'function', 'distance']) 23 | 24 | 25 | class FunctionIndexer(object): 26 | """ 27 | Index functions 28 | """ 29 | 30 | def __init__(self, llm_interface: LLMInterface = OpenAIInterface(), 31 | chroma_client: chromadb.Client = chromadb.PersistentClient(settings=Settings(allow_reset=True)), 32 | embedding_function: EmbeddingFunction = None, 33 | collection_name: str = "function_index", **kwargs) -> None: 34 | """ 35 | Initialize function indexer 36 | :param db_path: The path where to store the database 37 | :param collection_name: The name of the collection 38 | :param kwargs: Additional arguments 39 | """ 40 | # self._client = chromadb.PersistentClient(path=db_path, settings=Settings( 41 | # anonymized_telemetry=False, 42 | # 
allow_reset=True, 43 | # )) 44 | self._client = chroma_client 45 | openai.api_key = kwargs.get("openai_api_key", os.getenv("OPENAI_API_KEY")) 46 | if embedding_function is None: 47 | self._embedding_function = embedding_functions.OpenAIEmbeddingFunction() 48 | else: 49 | self._embedding_function = embedding_function 50 | self.collection_name = collection_name 51 | self._init_collection() 52 | 53 | self._llm_interface = llm_interface 54 | self._fns_map = {} 55 | self._fns_index_map = {} 56 | self._open_ai_function_map = [] 57 | self._functions = {} 58 | _get_results = self._collection.get() 59 | if _get_results is not None: 60 | for idx, m in enumerate(_get_results['metadatas']): 61 | if "is_partial" in m and bool(m["is_partial"]): 62 | logger.warning( 63 | f"Found partial function {m['name']}. This function will not be rehydrated into the index.") 64 | continue 65 | self._functions[m["hash"]] = OpenAIFunctionWrapper.from_python_function( 66 | func=FunctionIndexer.function_from_ref(m["identifier"]), llm_interface=self._llm_interface) 67 | 68 | def _init_collection(self) -> None: 69 | self._collection = self._client.get_or_create_collection(name=self.collection_name, 70 | metadata={"hnsw:space": "cosine"}, 71 | embedding_function=self._embedding_function) 72 | 73 | @staticmethod 74 | def function_from_ref(ref_identifier: str) -> callable: 75 | """ 76 | Get function from reference 77 | :param ref_identifier: The reference identifier 78 | :return: The function 79 | """ 80 | parts = ref_identifier.split('.') 81 | _fn = parts[-1] 82 | _mod = "" 83 | _last_mod = "" 84 | _module = None 85 | for pt in parts[:-1]: 86 | try: 87 | _last_mod = str(_mod) 88 | _mod += pt 89 | _module = importlib.import_module(_mod) 90 | _mod += "." 
91 | # module = importlib.import_module('.'.join(parts[:-1])) 92 | # function = getattr(module, _fn) 93 | except ModuleNotFoundError: 94 | print("Last module: ", _last_mod) 95 | _module = importlib.import_module(_last_mod[:-1] if _last_mod.endswith(".") else _last_mod) 96 | _module = getattr(_module, pt) 97 | # print(f"Module: {getattr(module, pt)}") 98 | # module = importlib.import_module('.'.join(parts[:-1])) 99 | if _module is None: 100 | raise ModuleNotFoundError(f"Could not find module {_mod}") 101 | _fn = _module 102 | part = parts[-1] 103 | _fn = getattr(_fn, part) 104 | return _fn 105 | 106 | def reset_function_index(self) -> None: 107 | """ 108 | Reset function index 109 | 110 | :return: 111 | """ 112 | 113 | self._client.reset() 114 | self._init_collection() 115 | 116 | def index_functions(self, functions: list[callable or OpenAIFunctionWrapper], 117 | llm_interface: LLMInterface = None, 118 | enhanced_summary: bool = False) -> None: 119 | """ 120 | Index one or more functions 121 | Note: Function uniqueness is not checked in this version 122 | 123 | :param llm_interface: The LLM interface 124 | :param functions: The functions to index 125 | :param enhanced_summary: Whether to use enhanced summary 126 | :return: 127 | """ 128 | _fn_llm_interface = llm_interface if llm_interface is not None else self._llm_interface 129 | _wrapped_functions = [ 130 | OpenAIFunctionWrapper.from_python_function(func=f, llm_interface=_fn_llm_interface) for f 131 | in functions if not isinstance(f, OpenAIFunctionWrapper)] 132 | _wrapped_functions.extend([f for f in functions if isinstance(f, OpenAIFunctionWrapper)]) 133 | _fn_hashes = [f.hash for f in _wrapped_functions] 134 | _existing_fn_results = self._collection.get(ids=_fn_hashes) 135 | print(_existing_fn_results) 136 | # filter wrapped functions that are already in the index 137 | _original_wrapped_functions = _wrapped_functions.copy() 138 | _wrapped_functions = [f for f in _wrapped_functions if f.hash not in 
_existing_fn_results["ids"]] 139 | if len(_wrapped_functions) == 0: 140 | logger.info("No new functions to index") 141 | self._functions.update( 142 | {f.hash: f for f in _original_wrapped_functions}) # we only rehydrate that are already in the index 143 | return 144 | _docs = [] 145 | _metadatas = [] 146 | _ids = [] 147 | _function_summarizer = OpenAIInterface(max_tokens=200) 148 | for f in _wrapped_functions: 149 | if enhanced_summary: 150 | _function_summarizer.add_conversation_message( 151 | {"role": "system", 152 | "content": "You are an expert summarizer. Your purpose is to provide a good summary of the function so that the user can add the summary in an embedding database which will them be searched."}) 153 | _fsummary = _function_summarizer.send(f"Summarize the function below.\n\n{inspect.getsource(f.func)}") 154 | _docs.append(f"{_fsummary['content']}") 155 | _function_summarizer.conversation_store.clear() 156 | else: 157 | _docs.append(f"{f.description}") 158 | _metadatas.append( 159 | {"name": f.name, "identifier": f.identifier, "hash": f.hash, "is_partial": str(f.is_partial), 160 | **f.metadata_dict}) 161 | _ids.append(f.hash) 162 | 163 | self._collection.add(documents=_docs, 164 | metadatas=_metadatas, 165 | ids=_ids) 166 | self._functions.update({f.hash: f for f in _wrapped_functions}) 167 | 168 | def index_wrapper_functions(self, functions: list[OpenAIFunctionWrapper], 169 | llm_interface: LLMInterface = None, 170 | enhanced_summary: bool = False) -> None: 171 | """ 172 | Index one or more functions 173 | Note: Function uniqueness is not checked in this version 174 | :param functions: The functions to index 175 | :param llm_interface: The LLM interface 176 | :param enhanced_summary: Whether to use enhanced summary 177 | :return: None 178 | """ 179 | self.index_functions(functions=functions, llm_interface=llm_interface, enhanced_summary=enhanced_summary) 180 | 181 | def get_ai_fn_abbr_map(self) -> dict[str, str]: 182 | """ 183 | Get AI function 
abbreviated map 184 | 185 | :return: Map of function name (key) and description (value) 186 | """ 187 | 188 | return {f['name']: f['description'] for f in self._open_ai_function_map} 189 | 190 | def functions_summary(self) -> dict[str, str]: 191 | """ 192 | Get functions summary 193 | 194 | :return: Map of function name (key) and description (value) 195 | """ 196 | return {f.name: f.description for f in self._functions.values()} 197 | 198 | def find_functions(self, query: str, max_results: int = 2, similarity_threshold: float = 1.0) -> list[SearchResult]: 199 | """ 200 | Find functions by description 201 | 202 | :param query: Query string 203 | :param max_results: Maximum number of results 204 | :param similarity_threshold: Similarity threshold - a cut-off threshold for the similarity score - default is 1.0 (very loose match) 205 | :return: 206 | """ 207 | _response = [] 208 | # print(self._functions.keys()) 209 | res = self._collection.query(query_texts=[query], n_results=max_results) 210 | # print(f"Got results from sematic search: {res}") 211 | for idx, _ in enumerate(res['documents'][0]): 212 | print(f"Distance: {res['distances'][0][idx]} vs threshold: {similarity_threshold}") 213 | if res['distances'][0][idx] <= similarity_threshold: 214 | _search_res = SearchResult(name=res['metadatas'][0][idx]['name'], 215 | function=self._functions[res['metadatas'][0][idx]['hash']].func, 216 | wrapper=self._functions[res['metadatas'][0][idx]['hash']], 217 | distance=res['distances'][0][idx]) 218 | _response.append(_search_res) 219 | 220 | _response.sort(key=lambda x: x.distance) 221 | return _response 222 | 223 | @staticmethod 224 | def get_functions(functions: list[callable]) -> (list, dict): 225 | """ 226 | Get functions and function map. 227 | 228 | Note: Right now this is a naive implementation as it ignores modules and file paths. 
229 | 230 | :param functions: List of functions 231 | :return: List of converted functions and function map 232 | """ 233 | 234 | _converted_functions = [func_to_json(_f) for _f in functions] 235 | _function_map = {f.__name__: f for f in functions} 236 | _function_index_map = {f.__name__: func_to_json(f) for f in functions} 237 | return _converted_functions, _function_map, _function_index_map 238 | -------------------------------------------------------------------------------- /func_ai/ui_demos/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amikos-tech/func-ai/6d86446e1010c280d98fce7d865283c2486cde5d/func_ai/ui_demos/__init__.py -------------------------------------------------------------------------------- /func_ai/ui_demos/api_qa.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | from dotenv import load_dotenv 3 | 4 | from func_ai.utils.llm_tools import OpenAIInterface 5 | from func_ai.utils.openapi_function_parser import OpenAPISpecOpenAIWrapper 6 | 7 | _chat_message = [] 8 | 9 | _spec = None 10 | 11 | 12 | def add_text(history, text): 13 | global _chat_message 14 | history = history + [(text, None)] 15 | _chat_message.append(_spec.api_qa(text, max_tokens=500)) 16 | return history, "" 17 | 18 | 19 | def add_file(history, file): 20 | history = history + [((file.name,), None)] 21 | return history 22 | 23 | 24 | def bot(history): 25 | global _chat_message 26 | # print(temp_callback_handler.get_output()) 27 | # response = temp_callback_handler.get_output()['output'] 28 | history[-1][1] = _chat_message[-1] 29 | return history 30 | 31 | 32 | with gr.Blocks() as demo: 33 | chatbot = gr.Chatbot([], elem_id="chatbot").style(height=1500) 34 | 35 | with gr.Row(): 36 | with gr.Column(scale=1): 37 | txt = gr.Textbox( 38 | show_label=False, 39 | placeholder="Enter text and press enter", 40 | ).style(container=False) 41 | 
txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then( 42 | bot, chatbot, chatbot 43 | ) 44 | 45 | if __name__ == "__main__": 46 | load_dotenv() 47 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 48 | llm_interface=OpenAIInterface(), index=True) 49 | demo.launch() 50 | -------------------------------------------------------------------------------- /func_ai/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains utility functions for the func_ai package. 3 | """ 4 | from .llm_tools import OpenAISchema, OpenAIFunctionWrapper, OpenAIInterface 5 | -------------------------------------------------------------------------------- /func_ai/utils/api_qa_system_prompt.txt: -------------------------------------------------------------------------------- 1 | You are an API expert. Your goal is to assist the user in using an API that he/she is not familiar with. 2 | The user will provide commands which you will use to find out information about an API 3 | Then the user will ask you questions about the API and you will answer them. 4 | 5 | Rules: 6 | 1. You will only answer questions about the API 7 | 2. 
You will keep your output only to the essential information -------------------------------------------------------------------------------- /func_ai/utils/common.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | 3 | 4 | def arg_in_func(func, arg_name): 5 | # Get the signature of the function 6 | signature = inspect.signature(func) 7 | 8 | # Get the parameters of the function from the signature 9 | parameters = signature.parameters 10 | 11 | # Check if the arg_name is in the parameters 12 | return arg_name in parameters 13 | -------------------------------------------------------------------------------- /func_ai/utils/jinja_template_functions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Jinja2 Template Functions 3 | """ 4 | import json 5 | 6 | from jinja2 import Template, Environment, FileSystemLoader, meta, DictLoader 7 | 8 | from func_ai.utils.llm_tools import LLMInterface 9 | 10 | 11 | class JinjaOpenAITemplateFunction: 12 | 13 | def __init__(self, environment: Environment, llm_interface: LLMInterface, **kwargs): 14 | self._lm_interface = llm_interface 15 | self._environment = environment 16 | if "filters" in kwargs and isinstance(kwargs["filters"], dict): 17 | self._environment.filters = kwargs["filters"] 18 | # self._lm_interface.conversation_store.add_system_message("""You are a code helper. Your goal is to help the user to convert a jinja template into a list of parameters. 19 | # 20 | # User will provide a jinja template as input. 21 | # 22 | # Your task is to extract the jinja parameters and return them in a bullet list. 23 | # 24 | # Do not return anything other than the bullet list. 25 | # Return just the parameter names and nothing else. 26 | # Return only jinja2 template parameters and nothing else. 
27 | # """) 28 | 29 | def render_from_prompt(self, prompt: str, template_name: str = "template"): 30 | """ 31 | This function renders the jinja template with the given prompt 32 | :param prompt: 33 | :return: 34 | """ 35 | self._environment.get_template(template_name) 36 | source = self._environment.loader.get_source(self._environment, template_name)[0] 37 | ast = self._environment.parse(source) 38 | 39 | # Get the undeclared variables 40 | _template_vars = meta.find_undeclared_variables(ast) 41 | _response = self._lm_interface.send(prompt=prompt, 42 | #TODO: Move this to OpenAIFunctionWrapper 43 | functions=[{"name": "render_template", 44 | "description": "Render a template with given parameters", 45 | "parameters": { 46 | "type": "object", 47 | "properties": {k: {"type": "string", 48 | "description": f"{k}"} for 49 | k 50 | in _template_vars}, 51 | "required": list( 52 | _template_vars)}}]) # TODO this is a bug should be moved inside parameters 53 | if "function_call" in _response: 54 | args = json.loads(_response["function_call"]["arguments"]) 55 | return self._environment.get_template(template_name).render(**args) 56 | 57 | @classmethod 58 | def from_string_template(cls, template_string: str, llm_interface: LLMInterface, 59 | **kwargs) -> "JinjaOpenAITemplateFunction": 60 | """ 61 | This function takes a jinja2 template string and returns a function that can be used to render the template 62 | :param template_string: Jinja2 template string 63 | :param llm_interface: LLMInterface 64 | :param kwargs: Additional arguments 65 | :return: 66 | """ 67 | return cls(environment=Environment( 68 | loader=DictLoader({'template': template_string}), autoescape=True), 69 | llm_interface=llm_interface, **kwargs) 70 | 71 | @classmethod 72 | def from_environment(cls, environment: Environment, llm_interface: LLMInterface, 73 | **kwargs) -> "JinjaOpenAITemplateFunction": 74 | """ 75 | This function takes a jinja2 template string and returns a function that can be used to render 
the template 76 | :param environment: Jinja2 Environment 77 | :param llm_interface: LLMInterface 78 | :param kwargs: Additional arguments 79 | :return: 80 | """ 81 | return cls(environment=environment, llm_interface=llm_interface, **kwargs) 82 | 83 | @classmethod 84 | def from_template_file(cls, template_file: str, llm_interface: LLMInterface, 85 | **kwargs) -> "JinjaOpenAITemplateFunction": 86 | """ 87 | This function takes a jinja2 template string and returns a function that can be used to render the template 88 | 89 | :param template_file: Path to the template file 90 | :param llm_interface: LLMInterface 91 | :param kwargs: Additional arguments 92 | :return: 93 | """ 94 | with open(template_file) as f: 95 | template_string = f.read() 96 | _instance = cls(environment=Environment( 97 | loader=DictLoader({'template': template_string, template_file: template_string}), autoescape=True), 98 | llm_interface=llm_interface, **kwargs) 99 | return _instance 100 | -------------------------------------------------------------------------------- /func_ai/utils/llm_tools.py: -------------------------------------------------------------------------------- 1 | """ 2 | A module for interacting with the Language Learning Model 3 | """ 4 | import functools 5 | import hashlib 6 | import json 7 | import logging 8 | import os 9 | import traceback 10 | from abc import abstractmethod 11 | from enum import Enum 12 | from typing import Any 13 | 14 | import openai 15 | from tenacity import retry, wait_fixed, stop_after_attempt 16 | 17 | from pydantic import BaseModel, Field 18 | 19 | from func_ai.utils.common import arg_in_func 20 | from func_ai.utils.py_function_parser import type_mapping, func_to_json 21 | 22 | logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG) 23 | logger = logging.getLogger(__name__) 24 | 25 | 26 | class ConversationStore(BaseModel): 27 | """ 28 | A class for storing conversations 29 | 30 | """ 31 | conversation: 
list = Field(default_factory=list, description="A dictionary of conversations") 32 | 33 | @abstractmethod 34 | def add_message(self, message: Any): 35 | """ 36 | Adds a message to the conversation 37 | :param message: 38 | :return: 39 | """ 40 | raise NotImplementedError 41 | 42 | def get_conversation(self) -> list: 43 | """ 44 | Returns the conversation 45 | :return: 46 | """ 47 | return self.conversation 48 | 49 | def get_last_message(self) -> Any: 50 | """ 51 | Returns the last message 52 | :return: 53 | """ 54 | return self.conversation[-1] 55 | 56 | def clear(self) -> None: 57 | """ 58 | Clears the conversation 59 | :return: 60 | """ 61 | self.conversation = [] 62 | 63 | 64 | class OpenAIConversationStore(ConversationStore): 65 | 66 | def add_message(self, message: Any): 67 | self.conversation.append(message) 68 | 69 | def add_system_message(self, message: str): 70 | # remove any existing system messages 71 | self.conversation = [x for x in self.conversation if x['role'] != 'system'] 72 | self.conversation.insert(0, {"role": "system", "content": message}) 73 | 74 | 75 | class LLMInterface(BaseModel): 76 | """ 77 | Interface for interacting with the Language Learning Model 78 | 79 | """ 80 | usage: dict = Field(default_factory=dict, description="A dictionary of the usage of the API") 81 | cost_mapping: dict = Field(default_factory=dict, description="A dictionary of the cost of the API") 82 | conversation_store: ConversationStore = Field(..., 83 | description="A class for storing conversations") 84 | 85 | def __init__(self, *args, **kwargs): 86 | super().__init__(*args, **kwargs) 87 | 88 | @abstractmethod 89 | def send(self, prompt: str, **kwargs) -> Any: 90 | """ 91 | Sends a prompt to the API 92 | 93 | :param prompt: 94 | :param kwargs: 95 | :return: 96 | """ 97 | raise NotImplementedError 98 | 99 | @abstractmethod 100 | def load_cost_mapping(self, file_path: str) -> None: 101 | """ 102 | Loads the cost mapping from a file 103 | 104 | :param file_path: 105 | 
:return: 106 | """ 107 | raise NotImplementedError 108 | 109 | @abstractmethod 110 | def update_cost(self, model, api_response) -> None: 111 | """ 112 | Updates the cost of the API 113 | 114 | :param model: The model used 115 | :param api_response: The response from the API 116 | :return: 117 | """ 118 | raise NotImplementedError 119 | 120 | def get_usage(self) -> dict: 121 | return self.usage 122 | 123 | def reset_usage(self) -> None: 124 | """ 125 | Resets the usage of the API 126 | 127 | :return: 128 | """ 129 | self.usage = dict() 130 | 131 | def store_usage(self, file_path: str) -> None: 132 | """ 133 | Appends the usage of the API to a file 134 | 135 | :param file_path: The path to the file 136 | :return: 137 | """ 138 | with open(file_path, "a") as f: 139 | json.dump(self.usage, f) 140 | 141 | def get_conversation(self) -> list[any]: 142 | """ 143 | Returns the conversation 144 | :return: 145 | """ 146 | return self.conversation_store.get_conversation() 147 | 148 | def add_conversation_message(self, message: any, update_llm: bool = False, **kwargs) -> "LLMInterface": 149 | """ 150 | Adds a message to the conversation 151 | :param message: The message to add 152 | :param update_llm: Whether to update the LLM with the new conversation 153 | :param kwargs: Parameters to pass to the API 154 | :return: 155 | """ 156 | self.conversation_store.add_message(message) 157 | if update_llm: 158 | self.update_llm_conversation(**kwargs) 159 | return self 160 | 161 | def update_llm_conversation(self, **kwargs) -> "LLMInterface": 162 | """ 163 | Sends the updated conversation to the LLM 164 | 165 | :param kwargs: Parameters to pass to the API 166 | :return: 167 | """ 168 | raise NotImplementedError 169 | 170 | 171 | class OpenAIInterface(LLMInterface): 172 | """ 173 | Interface for interacting with the OpenAI API 174 | """ 175 | max_tokens: int = Field(default=256, description="The maximum number of tokens to return") 176 | model: str = Field(default="gpt-3.5-turbo-0613", 
description="The model to use") 177 | temperature: float = Field(default=0.0, description="The temperature to use") 178 | conversation_store: OpenAIConversationStore = Field(default_factory=OpenAIConversationStore, 179 | description="A class for storing conversations") 180 | 181 | def __init__(self, *args, **kwargs): 182 | super().__init__(*args, max_tokens=kwargs.get("max_tokens", 256), 183 | model=kwargs.get("model", "gpt-3.5-turbo-0613"), 184 | temperature=kwargs.get("temperature", 0.0), 185 | conversation_store=OpenAIConversationStore()) 186 | openai.api_key = kwargs.get("api_key", os.getenv("OPENAI_API_KEY")) 187 | 188 | @retry(stop=stop_after_attempt(3), reraise=True, wait=wait_fixed(1), 189 | retry_error_callback=lambda x: logger.warning(x)) 190 | def update_llm_conversation(self, **kwargs) -> "OpenAIInterface": 191 | """ 192 | Sends the updated conversation to the LLM 193 | 194 | :param kwargs: Parameters to pass to the API 195 | :return: 196 | """ 197 | _functions = kwargs.get("functions", None) 198 | _model = kwargs.get("model", self.model) 199 | try: 200 | if _functions: 201 | response = openai.ChatCompletion.create( 202 | model=_model, 203 | messages=self.conversation_store.get_conversation(), 204 | functions=_functions, 205 | function_call="auto", 206 | temperature=kwargs.get("temperature", self.temperature), 207 | top_p=1.0, 208 | frequency_penalty=0.0, 209 | presence_penalty=0.0, 210 | max_tokens=kwargs.get("max_tokens", self.max_tokens), 211 | ) 212 | else: 213 | response = openai.ChatCompletion.create( 214 | model=_model, 215 | messages=self.conversation_store.get_conversation(), 216 | temperature=kwargs.get("temperature", self.temperature), 217 | top_p=1.0, 218 | frequency_penalty=0.0, 219 | presence_penalty=0.0, 220 | max_tokens=kwargs.get("max_tokens", self.max_tokens), 221 | ) 222 | # decode utf-8 bytes to unicode 223 | if "content" in response["choices"][0]["message"] and response["choices"][0]["message"]["content"]: 224 | 
response["choices"][0]["message"]["content"] = response["choices"][0]["message"]["content"].encode( 225 | 'utf-8').decode( 226 | "utf-8") 227 | if "function_call" in response["choices"][0]["message"] and response["choices"][0]["message"][ 228 | "function_call"]: 229 | response["choices"][0]["message"]["function_call"]["arguments"] = \ 230 | response["choices"][0]["message"]["function_call"]["arguments"].encode('utf-8').decode("utf-8") 231 | self.update_cost(_model, response) 232 | _response_message = response["choices"][0]["message"] 233 | self.conversation_store.add_message(_response_message) 234 | return self 235 | except Exception as e: 236 | logger.error(f"Error: {e}") 237 | traceback.print_exc() 238 | raise e 239 | 240 | def send(self, prompt: str, **kwargs) -> dict[str, any]: 241 | # print(type(self._conversation_store)) 242 | self.conversation_store.add_message({"role": "user", "content": prompt}) 243 | logger.debug(f"Prompt: {prompt}") 244 | response = self.update_llm_conversation(**kwargs) 245 | logger.debug(f"Response: {response}") 246 | return self.conversation_store.get_last_message() 247 | 248 | def load_cost_mapping(self, file_path: str) -> None: 249 | with open(file_path) as f: 250 | self.cost_mapping = json.load(f) 251 | 252 | def update_cost(self, model, api_response) -> None: 253 | if model not in self.usage: 254 | self.usage[model] = { 255 | "prompt_tokens": 0, 256 | "completion_tokens": 0, 257 | "total_tokens": 0 258 | } 259 | self.usage[model]["prompt_tokens"] += api_response['usage']['prompt_tokens'] 260 | self.usage[model]["completion_tokens"] += api_response['usage']['completion_tokens'] 261 | self.usage[model]["total_tokens"] += api_response['usage']['total_tokens'] 262 | 263 | 264 | class OpenAISchema(BaseModel): 265 | @classmethod 266 | @property 267 | def openai_schema(cls): 268 | schema = cls.schema() 269 | 270 | return { 271 | "name": schema["title"], 272 | "description": schema.get("description", f"{schema['title']} class"), 273 | 
"parameters": { 274 | "type": "object", 275 | "properties": {f"{name}": OpenAISchema.get_field_def(name, field_info) for name, field_info in 276 | cls.__fields__.items()}, 277 | }, 278 | "required": [name for name, field_info in cls.__fields__.items() if field_info.required] 279 | } 280 | 281 | @staticmethod 282 | def get_field_def(name, field_info) -> dict[str, str]: 283 | """ 284 | Returns a string representation of a field definition 285 | 286 | :param name: 287 | :param field_info: 288 | :return: 289 | """ 290 | 291 | default = f". Default value: {str(field_info.default)}" if not field_info.required else "" 292 | description = field_info.field_info.description 293 | if description: 294 | description = description.replace("\n", " ") 295 | else: 296 | description = "" 297 | _enum_values = "" 298 | if issubclass(field_info.outer_type_, Enum): 299 | _enum_values = ". Enum: " + ",".join([f"{_enum.name}" for _enum in field_info.outer_type_]) 300 | return { 301 | "description": f"{description}{default}{_enum_values}", 302 | "type": type_mapping(field_info.outer_type_) 303 | } 304 | 305 | @classmethod 306 | def from_response(cls, completion, throw_error=True): 307 | """ 308 | Returns an instance of the class from LLM completion response 309 | 310 | :param completion: completion response from LLM 311 | :param throw_error: whether to throw error if function call is not present 312 | :return: 313 | """ 314 | if throw_error: 315 | assert "function_call" in completion, "No function call detected" 316 | assert ( 317 | completion["function_call"]["name"] == cls.openai_schema["name"] 318 | ), "Function name does not match" 319 | 320 | function_call = completion["function_call"] 321 | arguments = json.loads(function_call["arguments"]) 322 | return cls(**arguments) 323 | 324 | @classmethod 325 | def from_prompt(cls, prompt: str, llm_interface: LLMInterface, throw_error=True): 326 | """ 327 | Returns an instance of the class from LLM prompt 328 | 329 | :param prompt: User prompt 
330 | :param llm_interface: LLM interface 331 | :param throw_error: whether to throw error if function call is not present 332 | :return: 333 | """ 334 | completion = llm_interface.send(prompt, functions=[cls.openai_schema]) 335 | # print(llm_interface.get_conversation()) 336 | # TODO add crud interface functions here 337 | return cls.from_response(completion, throw_error) 338 | 339 | 340 | class OpenAIFunctionWrapper(object): 341 | """ 342 | A wrapper class for OpenAI functions. 343 | """ 344 | 345 | def __init__(self, llm_interface: LLMInterface, name: str, description: str, parameters: dict[str, any], 346 | func: callable, **kwargs) -> None: 347 | """ 348 | Initializes the OpenAI function wrapper 349 | :param llm_interface: 350 | :param name: 351 | :param description: 352 | :param parameters: 353 | :param func: 354 | :param kwargs: All these are treated as metadata that can be used by other tooling to augment the function 355 | """ 356 | self.llm_interface = llm_interface 357 | self._name = name 358 | self._description = description 359 | assert "required" in parameters, "Required field not present in parameters" 360 | self._parameters = parameters 361 | self.is_partial = False 362 | assert callable(func) or isinstance(func, functools.partial), "Function must be callable" 363 | if arg_in_func(func, "action"): 364 | self.func = functools.partial(func, action=self) 365 | else: 366 | self.func = func 367 | if isinstance(func, functools.partial): 368 | module_name = func.func.__module__ 369 | function_name = func.func.__qualname__ # use __qualname__ for nested functions 370 | self.is_partial = True 371 | else: 372 | module_name = func.__module__ 373 | function_name = func.__qualname__ # use __qualname__ for nested functions 374 | self._identifier = f"{module_name}.{function_name}" 375 | if "" in self._identifier: 376 | raise ValueError("Cannot use lambda functions") 377 | if "" in self._identifier: 378 | raise ValueError("Cannot use nested functions") 379 | 
self._hash = hashlib.sha1(f"{self._identifier}-{self._name}-{self._description}".encode()).hexdigest() 380 | self._metadata = kwargs 381 | self._llm_calls = [] 382 | 383 | @property 384 | def identifier(self) -> str: 385 | """ 386 | Returns the identifier of the function 387 | 388 | :return: identifier of the function 389 | """ 390 | return self._identifier 391 | 392 | @property 393 | def hash(self) -> str: 394 | """ 395 | Returns the identifier of the function 396 | 397 | :return: identifier of the function 398 | """ 399 | return self._hash 400 | 401 | @property 402 | def name(self) -> str: 403 | """ 404 | Returns the name of the function 405 | 406 | :return: name of the function 407 | """ 408 | return self._name 409 | 410 | @property 411 | def description(self) -> str: 412 | """ 413 | Returns the description of the function 414 | 415 | :return: description of the function 416 | """ 417 | return self._description 418 | 419 | @property 420 | def parameters(self) -> dict[str, any]: 421 | """ 422 | Returns the parameters of the function 423 | 424 | :return: parameters of the function 425 | """ 426 | return self._parameters 427 | 428 | def call(self, *args, **kwargs) -> dict[str, any]: 429 | """ 430 | Calls the function with the given arguments 431 | 432 | :param kwargs: arguments to be passed to the function 433 | :return: 434 | """ 435 | return self.func(*args, **kwargs) 436 | 437 | @property 438 | def metadata(self) -> dict[str, any]: 439 | """ 440 | Returns the metadata of the function 441 | 442 | :return: dict containing the metadata 443 | """ 444 | return self._metadata 445 | 446 | @property 447 | def metadata_dict(self) -> dict[str, str]: 448 | """ 449 | Returns the metadata of the function as a 450 | 451 | :return: 452 | """ 453 | return {k: str(v) for k, v in self._metadata.items()} 454 | 455 | @property 456 | def schema(self) -> dict[str, any]: 457 | """ 458 | Returns the schema of the function that can be passed to OpenAI API 459 | 460 | :return: dict 
containing the schema 461 | """ 462 | return { 463 | "name": self.name, 464 | "description": self.description, 465 | "parameters": self.parameters, 466 | } 467 | 468 | def __str__(self) -> str: 469 | return f"{self.schema}" 470 | 471 | def __repr__(self) -> str: 472 | return f"{self.schema}" 473 | 474 | def to_dict(self) -> dict[str, any]: 475 | """ 476 | Returns a dict representation of the function 477 | 478 | :return: dict containing the function 479 | """ 480 | return { 481 | "name": self.name, 482 | "description": self.description, 483 | "parameters": self.parameters, 484 | "metadata": self.metadata, 485 | } 486 | 487 | @property 488 | def last_call(self) -> dict[str, any]: 489 | """ 490 | Returns the last response from the function call 491 | 492 | :return: dict containing the last response 493 | """ 494 | return self._llm_calls[-1] 495 | 496 | @property 497 | def calls(self) -> list[dict[str, any]]: 498 | """ 499 | Returns the list of responses from the function call 500 | 501 | :return: list containing the responses 502 | """ 503 | return self._llm_calls 504 | 505 | def from_response(self, llm_response: dict[str, any]) -> "OpenAIFunctionWrapper": 506 | """ 507 | Returns an instance of the class from LLM completion response 508 | 509 | :param llm_response: completion response from LLM 510 | :return: The response from the function call 511 | """ 512 | if "function_call" not in llm_response: 513 | raise ValueError(f"No function call detected: {llm_response}") 514 | if llm_response["function_call"]["name"] != self.name: 515 | raise ValueError(f"Function name does not match: {llm_response}") 516 | try: 517 | _func_response = self.func(**json.loads(llm_response["function_call"]["arguments"])) 518 | except Exception as e: 519 | _func_response = f"Error: {repr(e)}" 520 | logger.warning(f"Failed to process function call: {llm_response}") 521 | traceback.print_exc() 522 | _function_call_llm_response = { 523 | "role": "function", 524 | "name": self.name, 525 | 
"content": f"{_func_response}", 526 | } 527 | self._llm_calls.append({ 528 | "function_call": llm_response["function_call"], 529 | "function_response": _function_call_llm_response, 530 | }) 531 | self.llm_interface.add_conversation_message(_function_call_llm_response) 532 | return self 533 | 534 | def from_response_raw(self, llm_response: dict[str, any]) -> any: 535 | """ 536 | Returns an instance of the class from LLM completion response 537 | 538 | :param llm_response: completion response from LLM 539 | :return: The response from the function call 540 | """ 541 | if "function_call" not in llm_response: 542 | raise ValueError(f"No function call detected: {llm_response}") 543 | if llm_response["function_call"]["name"] != self.name: 544 | raise ValueError(f"Function name does not match: {llm_response}") 545 | try: 546 | _func_response = self.func(**json.loads(llm_response["function_call"]["arguments"])) 547 | return _func_response 548 | except Exception as e: 549 | logger.warning(f"Failed to process function call: {llm_response}") 550 | raise e 551 | 552 | def from_prompt(self, prompt: str, **kwargs) -> "OpenAIFunctionWrapper": 553 | """ 554 | Returns an instance of the class from LLM prompt 555 | 556 | :param prompt: User prompt 557 | :param kwargs: arguments to be passed to the function 558 | :return: The response from the function call 559 | """ 560 | self.from_response(self.llm_interface.send(prompt, functions=[self.schema])) 561 | return self 562 | 563 | @classmethod 564 | def from_python_function(cls, func: callable, llm_interface: LLMInterface, **kwargs) -> "OpenAIFunctionWrapper": 565 | """ 566 | Returns an instance of the class from Python function 567 | 568 | :param func: Python function 569 | :param llm_interface: LLM interface 570 | :param kwargs: arguments to be passed to the function 571 | :return: 572 | """ 573 | _func = func_to_json(func) 574 | return cls(llm_interface=llm_interface, func=func, **_func, **kwargs) 575 | 
-------------------------------------------------------------------------------- /func_ai/utils/openapi_function_parser.py: -------------------------------------------------------------------------------- 1 | """ doc """ 2 | import functools 3 | import os 4 | 5 | import requests 6 | import json 7 | 8 | from func_ai.function_indexer import FunctionIndexer 9 | from func_ai.utils.llm_tools import OpenAIInterface, OpenAIFunctionWrapper 10 | from func_ai.utils.py_function_parser import func_to_json 11 | 12 | curdir = os.path.dirname(os.path.abspath(__file__)) 13 | 14 | 15 | def get_spec_from_url(url): 16 | response = requests.get(url) 17 | response.raise_for_status() # Raise an exception in case of a failure 18 | return response.json() 19 | 20 | 21 | def _coerce_type(value: any, value_type: type) -> any: 22 | """ 23 | Coerce the value to the given type. If the type is not supported, return the value as is. 24 | 25 | Note: this is not great, not even good but it is a start. 26 | 27 | :param value: The value to be coerced 28 | :param value_type: The type to coerce to 29 | :return: 30 | """ 31 | if value_type == "integer": 32 | return int(value) 33 | if value_type == "number": 34 | return float(value) 35 | if value_type == "boolean": 36 | return bool(value) 37 | return value 38 | 39 | 40 | def get_operations_from_path_item(path_item): 41 | """ 42 | Get the operations from a path item 43 | 44 | :param path_item: 45 | :return: 46 | """ 47 | http_methods = ["get", "put", "post", "delete", "options", "head", "patch", "trace"] 48 | operations = [{"method": op, "spec": op_spec} for op, op_spec in path_item.items() if op in http_methods] 49 | return operations 50 | 51 | 52 | def get_operation_details(operation, path): 53 | """ 54 | Get the details of an operation 55 | 56 | :param operation: 57 | :param path: 58 | :return: 59 | """ 60 | return { 61 | "name": operation['spec'].get('operationId'), 62 | "description": operation['spec'].get('description'), 63 | "parameters": 
def parse_parameters(parameters, _defs):
    """
    Parse the parameters of an operation and return a JSON-schema-style dict
    that can be used to generate an OpenAI function signature.

    Referenced models ('$ref') are inlined into the parameter description as a
    JSON example. The shared definitions in ``_defs`` are never mutated: all
    $ref resolution happens on deep copies (the previous implementation wrote
    the resolved refs back into ``_defs``, corrupting the spec for every
    subsequent operation that referenced the same model).

    :param parameters: list of OpenAPI parameter objects
    :param _defs: the spec's "definitions" mapping used to resolve $refs
    :return: dict with "type"/"properties"/"required" keys
    """
    from copy import deepcopy

    def _resolve(ref: str) -> dict:
        # '#/definitions/Pet' -> deep copy of _defs['Pet'], so nothing we do
        # below can touch the shared definition objects.
        return deepcopy(_defs.get(ref.replace('#/definitions/', '')))

    params_dict = {"type": "object", "properties": {}, "required": []}

    for param in parameters:
        name = param['name']
        description = param.get('description', '')  # TODO generate a description if not present
        if 'schema' in param and '$ref' in param['schema']:
            param_model = _resolve(param['schema']['$ref'])
            # Resolve one level of nested $refs (direct and array-item refs).
            for prop_name, prop_def in list(param_model['properties'].items()):
                if '$ref' in prop_def:
                    param_model['properties'][prop_name] = _resolve(prop_def['$ref'])
                if 'items' in prop_def and '$ref' in prop_def['items']:
                    param_model['properties'][prop_name]['items'] = _resolve(prop_def['items']['$ref'])
            description += ".\nJSON Model: " + json.dumps(param_model)
        param_type = param['schema'].get('type', 'string') if 'schema' in param else 'string'
        # TODO if parameter is schema then we need to go down the rabbit hole and get the schema
        params_dict["properties"][name] = {
            "description": description,
            "type": param_type,
            "in": param['in'] if 'in' in param else "query",
        }

        if param.get('required', False):
            params_dict["required"].append(name)

    return params_dict
def parse_spec(spec) -> dict[str, dict]:
    """
    Parse an OpenAPI specification into a dictionary of callable operations.

    :param spec: the decoded OpenAPI (Swagger 2.0) spec
    :return: mapping of operationId -> {"details", "func", "operation_raw"}
    """
    definitions = spec.get('definitions')
    functions = {}
    for path, path_item in spec['paths'].items():
        for operation in get_operations_from_path_item(path_item):
            details = get_operation_details(operation, path)
            functions[details['name']] = {
                "details": details,
                "func": get_func_details(details, definitions),
                "operation_raw": operation,
            }
    return functions
+ "&".join( 173 | [f"{param['name']}={kwargs.get(param['name'])}" for param in _query_params]) 174 | _body_param_name = None if len(_body_params) == 0 else _body_params[0]['name'] 175 | api_resp = None 176 | # TODO headers should comply with the spec 177 | if _method == "post": 178 | api_resp = requests.post(_url_spec, json=json.loads(kwargs.get(_body_param_name)), 179 | headers={'Content-Type': 'application/json', 'Accept': 'application/json'}) 180 | elif _method == "put": 181 | api_resp = requests.put(_url_spec, json=json.loads(kwargs.get(_body_param_name)), 182 | headers={'Content-Type': 'application/json', 'Accept': 'application/json'}) 183 | elif _method == "patch": 184 | api_resp = requests.patch(_url_spec, json=json.loads(kwargs.get(_body_param_name)), 185 | headers={'Content-Type': 'application/json', 'Accept': 'application/json'}) 186 | elif _method == "get": 187 | api_resp = requests.get(_url_spec, 188 | headers={'Content-Type': 'application/json', 'Accept': 'application/json'}) 189 | else: 190 | raise ValueError(f"Method {_method} not supported") 191 | return { 192 | "status_code": api_resp.status_code, 193 | "response": api_resp.text 194 | } 195 | 196 | 197 | def _read_system_prompt_file(path: str) -> str: 198 | """ 199 | Read the system prompt file 200 | 201 | :param path: 202 | :return: 203 | """ 204 | with open(path, "r") as f: 205 | return f.read() 206 | 207 | 208 | ## QA Utility Functions 209 | def list_available_functions(spec_wrapper: "OpenAPISpecOpenAIWrapper", **kwargs) -> str: 210 | """ 211 | List the available API operations 212 | a 213 | :param spec_wrapper: The OpenAPI Spec Wrapper 214 | :return: 215 | """ 216 | return "Available functions:\n" + "\n".join([f"- {k}: {v}" for k, v in spec_wrapper.operations_summary.items()]) 217 | 218 | 219 | def get_operation_function_info(operation_name: str, spec_wrapper: "OpenAPISpecOpenAIWrapper", **kwargs) -> dict[ 220 | str, any]: 221 | """a 222 | Returns the full information about an operation 
including all parameters 223 | 224 | :param operation_name: The name of the operation 225 | :param spec_wrapper: The OpenAPI Spec Wrapper 226 | :return: 227 | """ 228 | 229 | return spec_wrapper.get_operation(operation_name).to_dict() 230 | 231 | 232 | class OpenAPISpecOpenAIWrapper(object): 233 | """ 234 | Class that wrap around the OpenAPI specification and provides a set of methods to interact with it using OpenAI 235 | function calling. 236 | """ 237 | 238 | def __init__(self, spec: dict[any, any], 239 | llm_interface: OpenAIInterface, 240 | source=None, 241 | index: bool = False, **kwargs) -> None: 242 | """ 243 | Initialize the OpenAPI wrapper 244 | Note: When index=True the LLM system message will be overwritten with the system message from the spec QA system prompt file 245 | 246 | :param spec: The OpenAPI specification 247 | :param llm_interface: The LLM Interface 248 | :param source: The source of the spec 249 | :param index: Whether to index the spec (default: False) - this will put all operations in a vector store and make them searchable 250 | :param kwargs: Additional arguments 251 | """ 252 | self.spec = spec 253 | self.llm_interface = llm_interface 254 | self.source = source 255 | _funcs = parse_spec(spec) 256 | self._function_calls = [] 257 | self._url_spec = { 258 | "host": spec["host"], 259 | "base_path": spec["basePath"], 260 | "schemes": spec["schemes"], 261 | "default_scheme": "https" if "https" in spec["schemes"] else "http", 262 | "template_url": "{default_scheme}://{host}{base_path}{path}", 263 | } 264 | self._funcs = { 265 | fn: OpenAIFunctionWrapper(llm_interface=self.llm_interface, 266 | name=fn, 267 | description=f['func']['description'], 268 | parameters=f['func']['parameters'], 269 | func=functools.partial( 270 | _api_call, ), 271 | # metadata 272 | **{ 273 | "url_spec": self._url_spec, 274 | "body_params": [param for param in f['details']['parameters'] 275 | if 276 | param['in'] == 'body'], 277 | "path_params": [param for param in 
f['details']['parameters'] 278 | if 279 | param['in'] == 'path'], 280 | "query_params": [param for param in f['details']['parameters'] 281 | if 282 | param['in'] == 'query'], 283 | "method": f['details']['method'], 284 | "path": f['details']['path'], 285 | **kwargs 286 | }) for 287 | fn, f in _funcs.items()} 288 | if index: 289 | self.indexer = FunctionIndexer(llm_interface=self.llm_interface) 290 | # TODO: here we're resetting the index every time we create a new wrapper. In the future we should be able to index the same spec multiple times 291 | self.indexer.reset_function_index() 292 | self.indexer.index_wrapper_functions([f for k, f in self.operations.items()]) 293 | self.llm_interface.conversation_store.add_system_message( 294 | _read_system_prompt_file(os.path.join(curdir, 'api_qa_system_prompt.txt'))) 295 | 296 | @property 297 | def operations(self) -> dict[str, any]: 298 | """ 299 | Return the dictionary of functions that can be called 300 | 301 | :return: 302 | """ 303 | return self._funcs 304 | 305 | @property 306 | def operations_summary(self) -> dict[str, str]: 307 | """ 308 | Return a summary of the operations in the form of a dictionary 309 | where the key is the operation name and the value is the description 310 | 311 | :return: A dictionary of operations 312 | """ 313 | return {fn: f.description for fn, f in self._funcs.items()} 314 | 315 | def to_dict(self) -> dict[str, any]: 316 | """ 317 | Return the dictionary of functions that can be called 318 | 319 | :return: 320 | """ 321 | return {fn: f.to_dict() for fn, f in self._funcs.items()} 322 | 323 | def get_operation(self, name: str) -> OpenAIFunctionWrapper: 324 | """ 325 | Get a function by name 326 | 327 | :param name: The name of the function 328 | :return: 329 | """ 330 | return self[name] 331 | 332 | @classmethod 333 | def from_url(cls, url: str, llm_interface: OpenAIInterface, **kwargs) -> "OpenAPISpecOpenAIWrapper": 334 | """ 335 | Create an instance of the class from a URL 336 | 337 | 
:param url: The URL to load 338 | :param llm_interface: The interface to use 339 | :param kwargs: Additional arguments 340 | :return: 341 | """ 342 | spec = get_spec_from_url(url) 343 | return cls(spec, llm_interface=llm_interface, source={"url": url, "type": "url"}, **kwargs) 344 | 345 | @classmethod 346 | def from_file(cls, file: str, llm_interface: OpenAIInterface, **kwargs) -> "OpenAPISpecOpenAIWrapper": 347 | """ 348 | Create an instance of the class from a file 349 | 350 | :param file: The file to load 351 | :param llm_interface: The interface to use 352 | :return: 353 | """ 354 | with open(file) as f: 355 | spec = json.load(f) 356 | 357 | return cls(spec, llm_interface=llm_interface, source={"url": file, "type": "file"}, **kwargs) 358 | 359 | def __getitem__(self, item) -> OpenAIFunctionWrapper: 360 | """ 361 | Get a function from the wrapper 362 | :param item: The name of the function 363 | :return: The function wrapper 364 | """ 365 | return self._funcs[item] 366 | 367 | def from_prompt(self, prompt: str, operation_name: str, **kwargs) -> "OpenAPISpecOpenAIWrapper": 368 | """ 369 | Prompt the user for a function to call and the parameters to use 370 | :param prompt: The prompt to use 371 | :param operation_name: The name of the operation to use 372 | :param kwargs: Additional arguments to pass to the prompt 373 | :return: The result of the function call 374 | """ 375 | _fwrap = self._funcs[operation_name].from_prompt(prompt=prompt, **kwargs) 376 | self._function_calls.append(_fwrap.last_call) 377 | return self 378 | 379 | @property 380 | def last_call(self) -> dict[str, any]: 381 | """ 382 | Return the last function call 383 | :return: 384 | """ 385 | return self._function_calls[-1] 386 | 387 | @property 388 | def calls(self) -> list[dict[str, any]]: 389 | """ 390 | Return the list of function calls 391 | :return: 392 | """ 393 | return self._function_calls 394 | 395 | def api_qa(self, prompt: str, **kwargs) -> any: 396 | """ 397 | Ask a question about the 
API 398 | :param prompt: The prompt to use 399 | :param kwargs: Additional arguments to pass to the prompt 400 | :return: The result of the function call 401 | """ 402 | # What operations does this API support? 403 | _list_fun = OpenAIFunctionWrapper.from_python_function( 404 | func=functools.partial(list_available_functions, spec_wrapper=self), 405 | llm_interface=self.llm_interface, ) 406 | # What endpoints are available in this API? 407 | 408 | # What are the mandatory parameters for operation X? 409 | _fun_info = OpenAIFunctionWrapper.from_python_function( 410 | func=functools.partial(get_operation_function_info, spec_wrapper=self), 411 | llm_interface=self.llm_interface, ) 412 | # What are the optional parameters for operation X? 413 | 414 | _f_map = {_list_fun.name: _list_fun, 415 | _fun_info.name: _fun_info, } 416 | _llm_resp = self.llm_interface.send(prompt=prompt, functions=[k.schema for _, k in _f_map.items()], **kwargs) 417 | # print(f"LLM Response: {_llm_resp}") 418 | if "function_call" in _llm_resp: 419 | _fcall = _f_map[_llm_resp["function_call"]["name"]].from_response(llm_response=_llm_resp).last_call[ 420 | 'function_response'] 421 | # print(f"Function call: {_fcall}") 422 | self.llm_interface.add_conversation_message(_fcall, update_llm=True, **kwargs) 423 | return self.llm_interface.conversation_store.get_last_message()['content'] 424 | # _llm_secondary_resp = self.llm_interface.send(prompt=prompt, 425 | # functions=[k.schema for _, k in _f_map.items()], 426 | # **kwargs) 427 | else: 428 | return _llm_resp['content'] 429 | -------------------------------------------------------------------------------- /func_ai/utils/py_function_parser.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import inspect 3 | import re 4 | 5 | 6 | def type_mapping(dtype: type) -> str: 7 | """ 8 | Map python types to json schema types 9 | 10 | :param dtype: 11 | :return: 12 | """ 13 | if dtype == float: 14 | 
return "number" 15 | elif dtype == int: 16 | return "integer" 17 | elif dtype == str: 18 | return "string" 19 | else: 20 | return "string" 21 | 22 | 23 | def extract_params(doc_str: str) -> dict[str, str]: 24 | """ 25 | Parse the docstring to get the descriptions for each parameter in dict format 26 | 27 | :param doc_str: 28 | :return: 29 | """ 30 | # split doc string by newline, skipping empty lines 31 | params_str = [line for line in doc_str.split("\n") if line.strip()] 32 | params = {} 33 | for line in params_str: 34 | # we only look at lines starting with ':param' 35 | if line.strip().startswith(':param'): 36 | param_match = re.findall(r'(?<=:param )\w+', line) 37 | if param_match: 38 | param_name = param_match[0] 39 | desc_match = line.replace(f":param {param_name}:", "").strip() 40 | # if there is a description, store it 41 | if desc_match: 42 | params[param_name] = desc_match 43 | return params 44 | 45 | 46 | def extract_return_description(docstring): 47 | """ 48 | Extract the return description from a Python docstring. 49 | 50 | :param docstring: The docstring to extract the return description from. 51 | :return: The return description, or empty string if no return description is found. 
52 | """ 53 | match = re.search(r':return: (.*)', docstring) 54 | if match: 55 | return " " + match.group(1) 56 | else: 57 | return "" 58 | 59 | 60 | def func_to_json(func) -> dict[str, any]: 61 | """ 62 | Convert a function to a json schema 63 | 64 | :param func: Python function 65 | :return: 66 | """ 67 | # Check if the function is a functools.partial 68 | if isinstance(func, functools.partial) or isinstance(func, functools.partialmethod): 69 | fixed_args = func.keywords 70 | _func = func.func 71 | if isinstance(func, functools.partial) and (fixed_args is None or fixed_args == {}): 72 | fixed_args = dict(zip(func.func.__code__.co_varnames, func.args)) 73 | else: 74 | fixed_args = {} 75 | _func = func 76 | 77 | # first we get function name 78 | func_name = _func.__name__ 79 | # then we get the function annotations 80 | argspec = inspect.getfullargspec(_func) 81 | # get the function docstring 82 | func_doc = inspect.getdoc(_func) 83 | # parse the docstring to get the description 84 | func_description = ''.join([line for line in func_doc.split("\n") if not line.strip().startswith(':')]) 85 | func_description += extract_return_description(func_doc) 86 | # parse the docstring to get the descriptions for each parameter in dict format 87 | param_details = extract_params(func_doc) if func_doc else {} 88 | # attach parameter types to params and exclude fixed args 89 | # get params 90 | params = {} 91 | for param_name in argspec.args: 92 | if param_name not in fixed_args.keys(): 93 | params[param_name] = { 94 | "description": param_details.get(param_name) or "", 95 | "type": type_mapping(argspec.annotations.get(param_name, type(None))) 96 | } 97 | # calculate required parameters excluding fixed args 98 | # _required = [arg for arg in argspec.args if arg not in fixed_args] 99 | _required = [i for i in argspec.args if i not in fixed_args.keys()] 100 | if inspect.getfullargspec(_func).defaults: 101 | _required = [argspec.args[i] for i, a in enumerate(argspec.args) if 102 | 
argspec.args[i] not in inspect.getfullargspec(_func).defaults and argspec.args[ 103 | i] not in fixed_args.keys()] 104 | # then return everything in dict 105 | # TODO: Move this to OpenAIFunctionWrapper 106 | return { 107 | "name": func_name, 108 | "description": func_description, 109 | "parameters": { 110 | "type": "object", 111 | "properties": params, 112 | "required": _required 113 | }, 114 | 115 | } 116 | -------------------------------------------------------------------------------- /func_ai/workflow_creator.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from func_ai.function_indexer import FunctionIndexer 4 | 5 | curdir = os.path.dirname(os.path.realpath(__file__)) 6 | 7 | 8 | def create_workflow(steps: list): 9 | """ 10 | Суздаване на workflow от стъпки 11 | 12 | :param steps: Лист от стъпки 13 | :return: 14 | """ 15 | print(steps) 16 | # TODO create a system prompt that includes the steps and instructions to ask the user to provide any missing information 17 | 18 | return "ok" 19 | 20 | 21 | def random_function(query: str) -> list: 22 | """ 23 | Връща случйно число от 1 до 3 24 | 25 | :param query: Заявка 26 | :return: 27 | """ 28 | return "ok" 29 | 30 | 31 | def main(query): 32 | _indexer = FunctionIndexer(f"{curdir}/function_indexer") 33 | _indexer.reset_function_index() 34 | _indexer.index_functions([]) 35 | 36 | _catalog_str = "\n".join([f"- {fn}:{fd}" for fn, fd in _indexer.get_ai_fn_abbr_map().items()]) 37 | # _system_prompt = f""" 38 | # Ти си помощник за планиране на действия. Твоята цел е от зададен системен каталог с операции да създадеш план за изпълнение на целите на потребителя. 
39 | # 40 | # Системен Каталог с операции: 41 | # {_catalog_str} 42 | # 43 | # Пример: 44 | # - function_search_web_site: намери лекарство в даден сайт 45 | # - function_get_info: вземи информация за лекарство 46 | # - function_to_index_info: Индексирай информация за лекарство 47 | # 48 | # 49 | # Правила: 50 | # - Ще генерираш план за изпълнение на целите на потребителя под формата на лист с операции. 51 | # - Ще използваш само функции от системния каталог. 52 | # 53 | # 54 | # Рестрикции: 55 | # - Не използвай функции, които не са в системния каталог. 56 | # - Не генерирай нищо друго освен лист с операции. 57 | # """ 58 | _system_prompt = f""" 59 | Роля: Ти си експерт в планирането на действия, който използва системен каталог с операции за създаване на план за изпълнение на целите на потребителя. 60 | 61 | Задача: Използвай зададения системен каталог с операции, за да създадеш детайлен план за изпълнение на целите на потребителя. Този план трябва да бъде представен като списък с операции, които трябва да бъдат изпълнени. 62 | 63 | Контекст: Системният каталог с операции включва следните функции: 64 | {_catalog_str} 65 | 66 | Примери за функции от системния каталог: 67 | - function_search_web_site намери лекарство в даден сайт 68 | - function_get_info: вземи информация за лекарство 69 | - function_to_index_info: Индексирай информация за лекарство 70 | 71 | Правила: 72 | - Генерирай план за изпълнение на целите на потребителя под формата на списък с операции. 73 | - Използвай само функции от системния каталог. 74 | - Използвай само имената на фукнции от системния каталог. 75 | 76 | Ограничения: 77 | - Не използвай функции, които не са в системния каталог. 78 | - Не генерирай нищо друго освен списък с операции без допълнителни коментари или инфорамция. 79 | - Не добавяй параметри на фунциите от системния каталог. 80 | 81 | Когато свършиш с планирането спри и не генерирай повече съобщения. 
82 | """ 83 | 84 | _query = f""" 85 | {query} 86 | """ 87 | 88 | _messages = [{ 89 | "role": "system", 90 | "content": _system_prompt 91 | }, 92 | { 93 | "role": "user", 94 | "content": _query 95 | }] 96 | _ai_fun_map, _fns_map, _fns_index_map = FunctionIndexer.get_functions([create_workflow]) 97 | _messages_1 = run_function_loop(_messages, None, None) 98 | print("Done") 99 | # The bellow message is useful to reduce the output token count and prevent the model from generating unneccecary messages 100 | 101 | _new_system_prompt = f""" 102 | Създай воркфлоу от следните стъпки: 103 | {_messages_1[-1]["content"]} 104 | """ 105 | _new_messages = [{ 106 | "role": "user", 107 | "content": _new_system_prompt 108 | }, ] 109 | _new_messages.append( 110 | {"role": "assistant", 111 | "content": "After executing the given function I will stop and will not generate any more messages"}) 112 | _messages_2 = run_function_loop(_new_messages, _ai_fun_map, _fns_map) 113 | print(_messages_2) 114 | calculate_costs() 115 | reset_usage() 116 | 117 | 118 | if __name__ == '__main__': 119 | main("Искам да добавя Панадол Бебе в базата данни.") 120 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: AI Functional Catalog 2 | extra: 3 | homepage: https://amikos.tech 4 | theme: 5 | name: material 6 | icon: 7 | logo: material/database-cog-outline 8 | features: 9 | - navigation.instant 10 | - navigation.tracking 11 | - navigation.tabs 12 | - navigation.tabs.sticky 13 | - toc.follow 14 | - navigation.top 15 | - search.suggest 16 | - search.highlight 17 | - search.share 18 | markdown_extensions: 19 | - admonition 20 | - pymdownx.details 21 | - pymdownx.superfences: 22 | custom_fences: 23 | - name: mermaid 24 | class: mermaid 25 | format: !!python/name:pymdownx.superfences.fence_code_format 26 | - pymdownx.highlight: 27 | anchor_linenums: true 28 | line_spans: 
__span 29 | pygments_lang_class: true 30 | - pymdownx.inlinehilite 31 | - pymdownx.snippets 32 | - attr_list 33 | - pymdownx.emoji: 34 | emoji_index: !!python/name:materialx.emoji.twemoji 35 | emoji_generator: !!python/name:materialx.emoji.to_svg 36 | repo_name: amikos-tech/func-ai 37 | plugins: 38 | - search: 39 | lang: en -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "func-ai" 3 | version = "0.0.14" 4 | description = "AI Functional Catalog - OpenAI functions on steriods" 5 | authors = ["Trayan Azarov "] 6 | readme = "README.md" 7 | license = "MIT" 8 | packages = [{ include = "func_ai" }] 9 | 10 | [tool.poetry.urls] 11 | "Bug Tracker" = "https://github.com/amikos-tech/func-ai/issues" 12 | "Homepage" = "https://github.com/amikos-tech/func-ai/" 13 | "Source" = "https://github.com/amikos-tech/func-ai/" 14 | 15 | [tool.poetry.dependencies] 16 | python = "^3.10" 17 | openai = "^0.27.8" 18 | requests = "^2.31.0" 19 | chromadb = "^0.4.2" 20 | pyyaml = "^6.0" 21 | jsonschema = "^4.18.0" 22 | python-dotenv = "^1.0.0" 23 | tenacity = "^8.2.2" 24 | jinja2 = "^3.1.2" 25 | gradio = "^3.36.1" 26 | transformers = "^4.30.2" 27 | torch = "^2.0.1" 28 | 29 | 30 | [tool.poetry.group.test.dependencies] 31 | pytest = "^7.4.0" 32 | pytest-env = "^0.8.2" 33 | pytest-html = "^3.2.0" 34 | pytest-metadata = "^3.0.0" 35 | jupyter = "^1.0.0" 36 | 37 | [build-system] 38 | requires = ["poetry-core"] 39 | build-backend = "poetry.core.masonry.api" 40 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | # pytest.ini 2 | [pytest] 3 | addopts = -rP --junitxml=report.xml --html=report.html --self-contained-html 4 | testpaths = 5 | tests 6 | bdd_features_base_dir = tests/features/ 7 | log_format = %(asctime)s 
%(levelname)s %(message)s 8 | log_date_format = %Y-%m-%d %H:%M:%S 9 | log_cli=true 10 | log_cli_level = DEBUG 11 | env = 12 | KUBECONFIG=./backend/config/config-ext -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amikos-tech/func-ai/6d86446e1010c280d98fce7d865283c2486cde5d/tests/__init__.py -------------------------------------------------------------------------------- /tests/jupyter/doc-example-testing.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "source": [ 6 | "# OpenAI Function Wrapper\n", 7 | "\n", 8 | "Here we demonstrate how to use `OpenAIfunctionWrapper` to wrap a python function and use LLM to call the function with parameters.\n" 9 | ], 10 | "metadata": { 11 | "collapsed": false 12 | } 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "outputs": [ 18 | { 19 | "name": "stderr", 20 | "output_type": "stream", 21 | "text": [ 22 | "2023-07-09 08:41:28,023 - func_ai.utils.llm_tools - DEBUG - Prompt: Say hello to John\n", 23 | "2023-07-09 08:41:28,027 - openai - DEBUG - message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions\n", 24 | "2023-07-09 08:41:28,028 - openai - DEBUG - api_version=None data='{\"model\": \"gpt-3.5-turbo-0613\", \"messages\": [{\"role\": \"user\", \"content\": \"Say hello to John\"}], \"functions\": [{\"name\": \"say_hello\", \"description\": \"This is a function that says hello to the user\", \"parameters\": {\"type\": \"object\", \"properties\": {\"name\": {\"description\": \"Name of the person to say hello to\", \"type\": \"string\"}}, \"required\": [\"name\"]}}], \"function_call\": \"auto\", \"temperature\": 0.0, \"top_p\": 1.0, \"frequency_penalty\": 0.0, \"presence_penalty\": 0.0, \"max_tokens\": 256}' 
message='Post details'\n", 25 | "2023-07-09 08:41:29,161 - urllib3.connectionpool - DEBUG - https://api.openai.com:443 \"POST /v1/chat/completions HTTP/1.1\" 200 None\n", 26 | "2023-07-09 08:41:29,178 - openai - DEBUG - message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=822 request_id=390f645eb76e0de6b991d41d639c09b3 response_code=200\n", 27 | "2023-07-09 08:41:29,180 - func_ai.utils.llm_tools - DEBUG - Response: usage={'gpt-3.5-turbo-0613': {'prompt_tokens': 64, 'completion_tokens': 15, 'total_tokens': 79}} cost_mapping={} conversation_store=OpenAIConversationStore(conversation=[{'role': 'user', 'content': 'Say hello to John'}, JSON: {\n", 28 | " \"role\": \"assistant\",\n", 29 | " \"content\": null,\n", 30 | " \"function_call\": {\n", 31 | " \"name\": \"say_hello\",\n", 32 | " \"arguments\": \"{\\n \\\"name\\\": \\\"John\\\"\\n}\"\n", 33 | " }\n", 34 | "}]) max_tokens=256 model='gpt-3.5-turbo-0613' temperature=0.0\n" 35 | ] 36 | }, 37 | { 38 | "name": "stdout", 39 | "output_type": "stream", 40 | "text": [ 41 | "Hello John!\n" 42 | ] 43 | }, 44 | { 45 | "data": { 46 | "text/plain": "{'name': 'say_hello', 'description': 'This is a function that says hello to the user', 'parameters': {'type': 'object', 'properties': {'name': {'description': 'Name of the person to say hello to', 'type': 'string'}}, 'required': ['name']}}" 47 | }, 48 | "execution_count": 2, 49 | "metadata": {}, 50 | "output_type": "execute_result" 51 | } 52 | ], 53 | "source": [ 54 | "from dotenv import load_dotenv\n", 55 | "from func_ai.utils import OpenAIFunctionWrapper, OpenAIInterface\n", 56 | "\n", 57 | "load_dotenv()\n", 58 | "\n", 59 | "\n", 60 | "def say_hello(name: str):\n", 61 | " \"\"\"\n", 62 | " This is a function that says hello to the user\n", 63 | "\n", 64 | " :param name: Name of the person to say hello to\n", 65 | " :return:\n", 66 | " \"\"\"\n", 67 | " print(f\"Hello {name}!\")\n", 68 | "\n", 69 | "\n", 70 | "_func_wrap = 
OpenAIFunctionWrapper.from_python_function(say_hello, OpenAIInterface())\n", 71 | "\n", 72 | "_func_wrap.from_prompt(\"Say hello to John\")" 73 | ], 74 | "metadata": { 75 | "collapsed": false, 76 | "ExecuteTime": { 77 | "end_time": "2023-07-09T05:41:29.187456Z", 78 | "start_time": "2023-07-09T05:41:28.021639Z" 79 | } 80 | } 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "source": [ 85 | "We can also also use partials to fix sensitive or non-compliant parameters." 86 | ], 87 | "metadata": { 88 | "collapsed": false 89 | } 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 3, 94 | "outputs": [ 95 | { 96 | "name": "stderr", 97 | "output_type": "stream", 98 | "text": [ 99 | "2023-07-09 09:19:00,850 - func_ai.utils.llm_tools - DEBUG - Prompt: Say hello to John\n", 100 | "2023-07-09 09:19:00,853 - openai - DEBUG - message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions\n", 101 | "2023-07-09 09:19:00,853 - openai - DEBUG - api_version=None data='{\"model\": \"gpt-3.5-turbo-0613\", \"messages\": [{\"role\": \"user\", \"content\": \"Say hello to John\"}], \"functions\": [{\"name\": \"say_hello\", \"description\": \"This is a function that says hello to the user\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}}], \"function_call\": \"auto\", \"temperature\": 0.0, \"top_p\": 1.0, \"frequency_penalty\": 0.0, \"presence_penalty\": 0.0, \"max_tokens\": 256}' message='Post details'\n", 102 | "2023-07-09 09:19:00,859 - urllib3.util.retry - DEBUG - Converted retries value: 2 -> Retry(total=2, connect=None, read=None, redirect=None, status=None)\n", 103 | "2023-07-09 09:19:00,889 - urllib3.connectionpool - DEBUG - Starting new HTTPS connection (1): api.openai.com:443\n", 104 | "2023-07-09 09:19:02,009 - urllib3.connectionpool - DEBUG - https://api.openai.com:443 \"POST /v1/chat/completions HTTP/1.1\" 200 None\n", 105 | "2023-07-09 09:19:02,012 - openai - DEBUG - message='OpenAI API response' 
path=https://api.openai.com/v1/chat/completions processing_ms=711 request_id=0dbe0a70bb073d0f1954d7e438d06519 response_code=200\n", 106 | "2023-07-09 09:19:02,013 - func_ai.utils.llm_tools - DEBUG - Response: usage={'gpt-3.5-turbo-0613': {'prompt_tokens': 47, 'completion_tokens': 7, 'total_tokens': 54}} cost_mapping={} conversation_store=OpenAIConversationStore(conversation=[{'role': 'user', 'content': 'Say hello to John'}, JSON: {\n", 107 | " \"role\": \"assistant\",\n", 108 | " \"content\": null,\n", 109 | " \"function_call\": {\n", 110 | " \"name\": \"say_hello\",\n", 111 | " \"arguments\": \"{}\"\n", 112 | " }\n", 113 | "}]) max_tokens=256 model='gpt-3.5-turbo-0613' temperature=0.0\n" 114 | ] 115 | }, 116 | { 117 | "name": "stdout", 118 | "output_type": "stream", 119 | "text": [ 120 | "Hello World!\n" 121 | ] 122 | }, 123 | { 124 | "data": { 125 | "text/plain": "{'name': 'say_hello', 'description': 'This is a function that says hello to the user', 'parameters': {'type': 'object', 'properties': {}, 'required': []}}" 126 | }, 127 | "execution_count": 3, 128 | "metadata": {}, 129 | "output_type": "execute_result" 130 | } 131 | ], 132 | "source": [ 133 | "from functools import partial\n", 134 | "_func_wrap = OpenAIFunctionWrapper.from_python_function(partial(say_hello,name=\"World\"), OpenAIInterface())\n", 135 | "\n", 136 | "_func_wrap.from_prompt(\"Say hello\")" 137 | ], 138 | "metadata": { 139 | "collapsed": false, 140 | "ExecuteTime": { 141 | "end_time": "2023-07-09T06:19:02.443030Z", 142 | "start_time": "2023-07-09T06:19:00.852033Z" 143 | } 144 | } 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": null, 149 | "outputs": [], 150 | "source": [], 151 | "metadata": { 152 | "collapsed": false 153 | } 154 | } 155 | ], 156 | "metadata": { 157 | "kernelspec": { 158 | "display_name": "Python 3", 159 | "language": "python", 160 | "name": "python3" 161 | }, 162 | "language_info": { 163 | "codemirror_mode": { 164 | "name": "ipython", 165 | "version": 2 
166 | }, 167 | "file_extension": ".py", 168 | "mimetype": "text/x-python", 169 | "name": "python", 170 | "nbconvert_exporter": "python", 171 | "pygments_lexer": "ipython2", 172 | "version": "2.7.6" 173 | } 174 | }, 175 | "nbformat": 4, 176 | "nbformat_minor": 0 177 | } 178 | -------------------------------------------------------------------------------- /tests/local_inf.py: -------------------------------------------------------------------------------- 1 | from time import perf_counter 2 | 3 | from dotenv import load_dotenv 4 | from transformers import pipeline 5 | 6 | from func_ai.utils.llm_tools import OpenAIInterface 7 | from func_ai.utils.openapi_function_parser import OpenAPISpecOpenAIWrapper 8 | 9 | 10 | def get_highest_score_label(data): 11 | scores = data['scores'] 12 | highest_score = max(scores) 13 | highest_score_index = scores.index(highest_score) 14 | return data['labels'][highest_score_index] 15 | 16 | 17 | def test_pipeline(): 18 | load_dotenv() 19 | pipe = pipeline(model="facebook/bart-large-mnli") 20 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 21 | llm_interface=OpenAIInterface()) 22 | t0 = perf_counter() 23 | _sum = _spec.operations_summary 24 | _scored_labels = pipe("I want to add a new pet to the store.", 25 | # here we give the function descriptions as labels to the model as this seems to work better 26 | candidate_labels=[fn for fn in _sum.values()], 27 | ) 28 | elapsed = 1000 * (perf_counter() - t0) 29 | print("Inference time: %d ms.", elapsed) 30 | # return the index of the highest scoring label 31 | _highest_score_label = get_highest_score_label(_scored_labels) 32 | print(next((k for k, v in _sum.items() if v == _highest_score_label), None)) 33 | # _max = max(_scored_labels, key=lambda x: x["score"]) 34 | -------------------------------------------------------------------------------- /tests/template.xml: -------------------------------------------------------------------------------- 1 | 2 | 
4 | 5 | 6 | your_username 7 | your_password 8 | 9 | 10 | 11 | 12 | {{ID}} 13 | {{NAME}} 14 | 15 | 16 | -------------------------------------------------------------------------------- /tests/template2.txt: -------------------------------------------------------------------------------- 1 | My is {{NAME}} and I come from {{COUNTRY}} 2 | -------------------------------------------------------------------------------- /tests/test_decorators.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | from dotenv import load_dotenv 4 | from pydantic import Field 5 | 6 | from func_ai.utils.llm_tools import OpenAIInterface, OpenAISchema 7 | 8 | load_dotenv() 9 | 10 | 11 | class UserType(str, Enum): 12 | """ 13 | This is a user type 14 | """ 15 | patient = "patient" 16 | doctor = "doctor" 17 | nurse = "nurse" 18 | 19 | 20 | class User(OpenAISchema): 21 | """ 22 | This is a user 23 | """ 24 | id: int = Field(None, description="The user's id") 25 | name: str = Field(..., description="The user's name") 26 | type: UserType = Field(default="doctor", description="The user's type") 27 | 28 | 29 | def test_from_prompt(): 30 | # print(User.openai_schema) 31 | _user = User.from_prompt(prompt="Create a user with id 100 and name Jimmy. 
Jimmy is a nurse", 32 | llm_interface=OpenAIInterface()).dict() 33 | print(_user) 34 | assert _user["id"] == 100 35 | assert _user["name"] == "Jimmy" 36 | assert _user["type"] == "nurse" 37 | -------------------------------------------------------------------------------- /tests/test_function_indexer.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import chromadb 4 | import openai 5 | from chromadb import Settings 6 | from dotenv import load_dotenv 7 | 8 | from func_ai.function_indexer import FunctionIndexer 9 | from func_ai.utils import OpenAIInterface 10 | 11 | 12 | def fun_to_index_1() -> str: 13 | """ 14 | This is a function that bars 15 | 16 | :return: Returns something of interest 17 | """ 18 | pass 19 | 20 | 21 | def fun_to_index_2() -> str: 22 | """ 23 | This is a function that foo bar 24 | 25 | :return: Returns nothing of interest 26 | """ 27 | pass 28 | 29 | 30 | def test_find_functions_with_threshold_some_results_above(): 31 | """ 32 | This function tests the search with a threshold 33 | :return: 34 | """ 35 | load_dotenv() 36 | openai.api_key = os.getenv("OPENAI_API_KEY") 37 | _indexer = FunctionIndexer("./fi_test_1") 38 | _indexer.reset_function_index() 39 | _indexer.index_functions([fun_to_index_1, fun_to_index_2]) 40 | _res = _indexer.find_functions("Function to foo bar", similarity_threshold=0.1) 41 | assert len(_res) == 1 42 | print(f"Got response: {_res}") 43 | 44 | 45 | def test_find_functions_with_threshold_all_results_below(): 46 | """ 47 | This function tests the search with a threshold 48 | :return: 49 | """ 50 | load_dotenv() 51 | openai.api_key = os.getenv("OPENAI_API_KEY") 52 | _indexer = FunctionIndexer("./fi_test_1") 53 | _indexer.reset_function_index() 54 | _indexer.index_functions([fun_to_index_1, fun_to_index_2]) 55 | _res = _indexer.find_functions("Function to foo bar", similarity_threshold=0.5) 56 | assert len(_res) == 2 57 | print(f"Got response: {_res}") 58 | 59 | 60 | def 
test_function_indexer_init_no_args(): 61 | load_dotenv() 62 | _indexer = FunctionIndexer() 63 | 64 | assert _indexer.collection_name == "function_index" 65 | 66 | 67 | def function_to_index(a: int, b: int) -> int: 68 | """ 69 | This is a function that adds two numbers 70 | 71 | :param a: First number 72 | :param b: Second number 73 | :return: Sum of a and b 74 | """ 75 | return a + b 76 | 77 | 78 | def another_function_to_index() -> str: 79 | """ 80 | This is a function returns hello world 81 | 82 | :return: Hello World 83 | """ 84 | 85 | return "Hello World" 86 | 87 | 88 | def test_function_indexer_init_no_args_index_function(): 89 | load_dotenv() 90 | _indexer = FunctionIndexer() 91 | 92 | _indexer.index_functions([function_to_index]) 93 | 94 | 95 | def test_function_indexer_init_no_args_find_function(): 96 | load_dotenv() 97 | _indexer = FunctionIndexer(chroma_client=chromadb.PersistentClient(settings=Settings(allow_reset=True))) 98 | _indexer.reset_function_index() 99 | _indexer.index_functions([function_to_index, another_function_to_index]) 100 | _results = _indexer.find_functions("Add two numbers", max_results=10, similarity_threshold=0.2) 101 | assert len(_results) == 1 102 | assert _results[0].function(1, 2) == 3 103 | 104 | 105 | def test_function_indexer_init_no_args_find_function_enhanced_summary(): 106 | load_dotenv() 107 | _indexer = FunctionIndexer(chroma_client=chromadb.PersistentClient(settings=Settings(allow_reset=True))) 108 | _indexer.reset_function_index() 109 | _indexer.index_functions([function_to_index, another_function_to_index], enhanced_summary=True) 110 | _results = _indexer.find_functions("Add two numbers", max_results=10, similarity_threshold=0.2) 111 | assert len(_results) == 1 112 | assert _results[0].function(1, 2) == 3 113 | 114 | 115 | def test_function_indexer_reindex(): 116 | load_dotenv() 117 | _indexer = FunctionIndexer(chroma_client=chromadb.PersistentClient(settings=Settings(allow_reset=True))) 118 | 
_indexer.reset_function_index() 119 | _indexer.index_functions([function_to_index, another_function_to_index]) 120 | _indexer.index_functions([function_to_index, another_function_to_index]) 121 | # _results = _indexer.find_functions("Add two numbers", max_results=10, similarity_threshold=0.2) 122 | # assert len(_results) == 1 123 | # assert _results[0].function(1, 2) == 3 -------------------------------------------------------------------------------- /tests/test_function_indexing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function indexing test 3 | """ 4 | from dotenv import load_dotenv 5 | 6 | from func_ai.function_indexer import FunctionIndexer 7 | from func_ai.utils.llm_tools import OpenAIInterface 8 | from func_ai.utils.openapi_function_parser import OpenAPISpecOpenAIWrapper 9 | 10 | 11 | def test_api_indexing(): 12 | load_dotenv() 13 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 14 | llm_interface=OpenAIInterface()) 15 | 16 | _fi = FunctionIndexer() 17 | _fi.reset_function_index() 18 | _fi.index_wrapper_functions([f for k, f in _spec.operations.items()]) 19 | print(_fi.find_functions("How can I add a pet")) 20 | 21 | 22 | def test_api_parser_with_index(): 23 | load_dotenv() 24 | 25 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 26 | llm_interface=OpenAIInterface(), index=True) 27 | 28 | # print(_spec.api_qa("What operations are available?")) 29 | # print(_spec.api_qa("What are the mandatory parameters for operation addPet?")) 30 | # print(_spec.api_qa("Give me an example of how to use operation addPet?")) 31 | print(_spec.api_qa("Give me a unit test using pytest of addPet operation?", 32 | max_tokens=500)) # if we expect a larger response then we could provide a larger max_tokens 33 | -------------------------------------------------------------------------------- /tests/test_openapi_function_parser.py: 
-------------------------------------------------------------------------------- 1 | import json 2 | 3 | from dotenv import load_dotenv 4 | 5 | from func_ai.utils.llm_tools import OpenAIInterface 6 | from func_ai.utils.openapi_function_parser import OpenAPISpecOpenAIWrapper 7 | 8 | 9 | def test_func_wrapper(): 10 | """ 11 | This function tests the func_wrapper 12 | :return: 13 | """ 14 | load_dotenv() 15 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 16 | llm_interface=OpenAIInterface()) 17 | print(_spec.from_prompt("Get pet with id 10", "getPetById").last_call) 18 | 19 | 20 | def test_func_wrapper_chaining(): 21 | """ 22 | This function tests the func_wrapper 23 | :return: 24 | """ 25 | load_dotenv() 26 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 27 | llm_interface=OpenAIInterface()) 28 | _calls = _spec.from_prompt( 29 | "Add new pet named Rocky with following photoUrl: http://rocky.me/pic.png. Tag the Rocky with 'dog' and 'pet'", 30 | "addPet").from_prompt("Get pet", "getPetById").calls 31 | print(json.dumps(_calls)) 32 | 33 | 34 | def test_get_spec_operation(): 35 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 36 | llm_interface=OpenAIInterface()) 37 | _spec_dict = _spec.to_dict() 38 | assert "getPetById" in _spec_dict 39 | assert "addPet" in _spec_dict 40 | print(json.dumps(_spec.to_dict(), indent=2)) 41 | 42 | 43 | def test_spec_qa_basic(): 44 | load_dotenv() 45 | _spec = OpenAPISpecOpenAIWrapper.from_url('http://petstore.swagger.io/v2/swagger.json', 46 | llm_interface=OpenAIInterface()) 47 | _spec_dict = _spec.to_dict() 48 | # 49 | # llm_interface = OpenAIInterface() 50 | # llm_interface.conversation_store.add_system_message("You are an API expert. Your goal is to assist the user" 51 | # "in using an API that he/she is not familiar with." 
52 | # "The user will provide commands which you will use to find out information about an API" 53 | # "Then the user will ask you questions about the API and you will answer them." 54 | # "" 55 | # "Rules:" 56 | # "1. You will only answer questions about the API" 57 | # "2. You will keep your output only to the essential information") 58 | # _resp = llm_interface.send(f"Give me an example of how to add a pet\n Context: {_spec.get_operation('addPet')}") 59 | # print(_resp) 60 | print("\n".join([f"- {k}: {v}" for k, v in _spec.operations_summary.items()])) 61 | -------------------------------------------------------------------------------- /tests/test_parser.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import json 3 | 4 | from func_ai.utils.py_function_parser import func_to_json 5 | 6 | 7 | def func_with_no_params(): 8 | """ 9 | This function has no parameters 10 | :return: 11 | """ 12 | return 1 13 | 14 | 15 | def func_with_mandatory_params_single_space_doc(a: str, b: str): 16 | """ 17 | This function has mandatory parameters 18 | :param a: 19 | :param b: 20 | :return: 21 | """ 22 | return 1 23 | 24 | 25 | def func_with_optional_params_single_space_doc(a: str, b: str = "b"): 26 | """ 27 | This function has optional parameters 28 | :param a: 29 | :param b: 30 | :return: 31 | """ 32 | return 1 33 | 34 | 35 | def func_with_mandatory_params_double_space_doc(a: str, b: str): 36 | """ 37 | This function has mandatory parameters 38 | 39 | :param a: 40 | :param b: 41 | :return: 42 | """ 43 | return 1 44 | 45 | 46 | def func_with_optional_params_double_space_doc(a: str, b: str = "b"): 47 | """ 48 | This function has optional parameters 49 | 50 | :param a: 51 | :param b: 52 | :return: 53 | """ 54 | return 1 55 | 56 | 57 | def function_with_return_description() -> str: 58 | """ 59 | This function has a return description 60 | 61 | :return: This is the return description explaining what it returns 62 | """ 63 | 
return "" 64 | 65 | 66 | def test_func_to_json_func_with_no_params(): 67 | """ 68 | This function tests func_to_json with a function that has no parameters 69 | :return: 70 | """ 71 | _json_fun = func_to_json(func_with_no_params) 72 | assert _json_fun["name"] == "func_with_no_params" 73 | assert _json_fun["description"] == "This function has no parameters" 74 | assert 'properties' in _json_fun["parameters"] 75 | assert 'type' in _json_fun["parameters"] 76 | assert _json_fun["parameters"]["type"] == "object" 77 | assert _json_fun["parameters"]["properties"] == {} 78 | assert _json_fun["required"] == [] 79 | 80 | 81 | def test_func_to_json_func_with_mandatory_params_single_space_doc(): 82 | """ 83 | This function tests func_to_json with a function that has mandatory parameters and single space doc 84 | :return: 85 | """ 86 | _json_fun = func_to_json(func_with_mandatory_params_single_space_doc) 87 | assert _json_fun["name"] == "func_with_mandatory_params_single_space_doc" 88 | assert _json_fun["description"] == "This function has mandatory parameters" 89 | assert 'properties' in _json_fun["parameters"] 90 | assert 'type' in _json_fun["parameters"] 91 | assert _json_fun["parameters"]["type"] == "object" 92 | assert _json_fun["parameters"]["properties"] == { 93 | "a": { 94 | "description": "", 95 | "type": "string" 96 | }, 97 | "b": { 98 | "description": "", 99 | "type": "string" 100 | } 101 | } 102 | assert _json_fun["required"] == ["a", "b"] 103 | 104 | 105 | def test_func_to_json_partial_func_with_mandatory_params_single_space_doc(): 106 | """ 107 | This function tests func_to_json with a function that has mandatory parameters and single space doc 108 | :return: 109 | """ 110 | _json_fun = func_to_json(functools.partial(func_with_mandatory_params_single_space_doc, a="a")) 111 | assert _json_fun["name"] == "func_with_mandatory_params_single_space_doc" 112 | assert _json_fun["description"] == "This function has mandatory parameters" 113 | assert 'properties' in 
_json_fun["parameters"] 114 | assert 'type' in _json_fun["parameters"] 115 | assert _json_fun["parameters"]["type"] == "object" 116 | assert _json_fun["parameters"]["properties"] == { 117 | "b": { 118 | "description": "", 119 | "type": "string" 120 | } 121 | } 122 | assert _json_fun["required"] == ["b"] 123 | 124 | 125 | def test_func_to_json_partialmethod_func_with_mandatory_params_single_space_doc(): 126 | """ 127 | This function tests func_to_json with a function that has mandatory parameters and single space doc 128 | :return: 129 | """ 130 | _json_fun = func_to_json(functools.partialmethod(func_with_mandatory_params_single_space_doc, b="b")) 131 | assert _json_fun["name"] == "func_with_mandatory_params_single_space_doc" 132 | assert _json_fun["description"] == "This function has mandatory parameters" 133 | assert 'properties' in _json_fun["parameters"] 134 | assert 'type' in _json_fun["parameters"] 135 | assert _json_fun["parameters"]["type"] == "object" 136 | assert _json_fun["parameters"]["properties"] == { 137 | "a": { 138 | "description": "", 139 | "type": "string" 140 | } 141 | } 142 | assert _json_fun["required"] == ["a"] 143 | 144 | 145 | def test_func_to_json_func_with_optional_params_single_space_doc(): 146 | """ 147 | This function tests func_to_json with a function that has optional parameters and single space doc 148 | :return: 149 | """ 150 | _json_fun = func_to_json(func_with_optional_params_single_space_doc) 151 | assert _json_fun["name"] == "func_with_optional_params_single_space_doc" 152 | assert _json_fun["description"] == "This function has optional parameters" 153 | assert 'properties' in _json_fun["parameters"] 154 | assert 'type' in _json_fun["parameters"] 155 | assert _json_fun["parameters"]["type"] == "object" 156 | assert _json_fun["parameters"]["properties"] == { 157 | "a": { 158 | "description": "", 159 | "type": "string" 160 | }, 161 | "b": { 162 | "description": "", 163 | "type": "string" 164 | } 165 | } 166 | assert 
_json_fun["required"] == ["a"] 167 | 168 | 169 | def test_func_to_json_partial_func_with_optional_params_single_space_doc(): 170 | """ 171 | This function tests func_to_json with a function that has optional parameters and single space doc 172 | :return: 173 | """ 174 | _json_fun = func_to_json(functools.partial(func_with_optional_params_single_space_doc, a="a")) 175 | print(_json_fun) 176 | assert _json_fun["name"] == "func_with_optional_params_single_space_doc" 177 | assert _json_fun["description"] == "This function has optional parameters" 178 | assert 'properties' in _json_fun["parameters"] 179 | assert 'type' in _json_fun["parameters"] 180 | assert _json_fun["parameters"]["type"] == "object" 181 | assert _json_fun["parameters"]["properties"] == { 182 | "b": { 183 | "description": "", 184 | "type": "string" 185 | } 186 | } 187 | assert _json_fun["required"] == [] 188 | 189 | 190 | def test_func_to_json_partial_func_with_optional_params_single_space_doc_positional(): 191 | """ 192 | This function tests func_to_json with a function that has optional parameters and single space doc 193 | :return: 194 | """ 195 | _json_fun = func_to_json(functools.partial(func_with_optional_params_single_space_doc, "a")) 196 | print(_json_fun) 197 | assert _json_fun["name"] == "func_with_optional_params_single_space_doc" 198 | assert _json_fun["description"] == "This function has optional parameters" 199 | assert 'properties' in _json_fun["parameters"] 200 | assert 'type' in _json_fun["parameters"] 201 | assert _json_fun["parameters"]["type"] == "object" 202 | assert _json_fun["parameters"]["properties"] == { 203 | "b": { 204 | "description": "", 205 | "type": "string" 206 | } 207 | } 208 | assert _json_fun["required"] == [] 209 | 210 | 211 | def test_func_to_json_partialmethod_func_with_optional_params_single_space_doc(): 212 | """ 213 | This function tests func_to_json with a function that has optional parameters and single space doc 214 | :return: 215 | """ 216 | _json_fun = 
func_to_json(functools.partialmethod(func_with_optional_params_single_space_doc, b="b")) 217 | assert _json_fun["name"] == "func_with_optional_params_single_space_doc" 218 | assert _json_fun["description"] == "This function has optional parameters" 219 | assert 'properties' in _json_fun["parameters"] 220 | assert 'type' in _json_fun["parameters"] 221 | assert _json_fun["parameters"]["type"] == "object" 222 | assert _json_fun["parameters"]["properties"] == { 223 | "a": { 224 | "description": "", 225 | "type": "string" 226 | } 227 | } 228 | assert _json_fun["required"] == ['a'] 229 | 230 | 231 | def test_func_to_json_func_with_mandatory_params_double_space_doc(): 232 | """ 233 | This function tests func_to_json with a function that has mandatory parameters and double space doc 234 | :return: 235 | """ 236 | _json_fun = func_to_json(func_with_mandatory_params_double_space_doc) 237 | assert _json_fun["name"] == "func_with_mandatory_params_double_space_doc" 238 | assert _json_fun["description"] == "This function has mandatory parameters" 239 | assert 'properties' in _json_fun["parameters"] 240 | assert 'type' in _json_fun["parameters"] 241 | assert _json_fun["parameters"]["type"] == "object" 242 | assert _json_fun["parameters"]["properties"] == { 243 | "a": { 244 | "description": "", 245 | "type": "string" 246 | }, 247 | "b": { 248 | "description": "", 249 | "type": "string" 250 | } 251 | } 252 | assert _json_fun["required"] == ["a", "b"] 253 | 254 | 255 | def test_func_to_json_func_with_optional_params_double_space_doc(): 256 | """ 257 | This function tests func_to_json with a function that has optional parameters and double space doc 258 | :return: 259 | """ 260 | _json_fun = func_to_json(func_with_optional_params_double_space_doc) 261 | assert _json_fun["name"] == "func_with_optional_params_double_space_doc" 262 | assert _json_fun["description"] == "This function has optional parameters" 263 | assert 'properties' in _json_fun["parameters"] 264 | assert 'type' in 
_json_fun["parameters"] 265 | assert _json_fun["parameters"]["type"] == "object" 266 | assert _json_fun["parameters"]["properties"] == { 267 | "a": { 268 | "description": "", 269 | "type": "string" 270 | }, 271 | "b": { 272 | "description": "", 273 | "type": "string" 274 | } 275 | } 276 | assert _json_fun["required"] == ["a"] 277 | 278 | 279 | def test_func_with_return(): 280 | _json_fun = func_to_json(function_with_return_description) 281 | assert _json_fun["name"] == "function_with_return_description" 282 | print(_json_fun["description"]) 283 | assert "This is the return description explaining what it returns" in _json_fun["description"] 284 | -------------------------------------------------------------------------------- /tests/test_template.py: -------------------------------------------------------------------------------- 1 | """ doc """ 2 | import functools 3 | import json 4 | 5 | from dotenv import load_dotenv 6 | from jinja2 import Template, meta, Environment 7 | 8 | from func_ai.utils.jinja_template_functions import JinjaOpenAITemplateFunction 9 | from func_ai.utils.llm_tools import OpenAIInterface 10 | from func_ai.utils.py_function_parser import func_to_json 11 | 12 | 13 | def render_template(template_file: str, **params) -> str: 14 | """ 15 | Create subscription in the network using a template and given parameters by the user. 16 | 17 | :param template_file: 18 | :param params: parameters to be used in the template 19 | :return: 20 | """ 21 | # render the jinja template with the parameters 22 | 23 | with open(template_file) as f: 24 | prompt = f.read() 25 | print(Template(prompt).render(**params)) 26 | return "ok" 27 | 28 | 29 | def t1(a: str): 30 | """ 31 | wqewqeq 32 | 33 | :param a: 34 | :return: 35 | """ 36 | 37 | 38 | def test_template_fun(): 39 | load_dotenv() 40 | inf = OpenAIInterface() 41 | inf.conversation_store.add_system_message("""You are a code helper. Your goal is to help the user to convert a jinja template into a list of parameters. 
42 | 43 | User will provide a jinja template as input. 44 | 45 | Your task is to extract the jinja parameters and return them in a bullet list. 46 | 47 | Do not return anything other than the bullet list. 48 | Return just the parameter names and nothing else. 49 | Return only jinja2 template parameters and nothing else. 50 | """) 51 | with open("template2.txt") as f: 52 | prompt = f.read() 53 | resp = inf.send(prompt=prompt) 54 | dynamic_args = resp['content'].replace("-", "").split("\n") 55 | # here we want to use partial so that we can se the template file 56 | _fun_partial = functools.partial(render_template, template_file="template2.txt") 57 | _fun = func_to_json(_fun_partial) 58 | _fun["parameters"]['properties'] = {k: {"type": "string", "description": f"{k}"} for k in dynamic_args} 59 | _fun['required'] = [k for k in dynamic_args] 60 | _t_prompt = "I am John. I live in France" 61 | _fun["description"] = f"Render Template\nAccepts the following {resp['content']}" 62 | inf.conversation_store.add_system_message( 63 | """Help the user run a template with the parameters provided by the user.""") 64 | resp2 = inf.send(prompt=_t_prompt, 65 | functions=[_fun]) 66 | print(f"LLM Response: {resp2}") 67 | if "function_call" in resp2: 68 | args = json.loads(resp2["function_call"]["arguments"]) 69 | print(_fun_partial(**args)) 70 | 71 | 72 | def test_template_xml(): 73 | load_dotenv() 74 | inf = OpenAIInterface() 75 | inf.conversation_store.add_system_message("""You are a code helper. Your goal is to help the user to convert a jinja template into a list of parameters. 76 | 77 | User will provide a jinja template as input. 78 | 79 | Your task is to extract the jinja parameters and return them in a bullet list. 80 | 81 | Do not return anything other than the bullet list. 82 | Return just the parameter names and nothing else. 83 | Return only jinja2 template parameters and nothing else. 
84 | """) 85 | with open("template.xml") as f: 86 | prompt = f.read() 87 | resp = inf.send(prompt=prompt) 88 | dynamic_args = resp['content'].replace("-", "").split("\n") 89 | # here we want to use partial so that we can se the template file 90 | _fun_partial = functools.partial(render_template, template_file="template.xml") 91 | _fun = func_to_json(_fun_partial) 92 | _fun["parameters"]['properties'] = {k: {"type": "string", "description": f"{k}"} for k in dynamic_args} 93 | _fun['required'] = [k for k in dynamic_args] 94 | _t_prompt = "Create user 1 John" 95 | _fun["description"] = f"Render Template\nAccepts the following {resp['content']}" 96 | inf.conversation_store.add_system_message( 97 | """Help the user run a template with the parameters provided by the user.""") 98 | resp2 = inf.send(prompt=_t_prompt, 99 | functions=[_fun]) 100 | print(f"LLM Response: {resp2}") 101 | if "function_call" in resp2: 102 | args = json.loads(resp2["function_call"]["arguments"]) 103 | print(_fun_partial(**args)) 104 | print(f"Cost: {inf.get_usage()}") 105 | 106 | 107 | def test_template_vars(): 108 | _t = Template("{{ a }}") 109 | env = Environment() 110 | ast = env.parse("{{ a }}") 111 | # source = _t.environment.loader.get_source(_t.environment, _t.name) 112 | 113 | print(meta.find_undeclared_variables(ast)) # prints: {'a'} 114 | 115 | 116 | def test_jinja_template_object(): 117 | load_dotenv() 118 | ji = JinjaOpenAITemplateFunction.from_string_template("Name: {{ NAME }} \n Age: {{ AGE }}", OpenAIInterface()) 119 | resp = ji.render_from_prompt("John is 20 years old") 120 | assert "Name: John" in resp 121 | assert "Age: 20" in resp 122 | # prints 123 | """ 124 | Name: John 125 | Age: 20 126 | """ 127 | 128 | 129 | def test_jinja_template_object_complex_sentence(): 130 | load_dotenv() 131 | ji = JinjaOpenAITemplateFunction.from_string_template("Name: {{ NAME }} \n Age: {{ AGE }}", OpenAIInterface()) 132 | resp = ji.render_from_prompt("His name is John, He's born in Sofia and he's 
born 1983. We're in 2023.") 133 | print(resp) 134 | assert "Name: John" in resp 135 | assert "Age: 40" in resp 136 | # prints 137 | """ 138 | Name: John 139 | Age: 20 140 | """ 141 | 142 | 143 | def test_jinja_template_object_with_jinja_function(): 144 | load_dotenv() 145 | ji = JinjaOpenAITemplateFunction.from_string_template("Name: {{ NAME |upper}} \n Age: {{ AGE }}", OpenAIInterface()) 146 | resp = ji.render_from_prompt("John is 20 years old") 147 | assert "Name: JOHN" in resp 148 | assert "Age: 20" in resp 149 | print(resp) 150 | # prints 151 | """ 152 | Name: JOHN 153 | Age: 20 154 | """ 155 | 156 | 157 | def assert_is_digit(x): 158 | assert x.isdigit() 159 | return x 160 | 161 | 162 | def test_jinja_template_object_with_filters_function(): 163 | load_dotenv() 164 | ji = JinjaOpenAITemplateFunction.from_string_template("Name: {{ NAME }} \n Age: {{ AGE |validate_digit}}", 165 | OpenAIInterface(), 166 | filters={"validate_digit": assert_is_digit}) 167 | resp = ji.render_from_prompt("John is 20y") 168 | print(resp) 169 | assert "Name: John" in resp 170 | assert "Age: 20" in resp 171 | # prints 172 | """ 173 | Name: John 174 | Age: 20 175 | """ 176 | 177 | 178 | def test_single_prompt(): 179 | load_dotenv() 180 | llm = OpenAIInterface() 181 | _resp = llm.send( 182 | prompt="Create a user with name John and age 20\n Render as `Name: {{ NAME }} \n Age: {{ AGE }}`") 183 | 184 | print(_resp['content']) 185 | --------------------------------------------------------------------------------