├── tests ├── __init__.py ├── api │ ├── __init__.py │ ├── test_chat_decorator.py │ └── test_task_decorator.py ├── tasks │ ├── __init__.py │ ├── test_llm_chat.py │ └── test_llm_task.py ├── operators │ ├── __init__.py │ ├── base │ │ ├── __init__.py │ │ └── test_llm_settings.py │ ├── shared │ │ ├── __init__.py │ │ └── test_output_prompt.py │ ├── openai_operators │ │ ├── __init__.py │ │ ├── test_chat_operator.py │ │ └── test_operator.py │ ├── test_resolve_llm.py │ ├── test_utils.py │ └── test_operator_resolver.py ├── orchestrator │ ├── __init__.py │ ├── test_chat_orchestrator.py │ ├── test_future_llm_task.py │ └── test_task_orchestrator.py ├── python_parser │ ├── __init__.py │ ├── annotations │ │ ├── __init__.py │ │ └── test_type_annotation_to_schema.py │ ├── docstring_parsers │ │ ├── __init__.py │ │ └── reST │ │ │ ├── __init__.py │ │ │ └── test_parser.py │ ├── parser.py │ ├── test_magic_parser.py │ └── test_function_parser.py └── test_declarai.py ├── docs ├── CNAME ├── features │ ├── middlewares │ │ └── index.md │ ├── index.md │ ├── multi-model-multi-provider.md │ ├── chat │ │ ├── debugging-chat.md │ │ ├── customizing-chat-response.md │ │ ├── chat-memory │ │ │ ├── file-memory.md │ │ │ ├── redis-memory.md │ │ │ ├── mongodb-memory.md │ │ │ ├── postgresql-memory.md │ │ │ └── index.md │ │ ├── advanced-initialization.md │ │ ├── index.md │ │ └── controlling-chat-behavior.md │ ├── planning-future-tasks.md │ ├── magic.md │ ├── language-model-parameters.md │ ├── jinja_templating.md │ └── evals │ │ └── index.md ├── examples │ └── deployments │ │ ├── streamlit_img.png │ │ └── index.md ├── img │ ├── WeightsAndBiases-dashboard.png │ └── Vendi-logo.svg ├── providers │ ├── index.md │ ├── openai.md │ └── azure_openai.md ├── js │ ├── tablesort.js │ └── custom.js ├── css │ ├── custom.css │ ├── extra.css │ ├── mkdocstrings.css │ └── termynal.css ├── overrides │ └── main.html ├── beginners-guide │ ├── recap.md │ ├── simple-task.md │ ├── index.md │ ├── debugging-tasks.md │ └── controlling-task-behavior.md ├── contribute.md ├── integrations │ └── index.md ├── newsletter.md └── best-practices │ └── index.md ├── .python-version ├── mkdocs.insiders.yml ├── src └── declarai │ ├── evals │ ├── providers │ │ ├── __init__.py │ │ └── openai.py │ ├── __init__.py │ ├── logical_tasks │ │ ├── __init__.py │ │ └── sequence.py │ ├── manipulation │ │ ├── __init__.py │ │ └── data_manipulation_structured.py │ ├── generation │ │ ├── unstructured_short_form.py │ │ ├── unstructured_long_form.py │ │ ├── __init__.py │ │ ├── structured_open_ended.py │ │ └── structured_strict_complex.py │ ├── metadata_significance │ │ ├── __init__.py │ │ └── simple_task_significance.py │ ├── extraction │ │ ├── __init__.py │ │ ├── single_value.py │ │ ├── multiple_value.py │ │ ├── single_value_multi_types.py │ │ └── multiple_value_multi_types.py │ ├── runner.py │ └── README.md │ ├── python_parser │ ├── docstring_parsers │ │ ├── __init__.py │ │ ├── reST │ │ │ ├── __init__.py │ │ │ └── parser.py │ │ └── types.py │ ├── __init__.py │ ├── types.py │ ├── magic_parser.py │ ├── type_annotation_to_schema.py │ └── parser.py │ ├── core │ ├── __init__.py │ └── core_settings.py │ ├── __init__.py │ ├── middleware │ ├── third_party │ │ ├── __init__.py │ │ └── wandb_monitor.py │ ├── internal │ │ ├── __init__.py │ │ └── log_middleware.py │ ├── __init__.py │ └── base.py │ ├── operators │ ├── templates │ │ ├── instruct_function.py │ │ ├── chain_of_thought.py │ │ ├── __init__.py │ │ ├── output_structure.py │ │ └── output_prompt.py │ ├── openai_operators │ │ ├── __init__.py │ │ 
├── chat_operator.py │ │ ├── settings.py │ │ └── task_operator.py │ ├── utils.py │ ├── message.py │ ├── __init__.py │ └── llm.py │ ├── memory │ ├── __init__.py │ ├── base.py │ ├── in_memory.py │ ├── file.py │ ├── redis.py │ ├── mongodb.py │ └── postgres.py │ └── _base.py ├── .flake8 ├── .gitignore ├── assets └── Logo-declarai.png ├── .github └── workflows │ ├── puiblish-package.yml │ ├── publish-docs.yml │ └── test.yaml ├── .coveragerc ├── LICENSE ├── requirements.txt ├── pyproject.toml ├── scripts └── gen_ref_pages.py └── .pre-commit-config.yaml /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/CNAME: -------------------------------------------------------------------------------- 1 | declarai.com -------------------------------------------------------------------------------- /tests/api/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.8.10 2 | -------------------------------------------------------------------------------- /tests/operators/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/operators/base/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/orchestrator/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/python_parser/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/features/middlewares/index.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/operators/shared/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/examples/deployments/streamlit_img.png: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /mkdocs.insiders.yml: -------------------------------------------------------------------------------- 1 | INHERIT: mkdocs.yml 2 | -------------------------------------------------------------------------------- /src/declarai/evals/providers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/python_parser/annotations/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/tests/operators/openai_operators/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/orchestrator/test_chat_orchestrator.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/python_parser/docstring_parsers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/declarai/python_parser/docstring_parsers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/python_parser/docstring_parsers/reST/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length=150 3 | per-file-ignores=__init__.py:F401 -------------------------------------------------------------------------------- /src/declarai/core/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Core settings for Declarai. 3 | """ 4 | -------------------------------------------------------------------------------- /src/declarai/evals/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Evaluation suite module for DeclarAI. 3 | """ 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .venv/ 2 | __pycache__/ 3 | .idea/ 4 | dist/ 5 | .pytest_cache/ 6 | .env 7 | .coverage -------------------------------------------------------------------------------- /assets/Logo-declarai.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hunch-app/declarai/HEAD/assets/Logo-declarai.png -------------------------------------------------------------------------------- /src/declarai/python_parser/docstring_parsers/reST/__init__.py: -------------------------------------------------------------------------------- 1 | from .parser import ReSTDocstringParser 2 | -------------------------------------------------------------------------------- /src/declarai/evals/logical_tasks/__init__.py: -------------------------------------------------------------------------------- 1 | # from .sequence import chain_of_thought, chain_of_thought_kwargs 2 | -------------------------------------------------------------------------------- /docs/img/WeightsAndBiases-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hunch-app/declarai/HEAD/docs/img/WeightsAndBiases-dashboard.png -------------------------------------------------------------------------------- /src/declarai/evals/manipulation/__init__.py: -------------------------------------------------------------------------------- 1 | from .data_manipulation_structured import data_manipulation, data_manipulation_kwargs 2 | -------------------------------------------------------------------------------- 
/docs/providers/index.md: -------------------------------------------------------------------------------- 1 | Declarai supports the following providers: 2 | 3 | - [OpenAI](./openai.md) 4 | - [Azure OpenAI](./azure_openai.md) 5 | -------------------------------------------------------------------------------- /src/declarai/__init__.py: -------------------------------------------------------------------------------- 1 | from .declarai import Declarai, openai, azure_openai, magic 2 | from .operators.registry import register_operator, register_llm 3 | -------------------------------------------------------------------------------- /src/declarai/middleware/third_party/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Middlewares that require external dependencies. 3 | """ 4 | from .wandb_monitor import WandDBMonitorCreator 5 | -------------------------------------------------------------------------------- /src/declarai/python_parser/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Internal package for parsing user Python code into a ParsedFunction object. 3 | """ 4 | # from .parsers import ParsedFunction 5 | -------------------------------------------------------------------------------- /src/declarai/middleware/internal/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Middlewares offered by Declarai that do not require external dependencies. 3 | """ 4 | from .log_middleware import LoggingMiddleware 5 | -------------------------------------------------------------------------------- /docs/js/tablesort.js: -------------------------------------------------------------------------------- 1 | document$.subscribe(function() { 2 | var tables = document.querySelectorAll("article table:not([class])") 3 | tables.forEach(function(table) { 4 | new Tablesort(table) 5 | }) 6 | }) 7 | -------------------------------------------------------------------------------- /src/declarai/middleware/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Middleware module for Declarai. 3 | 4 | Middlewares are used to extend the functionality of the Declarai execution flow. 5 | """ 6 | 7 | from .internal import LoggingMiddleware 8 | -------------------------------------------------------------------------------- /src/declarai/operators/templates/instruct_function.py: -------------------------------------------------------------------------------- 1 | """ 2 | Instruct Function Template 3 | """ 4 | InstructFunctionTemplate = """{input_instructions} 5 | {input_placeholder} 6 | """ 7 | 8 | "." 
# for documentation purposes 9 | -------------------------------------------------------------------------------- /docs/css/custom.css: -------------------------------------------------------------------------------- 1 | .termynal-comment { 2 | color: #4a968f; 3 | font-style: italic; 4 | display: block; 5 | } 6 | 7 | .termy { 8 | /* For right to left languages */ 9 | direction: ltr; 10 | } 11 | 12 | .termy [data-termynal] { 13 | white-space: pre-wrap; 14 | } 15 | 16 | -------------------------------------------------------------------------------- /tests/python_parser/parser.py: -------------------------------------------------------------------------------- 1 | from declarai.python_parser.parser import PythonParser 2 | 3 | 4 | def test_parser(): 5 | class TestClass: 6 | pass 7 | 8 | def my_func(): 9 | pass 10 | 11 | # parsed_class = PythonParser(TestClass) 12 | parsed_func = PythonParser(my_func) 13 | assert parsed_func.name == "my_func" 14 | -------------------------------------------------------------------------------- /src/declarai/operators/openai_operators/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | OpenAI operators and LLMs. 3 | """ 4 | from .chat_operator import AzureOpenAIChatOperator, OpenAIChatOperator 5 | from .openai_llm import AzureOpenAILLM, OpenAIError, OpenAILLM, OpenAILLMParams 6 | from .task_operator import AzureOpenAITaskOperator, OpenAITaskOperator 7 | -------------------------------------------------------------------------------- /docs/overrides/main.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block announce %} 4 | 
🌟 Love Declarai? Star us on GitHub & start contributing! 🌟
5 | {% endblock %} 6 | -------------------------------------------------------------------------------- /src/declarai/operators/templates/chain_of_thought.py: -------------------------------------------------------------------------------- 1 | """Chain of thoughts templates.""" 2 | ChainOfThoughtsTemplate = """The following task should be done in {num_steps} steps: 3 | Use the output of the previous step as the input of the next step. 4 | {steps} 5 | 6 | Let's think step by step""" 7 | 8 | "." # for documentation purposes 9 | -------------------------------------------------------------------------------- /src/declarai/memory/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Memory module for Declarai interactions that includes message history. 3 | """ 4 | from .file import FileMessageHistory 5 | from .in_memory import InMemoryMessageHistory 6 | from .mongodb import MongoDBMessageHistory 7 | from .postgres import PostgresMessageHistory 8 | from .redis import RedisMessageHistory 9 | -------------------------------------------------------------------------------- /tests/operators/test_resolve_llm.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from declarai.operators.openai_operators import OpenAIError 4 | from declarai.operators import resolve_llm 5 | 6 | 7 | def test_resolve_openai_operator_no_token_raises_error(): 8 | with pytest.raises(OpenAIError): 9 | resolve_llm(provider="openai", model="davinci") 10 | -------------------------------------------------------------------------------- /src/declarai/evals/generation/unstructured_short_form.py: -------------------------------------------------------------------------------- 1 | from declarai import Declarai 2 | 3 | 4 | def unstructured_short_form(title: str) -> str: 5 | """ 6 | Write a 4 line poem based on the given title 7 | """ 8 | return Declarai.magic(title=title) 9 | 10 | 11 | unstructured_short_form_kwargs = {"title": "Using LLMs is fun!"} 12 | -------------------------------------------------------------------------------- /src/declarai/evals/generation/unstructured_long_form.py: -------------------------------------------------------------------------------- 1 | from declarai import Declarai 2 | 3 | 4 | def unstructured_long_form(title: str) -> str: 5 | """ 6 | Write a poem based on the given title 7 | The poem should have 4 verses 8 | """ 9 | return Declarai.magic(title) 10 | 11 | 12 | unstructured_long_form_kwargs = {"title": "Using LLMs is fun!"} 13 | -------------------------------------------------------------------------------- /.github/workflows/puiblish-package.yml: -------------------------------------------------------------------------------- 1 | name: Python package 2 | on: 3 | push: 4 | tags: 5 | - "v*.*.*" 6 | jobs: 7 | build: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v3 11 | - name: Build and publish to pypi 12 | uses: JRubics/poetry-publish@v1.17 13 | with: 14 | pypi_token: ${{ secrets.PYPI_TOKEN }} -------------------------------------------------------------------------------- /src/declarai/core/core_settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the core settings for the declarai project. 3 | In order to create proper separation from existing code on the client's environment, 4 | we require all environment variables used by `declarai` be prefixed with `DECLARAI_`. 
5 | This way we do not interfere with any existing environment variables. 6 | """ 7 | 8 | DECLARAI_PREFIX = "DECLARAI" 9 | -------------------------------------------------------------------------------- /src/declarai/operators/templates/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the shared templates for the operators. 3 | """ 4 | from .chain_of_thought import ChainOfThoughtsTemplate 5 | from .instruct_function import InstructFunctionTemplate 6 | from .output_prompt import compile_output_prompt, compile_output_schema_template 7 | from .output_structure import ( 8 | StructuredOutputChatPrompt, 9 | StructuredOutputInstructionPrompt, 10 | ) 11 | -------------------------------------------------------------------------------- /tests/operators/base/test_llm_settings.py: -------------------------------------------------------------------------------- 1 | from declarai.operators import LLMSettings 2 | 3 | 4 | def test_llm_settings(): 5 | llm_settings = LLMSettings( 6 | provider="test-provider", 7 | model="test-model", 8 | version="test-version", 9 | ) 10 | 11 | assert llm_settings.provider == "test-provider" 12 | assert llm_settings.model == "test-model-test-version" 13 | assert llm_settings.version == "test-version" 14 | -------------------------------------------------------------------------------- /docs/features/index.md: -------------------------------------------------------------------------------- 1 | # Features 2 | 3 | As Declarai is aimed at being completely extensible and configurable, we provide interfaces to override and 4 | interact with any of the default behaviours if you choose. 5 | 6 | We are still actively working on exposing all the necessary interfaces to make this possible, so if there are any 7 | interfaces you would like to see exposed, please vote or open an issue on our [GitHub](https://github.com/vendi-ai/declarai/issues) 8 | -------------------------------------------------------------------------------- /src/declarai/evals/metadata_significance/__init__.py: -------------------------------------------------------------------------------- 1 | from .simple_task_significance import ( 2 | generate_a_poem_no_metadata, 3 | generate_a_poem_only_return_doc, 4 | generate_a_poem_only_return_magic, 5 | generate_a_poem_only_return_type, 6 | generate_a_poem_return_all, 7 | generate_a_poem_return_doc_return_magic, 8 | generate_a_poem_return_type_return_doc, 9 | generate_a_poem_return_type_return_magic, 10 | simple_task_significance_kwargs, 11 | ) 12 | -------------------------------------------------------------------------------- /src/declarai/evals/extraction/__init__.py: -------------------------------------------------------------------------------- 1 | from .multiple_value import multi_value_extraction, multi_value_extraction_kwargs 2 | from .multiple_value_multi_types import ( 3 | multi_value_multi_type_extraction, 4 | multi_value_multi_type_extraction_kwargs, 5 | ) 6 | from .single_value import single_value_extraction, single_value_extraction_kwargs 7 | from .single_value_multi_types import ( 8 | single_value_multi_type_extraction, 9 | single_value_multi_type_extraction_kwargs, 10 | ) 11 | -------------------------------------------------------------------------------- /src/declarai/evals/extraction/single_value.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from declarai import Declarai 4 | 5 | 6 | def 
single_value_extraction(text: str) -> List[str]: 7 | """ 8 | Extract the phone number from the provided text 9 | :param text: content to extract phone number from 10 | :return: The phone number found in the provided text 11 | """ 12 | return Declarai.magic(text=text) 13 | 14 | 15 | single_value_extraction_kwargs = { 16 | "text": "Hey jenny,\nyou can call me at 124-3435-132.\n" 17 | } 18 | -------------------------------------------------------------------------------- /src/declarai/evals/generation/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib.util 2 | 3 | from .structured_open_ended import structured_open_ended, structured_open_ended_kwargs 4 | from .unstructured_long_form import ( 5 | unstructured_long_form, 6 | unstructured_long_form_kwargs, 7 | ) 8 | from .unstructured_short_form import ( 9 | unstructured_short_form, 10 | unstructured_short_form_kwargs, 11 | ) 12 | 13 | if importlib.util.find_spec("pydantic"): 14 | from .structured_strict_complex import ( 15 | structured_strict_complex, 16 | structured_strict_complex_kwargs, 17 | ) 18 | -------------------------------------------------------------------------------- /src/declarai/evals/extraction/multiple_value.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from declarai import Declarai 4 | 5 | 6 | def multi_value_extraction(text: str) -> List[str]: 7 | """ 8 | Extract the phone numbers from the provided text 9 | :param text: content to extract phone number from 10 | :return: The phone numbers that were identified in the input text 11 | """ 12 | return Declarai.magic(text=text) 13 | 14 | 15 | multi_value_extraction_kwargs = { 16 | "text": "Hey jenny,\nyou can call me at 124-3435-132.\n" 17 | "you can also reach me at +43-938-243-223" 18 | } 19 | -------------------------------------------------------------------------------- /src/declarai/operators/templates/output_structure.py: -------------------------------------------------------------------------------- 1 | """ 2 | The prompt templates for the format of the output. 3 | """ 4 | StructuredOutputInstructionPrompt = """You are a REST API endpoint. You only answer in JSON structures 5 | with a single key named '{return_name}', nothing else. 6 | The expected format is: 7 | {output_schema}""" 8 | "." # for documentation purposes 9 | 10 | 11 | StructuredOutputChatPrompt = """Your responses should be a JSON structure with a single key named '{return_name}', nothing else. The expected format is: {output_schema}""" # noqa 12 | 13 | "." 
# for documentation purposes 14 | -------------------------------------------------------------------------------- /src/declarai/evals/providers/openai.py: -------------------------------------------------------------------------------- 1 | from declarai import Declarai 2 | 3 | openai_models = { 4 | # "open_openai_gpt_3_5_latest": Declarai(provider="openai", model="gpt-3.5-turbo"), 5 | "open_openai_gpt_3_5_0301": Declarai( 6 | provider="openai", model="gpt-3.5-turbo", version="0301" 7 | ), 8 | "open_openai_gpt_3_5_0613": Declarai( 9 | provider="openai", model="gpt-3.5-turbo", version="0613" 10 | ), 11 | # "open_openai_gpt_4_latest": Declarai(provider="openai", model="gpt-4"), 12 | "open_openai_gpt_4_0613": Declarai( 13 | provider="openai", model="gpt-4", version="0613" 14 | ), 15 | } 16 | -------------------------------------------------------------------------------- /docs/css/extra.css: -------------------------------------------------------------------------------- 1 | .md-header{ 2 | background-color: #1a1c1b; 3 | } 4 | 5 | .md-annotation__index:before{ 6 | background: rgba(133, 236, 199, 0.62); 7 | } 8 | 9 | .md-nav__link{ 10 | justify-content: inherit; 11 | } 12 | .md-banner { 13 | background-color: #1E3D58; 14 | color: #ffffff; 15 | } 16 | .md-banner a { 17 | color: #DAA520; /* Adjust this for the link color inside the banner */ 18 | } 19 | 20 | .md-banner a:hover { 21 | color: #e31849; /* Adjust this for the link hover color inside the banner */ 22 | } 23 | 24 | /* Maximum space for text block */ 25 | .md-grid { 26 | max-width: 80%; /* or 100%, if you want to stretch to full-width */ 27 | } -------------------------------------------------------------------------------- /docs/beginners-guide/recap.md: -------------------------------------------------------------------------------- 1 | # Recap 2 | In this tutorial, you've covered the basics of **Declarai**! 3 | You should now be able to easily: 4 | 5 | - Create a Declarai task. 6 | - Control your task's behavior with native Python. 7 | - Use the `compile` method to view and debug your task template and final prompt! 8 | 9 | 10 | 
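For example, a minimal sketch that ties these steps together (reusing the task from earlier in this guide; the exact `compile` output may differ between versions):

```py
import declarai

gpt_35 = declarai.openai(model="gpt-3.5-turbo")

@gpt_35.task
def say_something() -> str:
    """
    Say something short to the world
    """

# Inspect the rendered prompt without calling the model
print(say_something.compile())
```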
11 | 12 | Previous 13 | 14 |
15 | 16 | ## Next steps 17 | 18 | You are welcome to explore our [**Features**](../../features/) section, where you can find the full list of supported features and how to use them. 19 | -------------------------------------------------------------------------------- /docs/img/Vendi-logo.svg: -------------------------------------------------------------------------------- 1 | (Vendi logo SVG markup omitted) -------------------------------------------------------------------------------- /src/declarai/evals/manipulation/data_manipulation_structured.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | from declarai import Declarai 4 | 5 | 6 | def data_manipulation(data: Dict[str, str]) -> Dict[str, str]: 7 | """ 8 | return a redacted version of the input data 9 | any potentially private data in the input should be replaced with "***" 10 | :param data: The data to anonymize 11 | :return: The anonymized data 12 | """ 13 | return Declarai.magic("redacted_info", data=data) 14 | 15 | 16 | data_manipulation_kwargs = { 17 | "data": { 18 | "name": "John Doe", 19 | "phone": "123-456-7890", 20 | "email": "john.doe@coolmail.com", 21 | "address": "9493 south bridge St.", 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/declarai/evals/extraction/single_value_multi_types.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | from declarai import Declarai 4 | 5 | 6 | def single_value_multi_type_extraction( 7 | text: str, info_fields: List[str] 8 | ) -> Dict[str, str]: 9 | """ 10 | Extract the provided info fields from the provided text 11 | :param text: content to extract info from 12 | :param info_fields: The information fields to extract 13 | :return: A mapping of extracted info field to the extracted value 14 | """ 15 | return Declarai.magic(text=text, info_fields=info_fields) 16 | 17 | 18 | single_value_multi_type_extraction_kwargs = { 19 | "text": "Hey jenny,\nyou can call me at 124-3435-132.\n", 20 | "info_fields": ["phone_number", "name"], 21 | } 22 | -------------------------------------------------------------------------------- /src/declarai/evals/extraction/multiple_value_multi_types.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | from declarai import Declarai 4 | 5 | 6 | def multi_value_multi_type_extraction( 7 | text: str, info_fields: List[str] 8 | ) -> Dict[str, List[str]]: 9 | """ 10 | Extract the provided info fields from the provided text 11 | :param text: content to extract info from 12 | :param info_fields: The information fields to extract 13 | :return: A mapping of extracted info field to the extracted value 14 | """ 15 | return Declarai.magic(text=text, info_fields=info_fields) 16 | 17 | 18 | multi_value_multi_type_extraction_kwargs = { 19 | "text": "Hey jenny,\nyou can call me at 124-3435-132.\n" 20 | "you can also reach me at +43-938-243-223", 21 | "info_fields": ["phone_number", "name"], 22 | } 23 | -------------------------------------------------------------------------------- /docs/features/multi-model-multi-provider.md: -------------------------------------------------------------------------------- 1 | # Multiple models / Multiple providers 2 | 3 | Declarai allows you to use multiple models from different providers in the same project. 
4 | All you need to do is configure separate Declarai instances for each model and provider. 5 | 6 | ```python 7 | import declarai 8 | 9 | # Configure the first Declarai instance 10 | declarai_gpt35 = declarai.openai(model="gpt-3.5-turbo") 11 | 12 | # Configure the second Declarai instance 13 | declarai_gpt4 = declarai.openai(model="gpt-4") 14 | 15 | # Now use the instances to create tasks (distinct names, so one task doesn't shadow the other) 16 | @declarai_gpt35.task 17 | def say_something_gpt35() -> str: 18 | """ 19 | Say something short to the world 20 | """ 21 | 22 | @declarai_gpt4.task 23 | def say_something_gpt4() -> str: 24 | """ 25 | Say something short to the world 26 | """ 27 | 28 | ``` 29 | -------------------------------------------------------------------------------- /src/declarai/evals/generation/structured_open_ended.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List 2 | 3 | from declarai import Declarai 4 | 5 | 6 | def structured_open_ended(name: str, skills: List[str]) -> Dict[str, Any]: 7 | """ 8 | Generate a business profile based on the given name and skills 9 | Produce a short bio and a mapping of the skills and where they can be used 10 | :param name: The name of the person 11 | :param skills: The skills of the person 12 | :return: The generated business profile 13 | """ 14 | return Declarai.magic(name=name, skills=skills) 15 | 16 | 17 | structured_open_ended_kwargs = { 18 | "name": "Bob grapes", 19 | "skills": [ 20 | "Management", 21 | "entrepreneurship", 22 | "programming", 23 | "investing", 24 | "Machine Learning", 25 | ], 26 | } 27 | -------------------------------------------------------------------------------- /.github/workflows/publish-docs.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | push: 4 | branches: 5 | - main 6 | permissions: 7 | contents: write 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | - uses: actions/setup-python@v4 14 | with: 15 | python-version: 3.8 16 | - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV 17 | - uses: actions/cache@v3 18 | with: 19 | key: mkdocs-material-${{ env.cache_id }} 20 | path: .cache 21 | restore-keys: | 22 | mkdocs-material- 23 | - run: pip install -r requirements-docs.txt 24 | - run: pip install git+https://${{ secrets.DECLARAI_MKDOCSTRINGS_INSIDERS_TOKEN }}@github.com/pawamoy-insiders/mkdocstrings-python.git@1.6.0.1.4.0 25 | - run: mkdocs gh-deploy --force 26 | -------------------------------------------------------------------------------- /src/declarai/python_parser/docstring_parsers/types.py: -------------------------------------------------------------------------------- 1 | from abc import ABC  # pylint: disable=E0611 2 | 3 | from declarai.python_parser.types import ( 4 | DocstringFreeform, 5 | DocstringParams, 6 | DocstringReturn, 7 | ) 8 | 9 | 10 | class BaseDocStringParser(ABC): 11 | """ 12 | Base class for docstring parsers. 
13 | """ 14 | 15 | @property 16 | def freeform(self) -> DocstringFreeform: 17 | """ 18 | Return the freeform docstring 19 | """ 20 | raise NotImplementedError() 21 | 22 | @property 23 | def params(self) -> DocstringParams: 24 | """ 25 | Return the params/arguments docstring 26 | """ 27 | raise NotImplementedError() 28 | 29 | @property 30 | def returns(self) -> DocstringReturn: 31 | """ 32 | Return the return docstring 33 | """ 34 | raise NotImplementedError() 35 | -------------------------------------------------------------------------------- /src/declarai/python_parser/types.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Optional, Tuple, TypeVar 2 | 3 | ParamName = TypeVar("ParamName", bound=str) 4 | ParamDoc = TypeVar("ParamDoc", bound=str) 5 | TypeName = TypeVar("TypeName", bound=str) 6 | ReturnName = TypeVar("ReturnName", bound=str) 7 | 8 | DocstringFreeform = TypeVar("DocstringFreeform", bound=str) 9 | DocstringParams = Dict[ParamName, ParamDoc] 10 | DocstringReturn = Tuple[ReturnName, TypeName] 11 | 12 | ArgName = TypeVar("ArgName", bound=str) 13 | ArgType = TypeVar("ArgType") 14 | 15 | 16 | AnnotatedType = TypeVar("AnnotatedType") 17 | 18 | 19 | class SignatureReturn: 20 | def __init__( 21 | self, 22 | name: Optional[str] = None, 23 | str_schema: Optional[str] = None, 24 | type_: Optional[AnnotatedType] = None, 25 | ): 26 | self.name = name 27 | self.str_schema = str_schema 28 | self.type_ = type_ 29 | -------------------------------------------------------------------------------- /docs/beginners-guide/simple-task.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide: 3 | - footer 4 | --- 5 | 6 | # Simple task :material-flash: 7 | The simplest Declarai usage is a function decorated with `@task`: 8 | 9 | ```py 10 | import declarai 11 | 12 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 13 | 14 | @gpt_35.task 15 | def say_something() -> str: 16 | """ 17 | Say something short to the world 18 | """ 19 | 20 | print(say_something()) 21 | 22 | > "Spread love and kindness to make the world a better place." 23 | ``` 24 | In **Declarai**, the docstring represents the task's description and is used to generate the prompt. 25 | 26 | By explaining what you want the task to do, the model will be able to understand it and reply with the proper result. 27 | 28 | 29 | 30 | 
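As a hypothetical sketch of where this goes next, arguments and a return annotation can be added to shape the prompt and the parsed response (building on the `gpt_35` instance above; argument handling is covered later in this guide):

```py
@gpt_35.task
def rank_by_severity(message: str) -> int:
    """
    Rank the severity of the provided message on a scale of 1-5
    :param message: The message to rank
    :return: The severity rank of the message
    """

print(rank_by_severity(message="The server room is on fire!"))  # e.g. 5
```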
31 | 32 | Next 33 | 34 |
 35 | -------------------------------------------------------------------------------- /src/declarai/operators/utils.py: -------------------------------------------------------------------------------- 1 | import jinja2 2 | 3 | 4 | def can_be_jinja(string: str) -> bool: 5 | """ 6 | Checks if a string can be compiled using the jinja2 template engine. 7 | """ 8 | if "{{" in string or "{%" in string or "{#" in string: 9 | try: 10 | jinja2.Template(string) 11 | return True 12 | except jinja2.exceptions.TemplateSyntaxError: 13 | return False 14 | else: 15 | return False 16 | 17 | 18 | def format_prompt_msg(_string: str, **kwargs) -> str: 19 | """ 20 | Formats a string using the jinja2 template engine if possible, otherwise uses the python string format. 21 | Args: 22 | _string: The string to format 23 | **kwargs: The kwargs to pass to the template 24 | 25 | Returns: The formatted string 26 | """ 27 | if can_be_jinja(_string): 28 | return jinja2.Template(_string).render(**kwargs) 29 | else: 30 | return _string.format(**kwargs) 31 | -------------------------------------------------------------------------------- /tests/operators/test_utils.py: -------------------------------------------------------------------------------- 1 | from declarai.operators.utils import can_be_jinja, format_prompt_msg 2 | 3 | 4 | # Tests for can_be_jinja function 5 | def test_can_be_jinja_valid(): 6 | assert can_be_jinja("Hello {{ name }}") 7 | 8 | 9 | def test_can_be_jinja_invalid(): 10 | assert not can_be_jinja("Hello {{ name") 11 | 12 | 13 | def test_can_be_jinja_no_jinja_syntax(): 14 | assert not can_be_jinja("Hello name") 15 | 16 | 17 | # Tests for format_prompt_msg function 18 | def test_format_prompt_msg_valid_jinja(): 19 | assert format_prompt_msg("Hello {{ name }}", name="John") == "Hello John" 20 | 21 | 22 | def test_format_prompt_msg_invalid_jinja(): 23 | assert format_prompt_msg("Hello {{ name", name="John") == "Hello { name" 24 | 25 | 26 | def test_format_prompt_msg_python_format(): 27 | assert format_prompt_msg("Hello {name}", name="John") == "Hello John" 28 | 29 | 30 | def test_format_prompt_msg_no_format(): 31 | assert format_prompt_msg("Hello name") == "Hello name" 32 | -------------------------------------------------------------------------------- /docs/features/chat/debugging-chat.md: -------------------------------------------------------------------------------- 1 | # Debugging Chat :bug: 2 | 3 | Similarly to debugging tasks, understanding the prompts being sent to the LLM is crucial to debugging chatbots. 4 | Declarai exposes the `compile` method for chat instances as well! 5 | 6 | ## Compiling chat 7 | ```py 8 | import declarai 9 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 10 | 11 | @gpt_35.experimental.chat 12 | class SQLBot: 13 | """ 14 | You are a sql assistant. You help with SQL queries with one-line answers. 15 | """ 16 | greeting = "Hello, I am a SQL assistant. How can I assist you today?" 17 | 18 | sql_bot = SQLBot() 19 | print(sql_bot.compile()) 20 | ``` 21 | ```py 22 | > { 23 | 'messages': 24 | [ 25 | "system: You are a sql assistant. You help with SQL queries with one-line answers.", 26 | "assistant: Hello, I am a SQL assistant. How can I assist you today?" 27 | ] 28 | } 29 | ``` 30 | Wonderful, right? We can view the chatbot's messages in the format they will be sent to the language model. 
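These rendered strings correspond to the `Message` objects defined in `src/declarai/operators/message.py`; a small sketch of that mapping (an illustration, not the exact internal call):

```py
from declarai.operators.message import Message, MessageRole

msg = Message(
    message="You are a sql assistant. You help with SQL queries with one-line answers.",
    role=MessageRole.system,
)
# Message.__repr__ renders "<role>: <message>", matching the compile() output above
print(repr(msg))  # system: You are a sql assistant. You help with SQL queries with one-line answers.
```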
31 | -------------------------------------------------------------------------------- /src/declarai/memory/base.py: -------------------------------------------------------------------------------- 1 | """ 2 | Base class for the memory module. 3 | """ 4 | from abc import ABC, abstractmethod  # pylint: disable=E0611 5 | from typing import List 6 | 7 | from declarai.operators import Message 8 | 9 | 10 | class BaseChatMessageHistory(ABC): 11 | """ 12 | Abstract class to store the chat message history. 13 | 14 | See `InMemoryMessageHistory` for the default implementation. 15 | 16 | """ 17 | 18 | @property 19 | @abstractmethod 20 | def history(self) -> List[Message]: 21 | """ 22 | Return the chat message history 23 | 24 | Returns: 25 | List of Message objects 26 | """ 27 | 28 | @abstractmethod 29 | def add_message(self, message: Message) -> None: 30 | """ 31 | Add a Message object to the state. 32 | 33 | Args: 34 | message: Message object to add to the state 35 | """ 36 | 37 | @abstractmethod 38 | def clear(self) -> None: 39 | """ 40 | Remove all messages from the state 41 | """ 42 | -------------------------------------------------------------------------------- /src/declarai/memory/in_memory.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the in-memory implementation of the chat message history. 3 | """ 4 | from typing import List 5 | 6 | from pydantic.main import BaseModel 7 | 8 | from declarai.operators import Message 9 | 10 | from .base import BaseChatMessageHistory 11 | 12 | 13 | class InMemoryMessageHistory(BaseChatMessageHistory, BaseModel): 14 | """ 15 | This memory implementation stores all messages in memory in a list. 16 | """ 17 | 18 | messages: List[Message] = [] 19 | 20 | @property 21 | def history(self) -> List[Message]: 22 | """ 23 | Returns the list of messages stored in memory. 24 | :return: List of messages 25 | """ 26 | return self.messages 27 | 28 | def add_message(self, message: Message) -> None: 29 | """ 30 | Adds a message to the list of messages stored in memory. 
31 | :param message: the message content and role 32 | """ 33 | self.messages.append(message) 34 | 35 | def clear(self) -> None: 36 | self.messages = [] 37 | -------------------------------------------------------------------------------- /tests/orchestrator/test_future_llm_task.py: -------------------------------------------------------------------------------- 1 | # from unittest.mock import MagicMock 2 | # 3 | # from declarai.orchestrator.future_llm_task import FutureTask 4 | # 5 | # 6 | # def test_future_llm_task(): 7 | # exec_func = MagicMock() 8 | # exec_func.return_value = "output-value" 9 | # kwargs = { 10 | # "input": "input-value", 11 | # "output": "output-value", 12 | # } 13 | # compiled_template = "{input} | {output}" 14 | # populated_prompt = "input-value | output-value" 15 | # 16 | # future_llm_task = FutureTask( 17 | # exec_func=exec_func, 18 | # kwargs=kwargs, 19 | # compiled_template=compiled_template, 20 | # populated_prompt=populated_prompt, 21 | # ) 22 | # 23 | # task_res = future_llm_task() 24 | # assert task_res == "output-value" 25 | # exec_func.assert_called_with(populated_prompt) 26 | # 27 | # assert future_llm_task.populated_prompt == populated_prompt 28 | # 29 | # assert future_llm_task.compiled_template == compiled_template 30 | # 31 | # assert future_llm_task.task_kwargs == kwargs 32 | -------------------------------------------------------------------------------- /tests/python_parser/test_magic_parser.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | from unittest.mock import MagicMock 3 | 4 | from declarai.python_parser.magic_parser import Magic, extract_magic_args 5 | 6 | magic = MagicMock() 7 | 8 | 9 | def test_magic_parser(): 10 | """ 11 | TODO: This doesn't currently support aliases in the magic function, only string literals 12 | """ 13 | 14 | def mock_magic_parser_function(arg: str): 15 | return magic( 16 | "return_name", 17 | task_desc="This is a task description", 18 | input_desc={"arg": "This is an argument desc"}, 19 | output_desc="This is an output desc", 20 | arg=arg, 21 | ) 22 | 23 | code = inspect.getsource(mock_magic_parser_function) 24 | _magic = extract_magic_args(code) 25 | 26 | assert isinstance(_magic, Magic) 27 | assert _magic.return_name == "return_name" 28 | assert _magic.task_desc == "This is a task description" 29 | assert _magic.input_desc == {"arg": "This is an argument desc"} 30 | assert _magic.output_desc == "This is an output desc" 31 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | # .coveragerc to control coverage.py 2 | 3 | [run] 4 | omit = 5 | src/declarai/operators/shared/templates/** 6 | **/test_* 7 | **/types.py 8 | **/types/*.py 9 | 10 | # This is currently in experimental and not properly covered with tests yet 11 | src/declarai/evals/** 12 | src/declarai/middlewares/** 13 | src/declarai/orchestrator/sequence.py 14 | 15 | [report] 16 | # Regexes for lines to exclude from consideration 17 | exclude_lines = 18 | # Have to re-enable the standard pragma 19 | pragma: no cover 20 | @overload 21 | pass 22 | 23 | # Don't complain about missing debug-only code: 24 | def __repr__ 25 | if self\.debug 26 | 27 | # Don't complain if tests don't hit defensive assertion code: 28 | raise AssertionError 29 | raise NotImplementedError 30 | 31 | # Don't complain if non-runnable code isn't run: 32 | if 0: 33 | if __name__ == .__main__.: 34 | 35 | # 
Don't complain about abstract methods, they aren't run: 36 | @(abc\.)?abstractmethod 37 | 38 | ignore_errors = True 39 | 40 | [html] 41 | directory = coverage_html_report -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 vendi-ai 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /docs/features/chat/customizing-chat-response.md: -------------------------------------------------------------------------------- 1 | # Customizing the Chat Response 2 | 3 | The default response type of the language model messages is `str`. However, you can override the `send` method to return a different type.
4 | Just like tasks, you can control the response type by declaring the return type of the `send` method. 5 | 6 | ```py 7 | from typing import List 8 | import declarai 9 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 10 | 11 | @gpt_35.experimental.chat 12 | class SQLBot: 13 | """ 14 | You are a sql assistant.""" 15 | ... 16 | 17 | def send(self, message: str) -> List[str]: 18 | ... 19 | 20 | sql_bot = SQLBot() 21 | print(sql_bot.send(message="Offer two sql queries that use the 'SELECT' operation")) 22 | > [ 23 | "SELECT * FROM table_name;", 24 | "SELECT column_name FROM table_name;" 25 | ] 26 | ``` 27 | 28 | !!! warning 29 | 30 | As with tasks, the message is sent along with the expected return types. 31 | This means that if you are not careful, a message conflicting with the expected results could cause weird behavior in the LLM responses.
32 | For more best practices, see [here](../../../best-practices). 33 | -------------------------------------------------------------------------------- /src/declarai/operators/openai_operators/chat_operator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Chat implementation of OpenAI operator. 3 | """ 4 | import logging 5 | from declarai.operators.openai_operators.openai_llm import AzureOpenAILLM, OpenAILLM 6 | from declarai.operators.operator import BaseChatOperator 7 | from declarai.operators.registry import register_operator 8 | 9 | logger = logging.getLogger("OpenAIChatOperator") 10 | 11 | 12 | @register_operator(provider="openai", operator_type="chat") 13 | class OpenAIChatOperator(BaseChatOperator): 14 | """ 15 | Chat implementation of the OpenAI operator. This is a child of the BaseChatOperator class. See the BaseChatOperator class for further documentation. 16 | 17 | Attributes: 18 | llm: OpenAILLM 19 | """ 20 | 21 | llm: OpenAILLM 22 | 23 | 24 | @register_operator(provider="azure-openai", operator_type="chat") 25 | class AzureOpenAIChatOperator(OpenAIChatOperator): 26 | """ 27 | Chat implementation of the Azure OpenAI operator. This is a child of the OpenAIChatOperator class. See the BaseChatOperator class for further documentation. 28 | 29 | Attributes: 30 | llm: AzureOpenAILLM 31 | """ 32 | 33 | llm: AzureOpenAILLM 34 | -------------------------------------------------------------------------------- /tests/python_parser/test_function_parser.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from declarai.python_parser.parser import PythonParser, SignatureReturn 4 | 5 | 6 | def test_function_parser(): 7 | def my_func(a_param: str, b_param: int) -> List[str]: 8 | """ 9 | This is the method docstring 10 | :param a_param: this param is a string 11 | :param b_param: this param is an integer 12 | :return: This returns a list of strings 13 | """ 14 | 15 | parsed_func = PythonParser(my_func) 16 | assert parsed_func.name == "my_func" 17 | assert parsed_func.signature_kwargs == {"a_param": str, "b_param": int} 18 | return_signature = SignatureReturn( 19 | name="typing.List[str]", 20 | str_schema="List[string]", 21 | type_=List[str], 22 | ) 23 | assert parsed_func.signature_return.name == return_signature.name 24 | assert parsed_func.signature_return.str_schema == return_signature.str_schema 25 | assert parsed_func.signature_return.type_ == return_signature.type_ 26 | assert parsed_func.docstring_freeform == "This is the method docstring" 27 | assert parsed_func.docstring_params == { 28 | "a_param": "this param is a string", 29 | "b_param": "this param is an integer", 30 | } 31 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp==3.8.5 ; python_version >= "3.8" and python_version < "4.0" 2 | aiosignal==1.3.1 ; python_version >= "3.8" and python_version < "4.0" 3 | async-timeout==4.0.2 ; python_version >= "3.8" and python_version < "4.0" 4 | attrs==23.1.0 ; python_version >= "3.8" and python_version < "4.0" 5 | certifi==2023.7.22 ; python_version >= "3.8" and python_version < "4.0" 6 | charset-normalizer==3.2.0 ; python_version >= "3.8" and python_version < "4.0" 7 | colorama==0.4.6 ; python_version >= "3.8" and python_version < "4.0" and platform_system == "Windows" 8 | frozenlist==1.3.3 ; python_version >= "3.8" and python_version < "4.0" 9 | 
idna==3.4 ; python_version >= "3.8" and python_version < "4.0" 10 | jsonref==1.1.0 ; python_version >= "3.8" and python_version < "4.0" 11 | multidict==6.0.4 ; python_version >= "3.8" and python_version < "4.0" 12 | openai==0.27.8 ; python_version >= "3.8" and python_version < "4.0" 13 | pydantic==1.10.12 ; python_version >= "3.8" and python_version < "4.0" 14 | requests==2.31.0 ; python_version >= "3.8" and python_version < "4.0" 15 | tqdm==4.65.0 ; python_version >= "3.8" and python_version < "4.0" 16 | typing-extensions==4.7.1 ; python_version >= "3.8" and python_version < "4.0" 17 | urllib3==2.0.4 ; python_version >= "3.8" and python_version < "4.0" 18 | yarl==1.9.2 ; python_version >= "3.8" and python_version < "4.0" 19 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "declarai" 3 | version = "0.1.13" 4 | description = "Declarai, turning Python code into LLM tasks, easy to use, and production-ready." 5 | authors = ["Aviv Almashanu "] 6 | readme = "README.md" 7 | packages = [{include = "declarai", from = "src"}] 8 | 9 | [tool.poetry.dependencies] 10 | python = "^3.8" 11 | openai = "^0.27.8" 12 | pydantic = "^1.8.2" 13 | jsonref = "^1.1.0" 14 | wandb = {version = "^0.15.8", optional = true} 15 | jinja2 = "^3.1.2" 16 | 17 | 18 | [tool.poetry.group.dev.dependencies] 19 | pylint = "^2.13.9" 20 | black = "^23.3.0" 21 | isort = "^5.11.5" 22 | pytest = "^7.4.0" 23 | pytest-cov = "^4.1.0" 24 | rich = "^13.4.2" 25 | mkdocs-material = "9.2.0b1" 26 | mkdocstrings = {version = "^0.22.0"} 27 | mkdocs-autorefs = "^0.5.0" 28 | mkdocstrings-python = "^1.5.0" 29 | mkdocstrings-crystal = "0.3.6" 30 | mkdocs-gen-files = "^0.5.0" 31 | mkdocs-literate-nav = "^0.6.0" 32 | mkdocs-section-index = "^0.3.5" 33 | pylint-pydantic = "^0.2.4" 34 | 35 | [tool.poetry.extras] 36 | wandb = ["wandb"] 37 | postgresql = ["psycopg2"] 38 | redis = ["redis"] 39 | mongo = ["pymongo"] 40 | 41 | [build-system] 42 | requires = ["poetry-core"] 43 | build-backend = "poetry.core.masonry.api" 44 | 45 | [tool.isort] 46 | profile = "black" 47 | line_length = 150 48 | 49 | [tool.pylint.format] 50 | max-line-length = "150" 51 | -------------------------------------------------------------------------------- /docs/features/chat/chat-memory/file-memory.md: -------------------------------------------------------------------------------- 1 | # File Memory :material-file: 2 | 3 | For chats that require a persistent message history, you can use a file to store the conversation history. 4 | 5 | ## Set file memory 6 | 7 | ```py 8 | import declarai 9 | from declarai.memory import FileMessageHistory 10 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 11 | 12 | @gpt_35.experimental.chat(chat_history=FileMessageHistory("sql_bot_history.txt")) # (1)! 13 | class SQLBot: 14 | """ 15 | You are a sql assistant. You help with SQL related questions with one-line answers. 16 | """ 17 | 18 | sql_bot = SQLBot() 19 | ``` 20 | 21 | 22 | 1. The file path is not mandatory. If you do not provide one, the history file is created under a temporary directory. 23 | 24 | We can also initialize the `FileMessageHistory` class with a custom file path. 25 | 26 | 27 | ## Set file memory at runtime 28 | If you want to set the file memory at runtime, pass a `chat_history` instance when constructing the chat: 
29 | 30 | ```py 31 | import declarai 32 | from declarai.memory import FileMessageHistory 33 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 34 | 35 | @gpt_35.experimental.chat 36 | class SQLBot: 37 | """ 38 | You are a sql assistant. You help with SQL related questions with one-line answers. 39 | """ 40 | 41 | sql_bot = SQLBot(chat_history=FileMessageHistory("sql_bot_history.txt")) 42 | ``` 43 | -------------------------------------------------------------------------------- /tests/python_parser/docstring_parsers/reST/test_parser.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from declarai.python_parser.docstring_parsers.reST.parser import ReSTDocstringParser 4 | 5 | multiline_docstring = """This is the documentation\nwith multiple lines 6 | :param param1: This is the first parameter 7 | with additional description 8 | :param param2: This is the second parameter 9 | with more details 10 | :return: This is the return value\n with multiple lines 11 | """ 12 | 13 | 14 | @pytest.mark.parametrize( 15 | "docstring, freeform, params, returns", 16 | [ 17 | ( 18 | multiline_docstring, 19 | "This is the documentation\nwith multiple lines", 20 | { 21 | "param1": "This is the first parameter\n with additional description", 22 | "param2": "This is the second parameter\n with more details", 23 | }, 24 | ("", "This is the return value\n with multiple lines"), 25 | ), 26 | ], 27 | ) 28 | def test_reST_docstring_parser(docstring, freeform, params, returns): 29 | parsed_docstring = ReSTDocstringParser(docstring) 30 | assert parsed_docstring.freeform == freeform 31 | for param_name, param_doc in parsed_docstring.params.items(): 32 | assert param_name in params 33 | assert param_doc == params[param_name] 34 | assert parsed_docstring.returns == returns 35 | -------------------------------------------------------------------------------- /docs/features/chat/advanced-initialization.md: -------------------------------------------------------------------------------- 1 | # Initialization :beginner: 2 | 3 | Although using the docstring and class properties is the recommended way to initialize a chatbot, it is not the only way. 4 | In cases where relying on the class docstring and properties is problematic, we allow manually passing the chat arguments to the class constructor.
5 | This takes away from the magic that Declarai provides, but we are aware not everyone may be comfortable with it. 6 | 7 | 8 | ## Initialization by passing parameters 9 | Let's see how we can initialize a chatbot by passing the `system` and `greeting` parameters as arguments. 10 | 11 | ```py 12 | import declarai 13 | 14 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 15 | @gpt_35.experimental.chat 16 | class SQLBot: 17 | ... 18 | 19 | 20 | sql_bot = SQLBot( 21 | system="You are a sql assistant. You help with SQL queries with one-line answers.", 22 | greeting="Hello, I am a SQL assistant. How can I assist you today?", 23 | ) 24 | 25 | print(sql_bot.send("Tell me your preferred SQL operation")) 26 | ``` 27 | 28 | ```py 29 | > "As an SQL assistant, I don't have a preferred SQL operation. I am here to assist with any SQL operation you need help with." 30 | ``` 31 | 32 | 33 | ## Next steps 34 | 35 | You are welcome to explore our [**Features**](../../../features/) section, where you can find the full list of supported features and how to use them. 36 | -------------------------------------------------------------------------------- /src/declarai/operators/message.py: -------------------------------------------------------------------------------- 1 | """ 2 | Message definition for the operators. 3 | """ 4 | from enum import Enum 5 | 6 | from pydantic import BaseModel 7 | 8 | 9 | class MessageRole(str, Enum): 10 | """ 11 | Message role enum for the Message class to indicate the role of the message in the chat. 12 | 13 | Attributes: 14 | system: The message is the system message, usually used as the first message in the chat. 15 | user: Every message that is sent by the user. 16 | assistant: Every message that is sent by the assistant. 17 | function: Every message that is sent by the assistant that is a function call. 18 | """ 19 | 20 | system: str = "system" 21 | user: str = "user" 22 | assistant: str = "assistant" 23 | function: str = "function" 24 | 25 | 26 | class Message(BaseModel): 27 | """ 28 | Represents a message in the chat. 
29 | 30 | Args: 31 | message: The message string 32 | role: The role of the message in the chat 33 | 34 | Attributes: 35 | message: The message string 36 | role: The role of the message in the chat 37 | """ 38 | 39 | message: str 40 | role: MessageRole 41 | 42 | def __str__(self): 43 | return self.message 44 | 45 | def __repr__(self): 46 | return f"{self.role.value}: {self.message}" 47 | 48 | def __eq__(self, other): 49 | return self.message == other.message and self.role == other.role 50 | -------------------------------------------------------------------------------- /src/declarai/evals/generation/structured_strict_complex.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from pydantic import BaseModel 4 | 5 | from declarai import Declarai 6 | 7 | 8 | class TimeFrame(BaseModel): 9 | start: int 10 | end: Optional[int] 11 | 12 | 13 | class BusinessTrait(BaseModel): 14 | time_frame: TimeFrame 15 | title: str 16 | description: str 17 | experience: str 18 | 19 | 20 | class Recommendation(BaseModel): 21 | recommender: str 22 | recommendation: str 23 | 24 | 25 | class BusinessProfile(BaseModel): 26 | bio: str 27 | traits: List[BusinessTrait] 28 | previous_jobs: List[str] 29 | recommendations: List[Recommendation] 30 | 31 | 32 | def structured_strict_complex(name: str, skills: List[str]) -> BusinessProfile: 33 | """ 34 | Generate a business profile based on the given name and skills 35 | Produce a short bio and a mapping of the skills and where they can be used 36 | for fields with missing data, you can make up data to fill in the gaps 37 | :param name: The name of the person 38 | :param skills: The skills of the person 39 | :return: The generated business profile 40 | """ 41 | return Declarai.magic(name=name, skills=skills) 42 | 43 | 44 | structured_strict_complex_kwargs = { 45 | "name": "Bob grapes", 46 | "skills": [ 47 | "Management", 48 | "entrepreneurship", 49 | "programming", 50 | "investing", 51 | "Machine Learning", 52 | ], 53 | } 54 | -------------------------------------------------------------------------------- /src/declarai/operators/openai_operators/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Environment level configurations for working with openai and Azure openai providers. 3 | """ 4 | import os 5 | from declarai.core.core_settings import DECLARAI_PREFIX 6 | 7 | OPENAI_API_KEY: str = os.getenv( 8 | f"{DECLARAI_PREFIX}_OPENAI_API_KEY", os.getenv("OPENAI_API_KEY", "") 9 | ) # pylint: disable=E1101 10 | "API key for openai provider." 11 | 12 | OPENAI_MODEL: str = os.getenv( 13 | f"{DECLARAI_PREFIX}_OPENAI_MODEL", "gpt-3.5-turbo" 14 | ) # pylint: disable=E1101 15 | "Model name for openai provider." 16 | 17 | # Azure specific configurations 18 | AZURE_OPENAI_KEY: str = os.getenv( 19 | f"{DECLARAI_PREFIX}_AZURE_OPENAI_KEY", os.getenv("AZURE_OPENAI_KEY", "") 20 | ) # pylint: disable=E1101 21 | "API key for Azure openai provider." 22 | 23 | AZURE_OPENAI_API_BASE: str = os.getenv( 24 | f"{DECLARAI_PREFIX}_AZURE_OPENAI_API_BASE", 25 | os.getenv("AZURE_OPENAI_API_BASE", ""), 26 | ) # pylint: disable=E1101 27 | "Endpoint for Azure openai provider." 28 | 29 | AZURE_API_VERSION: str = os.getenv( 30 | f"{DECLARAI_PREFIX}_AZURE_API_VERSION", 31 | os.getenv("AZURE_API_VERSION", "2023-05-15"), 32 | ) # pylint: disable=E1101 33 | "API version for Azure openai provider." 
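# Note: every setting in this module prefers the DECLARAI_-prefixed environment
# variable and only falls back to the provider's conventional variable name.
# A minimal sketch of the same lookup order (assuming DECLARAI_PREFIX resolves
# to "DECLARAI"; this snippet is illustrative and not part of the module):
#
#     import os
#     api_version = os.getenv(
#         "DECLARAI_AZURE_API_VERSION",
#         os.getenv("AZURE_API_VERSION", "2023-05-15"),
#     )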
34 | 35 | 36 | DEPLOYMENT_NAME: str = os.getenv( 37 | f"{DECLARAI_PREFIX}_AZURE_OPENAI_DEPLOYMENT_NAME", 38 | os.getenv("DEPLOYMENT_NAME", ""), 39 | ) # pylint: disable=E1101 40 | "Deployment name for the model in Azure openai provider." 41 | -------------------------------------------------------------------------------- /tests/operators/openai_operators/test_chat_operator.py: -------------------------------------------------------------------------------- 1 | from declarai.operators import OpenAILLM, OpenAIChatOperator 2 | from declarai.python_parser.parser import PythonParser 3 | 4 | 5 | def test_chat_openai_operator(): 6 | openai_operator_class = OpenAIChatOperator 7 | llm = OpenAILLM( 8 | openai_token="test-token", 9 | model="test-model", 10 | ) 11 | 12 | class MyChat: 13 | """ 14 | This is my beloved chat 15 | """ 16 | 17 | parsed = PythonParser(MyChat) 18 | openai_operator_instance = openai_operator_class(parsed=parsed, llm=llm) 19 | assert openai_operator_instance.parsed.name == MyChat.__name__ 20 | compiled = openai_operator_instance.compile(messages=[]) 21 | assert isinstance(compiled, dict) 22 | messages = list(compiled["messages"]) 23 | assert len(messages) == 1 24 | assert messages[0].message == "This is my beloved chat" 25 | assert messages[0].role == "system" 26 | 27 | # def openai_task(): 28 | # ... 29 | # 30 | # parsed = PythonParser(openai_task) 31 | # openai_operator_instance = openai_operator_class(parsed=parsed, llm=llm) 32 | # assert openai_operator_instance.parsed.name == openai_task.__name__ 33 | # compiled = openai_operator_instance.compile() 34 | # assert isinstance(compiled, dict) 35 | # messages = list(compiled["messages"]) 36 | # assert len(messages) == 1 37 | # assert messages[0].message == "\n\n" 38 | # assert messages[0].role == "user" 39 | -------------------------------------------------------------------------------- /tests/operators/openai_operators/test_operator.py: -------------------------------------------------------------------------------- 1 | from declarai.operators import OpenAILLM, OpenAITaskOperator 2 | from declarai.python_parser.parser import PythonParser 3 | 4 | 5 | def test_openai_operator(): 6 | openai_operator_class = OpenAITaskOperator 7 | llm = OpenAILLM( 8 | openai_token="test-token", 9 | model="test-model", 10 | ) 11 | 12 | def openai_task(argument: str) -> str: 13 | """ 14 | This is a test task 15 | :param argument: this is a test argument 16 | :return: this is a test return 17 | """ 18 | 19 | parsed = PythonParser(openai_task) 20 | openai_operator_instance = openai_operator_class(parsed=parsed, llm=llm) 21 | assert openai_operator_instance.parsed.name == openai_task.__name__ 22 | compiled = openai_operator_instance.compile() 23 | assert isinstance(compiled, dict) 24 | messages = list(compiled["messages"]) 25 | assert len(messages) == 1 26 | assert ( 27 | messages[0].message == "This is a test task\nInputs:\nargument: {argument}\n\n" 28 | ) 29 | assert messages[0].role == "user" 30 | 31 | def openai_task(): 32 | ... 
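# A task with no docstring, parameters, or return annotation should still
# compile; the assertions below expect a single, essentially empty user message.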
33 | 34 | parsed = PythonParser(openai_task) 35 | openai_operator_instance = openai_operator_class(parsed=parsed, llm=llm) 36 | assert openai_operator_instance.parsed.name == openai_task.__name__ 37 | compiled = openai_operator_instance.compile() 38 | assert isinstance(compiled, dict) 39 | messages = list(compiled["messages"]) 40 | assert len(messages) == 1 41 | assert messages[0].message == "\n\n" 42 | assert messages[0].role == "user" 43 | -------------------------------------------------------------------------------- /scripts/gen_ref_pages.py: -------------------------------------------------------------------------------- 1 | """Generate the code reference pages and navigation.""" 2 | 3 | from pathlib import Path 4 | 5 | import mkdocs_gen_files 6 | 7 | nav = mkdocs_gen_files.Nav() 8 | exclude_patterns = [Path("src", "declarai", "evals")] 9 | mod_symbol = '' 10 | for path in sorted(Path("src").rglob("*.py")): 11 | skip = False 12 | for pattern in exclude_patterns: 13 | try: 14 | if path.relative_to(pattern): 15 | print(f"Skipping {path}") 16 | skip = True 17 | break 18 | except ValueError: 19 | pass 20 | if skip: 21 | continue 22 | 23 | module_path = path.relative_to("src").with_suffix("") 24 | doc_path = path.relative_to("src").with_suffix(".md") 25 | full_doc_path = Path("reference", doc_path) 26 | 27 | parts = tuple(module_path.parts) 28 | 29 | if parts[-1] == "__init__": 30 | parts = parts[:-1] 31 | doc_path = doc_path.with_name("index.md") 32 | full_doc_path = full_doc_path.with_name("index.md") 33 | elif parts[-1] == "__main__": 34 | continue 35 | 36 | nav_parts = [f"{mod_symbol} {part}" for part in parts] 37 | nav[tuple(nav_parts)] = doc_path.as_posix() 38 | 39 | with mkdocs_gen_files.open(full_doc_path, "w") as fd: 40 | ident = ".".join(parts) 41 | fd.write(f"::: {ident}") 42 | 43 | mkdocs_gen_files.set_edit_path(full_doc_path, ".." / path) 44 | 45 | with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file: 46 | nav_file.writelines(nav.build_literate_nav()) 47 | -------------------------------------------------------------------------------- /docs/contribute.md: -------------------------------------------------------------------------------- 1 | # Contribute :rocket: 2 | 3 | Do you like **Declarai**? 4 | 5 | Spread the word! 6 | 7 | - **Star** :star: the repository 8 | - **Share** the [link](https://github.com/vendi-ai/declarai) to the repository with your friends and colleagues 9 | - **Watch** the github repository to get notified about new releases. 10 | 11 | ## Development :material-source-pull: 12 | Once you have cloned the repository, install the requirements: 13 | 14 | Using `venv` 15 | 16 | === "Poetry" 17 | 18 | 19 | ```console 20 | poetry install 21 | ``` 22 | 23 | 24 | === "Venv" 25 | 26 | 27 | ```console 28 | python -m venv env 29 | source env/bin/activate 30 | python -m pip install --upgrade pip 31 | pip install -r requirements.txt 32 | ``` 33 | 34 | 35 | ## Documentation :material-book-open-variant: 36 | 37 | The documentation is built using [MkDocs](https://www.mkdocs.org/). 38 | To view the documentation locally, run the following command: 39 | 40 |
41 | 42 | ```console 43 | $ cd docs 44 | $ mkdocs serve 45 | INFO - [11:37:30] Serving on http://127.0.0.1:8000/ 46 | ``` 47 |
48 | 49 | ## Testing 50 | The testing framework used is [pytest](https://docs.pytest.org/en/stable/). 51 | To run the tests, run the following command: 52 | 53 | 54 | ```bash 55 | pytest --cov=src 56 | ``` 57 | 58 | ## Pull Requests 59 | It should be extremely easy to contribute to this project. 60 | If you have any ideas, just open a pull request and we will discuss it. 61 | 62 | ```bash 63 | git checkout -b my-new-feature 64 | git commit -am 'Add some feature' 65 | git push origin my-new-feature 66 | ``` 67 | -------------------------------------------------------------------------------- /docs/features/chat/index.md: -------------------------------------------------------------------------------- 1 | # Chatbots :speech_balloon: 2 | 3 | Unlike tasks, chatbots are meant to keep the conversation going.
4 | Instead of executing a single operation, they are built to manage conversation context over time. 5 | 6 | Declarai can be used to create chatbots. The simplest way to do this is to use the `@declarai.experimental.chat` decorator. 7 | 8 | We declare a "system prompt" in the docstring of the class definition.
9 | The system prompt is the initial command that instructs the bot on who they are and what's expected in the conversation. 10 | 11 | 12 | ```py 13 | import declarai 14 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 15 | 16 | @gpt_35.experimental.chat 17 | class SQLBot: 18 | """ 19 | You are a sql assistant. You help with SQL related questions 20 | """ # (1)! 21 | ``` 22 | 23 | 1. The docstring represents the chatbot's description and is used to generate the prompt. 24 | 25 | ```py 26 | sql_bot = SQLBot() 27 | sql_bot.send("When should I use a LEFT JOIN?") # (1)! 28 | 29 | > "You should use a LEFT JOIN when you want to return all rows from the left table, and the matched rows from the right table." 30 | ``` 31 | 32 | 1. The created bot exposes a `send` method, by which you can interact and send messages. 33 | Every call to `send` results in a response from the bot. 34 | 35 | 36 | !!! tip 37 | You can also declare the chatbot system prompt by doing the following: 38 | ```py 39 | @declarai.experimental.chat 40 | class SQLBot: 41 | pass 42 | sql_bot = SQLBot(system="You are a sql assistant. You help with SQL related questions with one-line answers.") 43 | ``` 44 | -------------------------------------------------------------------------------- /tests/operators/test_operator_resolver.py: -------------------------------------------------------------------------------- 1 | import os 2 | from unittest.mock import patch 3 | 4 | import pytest 5 | 6 | from declarai.operators import ( 7 | LLMSettings, 8 | resolve_operator, 9 | resolve_llm, 10 | AzureOpenAITaskOperator, 11 | ) 12 | from declarai.operators.openai_operators import OpenAIError, OpenAITaskOperator 13 | 14 | 15 | def test_resolve_openai_operator_with_token(): 16 | kwargs = {"openai_token": "test_token"} 17 | llm = resolve_llm(provider="openai", model="davinci", **kwargs) 18 | operator = resolve_operator(llm_instance=llm, operator_type="task") 19 | assert operator == OpenAITaskOperator 20 | assert llm.model == "davinci" 21 | assert llm.api_key == kwargs["openai_token"] 22 | 23 | 24 | @patch( 25 | "declarai.operators.openai_operators.openai_llm.OPENAI_API_KEY", 26 | "test_token", 27 | ) 28 | def test_resolve_openai_operator_without_token(): 29 | llm = resolve_llm(provider="openai", model="davinci") 30 | operator = resolve_operator(llm, operator_type="task") 31 | assert operator == OpenAITaskOperator 32 | 33 | 34 | def test_resolve_openai_operator_no_token_raises_error(): 35 | with pytest.raises(OpenAIError): 36 | llm = resolve_llm(provider="openai", model="davinci") 37 | resolve_operator(llm, operator_type="task") 38 | 39 | 40 | def test_resolve_azure_operator(): 41 | llm = resolve_llm( 42 | provider="azure-openai", 43 | model="test", 44 | azure_openai_key="123", 45 | azure_openai_api_base="456", 46 | ) 47 | operator = resolve_operator(llm, operator_type="task") 48 | assert operator == AzureOpenAITaskOperator 49 | -------------------------------------------------------------------------------- /docs/features/planning-future-tasks.md: -------------------------------------------------------------------------------- 1 | ## Plan task :material-airplane-clock: 2 | Once you have defined your task, you can create a plan for it that is already populated with the real values of the parameters. 3 | 4 | The plan is an object you call to get the results. This is very helpful when you want to populate the task with the real values of the parameters but delay the execution of it.
5 | 6 | ```py 7 | import declarai 8 | 9 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 10 | 11 | @gpt_35.task 12 | def say_something_about_movie(movie: str) -> str: 13 | """ 14 | Say something short about the following movie 15 | :param movie: The movie name 16 | """ 17 | 18 | return declarai.magic(movie) 19 | 20 | plan = say_something_about_movie.plan(movie="Avengers") 21 | 22 | print(plan) 23 | > # 24 | 25 | 26 | # Execute the task by calling the plan 27 | plan() 28 | > ['I liked the action-packed storyline and the epic battle scenes.', 29 | "I didn't like the lack of character development for some of the Avengers."] 30 | ``` 31 | 32 | 33 | !!! warning "Important" 34 | The plan is an object you call to get the results. This is very helpful when you want to populate the task with the real values of the parameters but delay the execution of it. 35 | If you just want to execute the task, you can call the task directly. 36 | 37 | ```py 38 | res = say_something_about_movie(movie="Avengers") 39 | 40 | > ['I liked the action-packed storyline and the epic battle scenes.', 41 | "I didn't like the lack of character development for some of the Avengers."] 42 | ``` 43 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | pre-commit: 7 | permissions: 8 | pull-requests: write 9 | 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v2 14 | 15 | - name: Set up Python 3.8 16 | uses: actions/setup-python@v2 17 | with: 18 | python-version: 3.8 19 | 20 | - name: Install dependencies 21 | run: | 22 | python -m pip install --upgrade pip 23 | pip install poetry pre-commit 24 | poetry install 25 | 26 | - name: Run pre-commit 27 | run: pre-commit run --all-files 28 | 29 | test: 30 | permissions: 31 | pull-requests: write 32 | 33 | runs-on: ubuntu-latest 34 | 35 | steps: 36 | - uses: actions/checkout@v2 37 | 38 | - name: Set up Python 3.8 39 | uses: actions/setup-python@v2 40 | with: 41 | python-version: 3.8 42 | 43 | - name: Install dependencies 44 | run: | 45 | python -m pip install --upgrade pip 46 | pip install poetry 47 | poetry install 48 | 49 | - name: Run tests with pytest 50 | run: | 51 | poetry run pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=src tests | tee pytest-coverage.txt 52 | 53 | - name: Upload coverage to GitHub Artifacts 54 | uses: actions/upload-artifact@v2 55 | with: 56 | name: coverage 57 | path: coverage.xml 58 | 59 | - name: Pytest coverage comment 60 | if: github.event.pull_request.head.repo.full_name == github.repository 61 | uses: MishaKav/pytest-coverage-comment@main 62 | with: 63 | pytest-coverage-path: ./pytest-coverage.txt 64 | junitxml-path: ./pytest.xml 65 | -------------------------------------------------------------------------------- /docs/integrations/index.md: -------------------------------------------------------------------------------- 1 | # Integrations 2 | 3 | Declarai comes with minimal dependencies out of the box, to keep the core of the library clean and simple. 4 | If you would like to extend the functionality of Declarai, you can install one of the following integrations. 5 | 6 | ## [Wandb](https://wandb.ai/site) 7 | 8 | Weights & Biases is a popular tool for tracking machine learning experiments. 9 | Recently they have provided an API for tracking prompts in their platform.
10 | The platform has a free tier which you can use to experiment! 11 | 12 | ```bash 13 | pip install declarai[wandb] 14 | ``` 15 | 16 | !!! info 17 | 18 | To use this integration you will need to create an account at wandb. Once created, 19 | you can create a new project and get your API key from the settings page. 20 | 21 | 22 | Once set up, you can use the `WandDBMonitorCreator` to track your prompts in the platform. 23 | 24 | ```python 25 | from typing import Dict 26 | import declarai 27 | from declarai.middleware import WandDBMonitorCreator 28 | 29 | 30 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 31 | 32 | WandDBMonitor = WandDBMonitorCreator( 33 | name="", 34 | project="", 35 | key="", 36 | ) 37 | 38 | 39 | @gpt_35.task(middlewares=[WandDBMonitor]) 40 | def extract_info(text: str) -> Dict[str, str]: 41 | """ 42 | Extract the phone number, name and email from the provided text 43 | :param text: content to extract the info from 44 | :return: The info extracted from the text 45 | """ 46 | return declarai.magic(text=text) 47 | ``` 48 | The tracked prompts should look like this: 49 |

50 | <img src="../img/WeightsAndBiases-dashboard.png" alt="WeightsAndBiases-dashboard"> 51 |
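Since `middlewares` accepts a list, the W&B monitor can be combined with other middlewares, for example Declarai's built-in `LoggingMiddleware`. A minimal sketch (the import path for `LoggingMiddleware` and the execution order are assumptions based on this repository's layout, not verified API):

```python
from typing import Dict

import declarai
from declarai.middleware import WandDBMonitorCreator
# Assumed import path, mirroring the module layout of this repository:
from declarai.middleware.internal.log_middleware import LoggingMiddleware

gpt_35 = declarai.openai(model="gpt-3.5-turbo")

WandDBMonitor = WandDBMonitorCreator(name="", project="", key="")


# Assumption: middlewares run in list order, so each call is both tracked
# in the W&B dashboard and logged locally.
@gpt_35.task(middlewares=[WandDBMonitor, LoggingMiddleware])
def extract_info(text: str) -> Dict[str, str]:
    """
    Extract the phone number, name and email from the provided text
    :param text: content to extract the info from
    :return: The info extracted from the text
    """
    return declarai.magic(text=text)
```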

52 | -------------------------------------------------------------------------------- /docs/css/mkdocstrings.css: -------------------------------------------------------------------------------- 1 | /* Indentation. */ 2 | div.doc-contents:not(.first) { 3 | padding-left: 25px; 4 | border-left: .05rem solid var(--md-typeset-table-color); 5 | } 6 | 7 | /* Mark external links as such. */ 8 | a.external::after, 9 | a.autorefs-external::after { 10 | /* https://primer.style/octicons/arrow-up-right-24 */ 11 | mask-image: url('data:image/svg+xml,'); 12 | content: ' '; 13 | 14 | display: inline-block; 15 | vertical-align: middle; 16 | position: relative; 17 | 18 | height: 1em; 19 | width: 1em; 20 | background-color: var(--md-typeset-a-color); 21 | } 22 | 23 | a.external:hover::after, 24 | a.autorefs-external:hover::after { 25 | background-color: var(--md-accent-fg-color); 26 | } 27 | 28 | /* Mark external links as such (also in nav) */ 29 | a.external:hover::after, a.md-nav__link[href^="https:"]:hover::after { 30 | /* https://primer.style/octicons/link-external-16 */ 31 | background-image: url('data:image/svg+xml,'); 32 | height: 0.8em; 33 | width: 0.8em; 34 | margin-left: 0.2em; 35 | content: ' '; 36 | display: inline-block; 37 | } 38 | -------------------------------------------------------------------------------- /tests/orchestrator/test_task_orchestrator.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock 2 | 3 | from declarai.python_parser.parser import PythonParser 4 | from declarai.task import Task 5 | 6 | 7 | def test_task(): 8 | operator = MagicMock() 9 | instantiated_operator = MagicMock() 10 | instantiated_operator.streaming = False 11 | operator.return_value = instantiated_operator 12 | 13 | instantiated_operator.compile.return_value = "compiled_result" 14 | llm_response = MagicMock() 15 | llm_response.response = "predicted_result" 16 | instantiated_operator.predict.return_value = llm_response 17 | 18 | def test_task() -> str: 19 | pass 20 | 21 | instantiated_operator.parse_output.return_value = PythonParser(test_task).parse( 22 | llm_response.response 23 | ) 24 | 25 | task = Task(instantiated_operator) 26 | assert task.compile() == "compiled_result" 27 | 28 | # TODO: Implement test when plan is implemented 29 | # task_orchestrator.plan() 30 | 31 | res = task() 32 | assert res == "predicted_result" 33 | 34 | res = task(llm_params={"temperature": 0.5}) 35 | instantiated_operator.predict.assert_called_with(llm_params={"temperature": 0.5}) 36 | 37 | 38 | def test_task_streaming(): 39 | operator = MagicMock() 40 | instantiated_operator = MagicMock() 41 | instantiated_operator.streaming = True 42 | operator.return_value = instantiated_operator 43 | 44 | instantiated_operator.compile.return_value = "compiled_result" 45 | llm_response = MagicMock() 46 | llm_response.response = "predicted_result" 47 | instantiated_operator.predict.return_value = [llm_response] 48 | 49 | def test_task() -> str: 50 | pass 51 | 52 | task = Task(instantiated_operator) 53 | assert list(task()) == [llm_response] 54 | -------------------------------------------------------------------------------- /docs/features/chat/chat-memory/redis-memory.md: -------------------------------------------------------------------------------- 1 | 2 | # Redis Memory :material-database: 3 | 4 | For chat that requires a fast and scalable message history, you can use a Redis database to store the conversation history. 
5 | 6 | ## Set Redis memory 7 | 8 | ```py 9 | import declarai 10 | from declarai.memory import RedisMessageHistory 11 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 12 | 13 | @gpt_35.experimental.chat( 14 | chat_history=RedisMessageHistory( 15 | session_id="unique_chat_id", 16 | url="redis://localhost:6379/0" 17 | ) 18 | ) # (1)! 19 | class SQLBot: 20 | """ 21 | You are a sql assistant. You help with SQL related questions with one-line answers. 22 | """ 23 | 24 | sql_bot = SQLBot() 25 | ``` 26 | 27 | 1. The `url` parameter specifies the connection details for the Redis server. Replace `localhost` and `6379` with your specific Redis connection details. The `session_id` parameter uniquely identifies the chat session for which the history is being stored. 28 | 29 | We can also initialize the `RedisMessageHistory` class with custom connection details. 30 | 31 | ## Set Redis memory at runtime 32 | 33 | In case you want to set the Redis memory at runtime, you can pass a `chat_history` argument when instantiating the chatbot. 34 | 35 | ```py 36 | import declarai 37 | from declarai.memory import RedisMessageHistory 38 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 39 | 40 | @gpt_35.experimental.chat 41 | class SQLBot: 42 | """ 43 | You are a sql assistant. You help with SQL related questions with one-line answers. 44 | """ 45 | 46 | sql_bot = SQLBot(chat_history=RedisMessageHistory(session_id="unique_chat_id", url="redis://localhost:6379/0")) 47 | ``` 48 | 49 | ## Dependencies 50 | 51 | Make sure to install the following dependencies before using Redis memory. 52 | 53 | ```bash 54 | pip install declarai[redis] 55 | ``` 56 | -------------------------------------------------------------------------------- /src/declarai/middleware/internal/log_middleware.py: -------------------------------------------------------------------------------- 1 | """ 2 | Logger Middleware 3 | """ 4 | import logging 5 | from time import time 6 | 7 | from declarai._base import TaskType 8 | from declarai.middleware.base import TaskMiddleware 9 | 10 | logger = logging.getLogger("PromptLogger") 11 | 12 | 13 | class LoggingMiddleware(TaskMiddleware): 14 | """ 15 | Creates a simple logging middleware for a given task. 16 | 17 | Example: 18 | ```py 19 | @openai.task(middlewares=[LoggingMiddleware]) 20 | def generate_a_poem(title: str): 21 | ''' 22 | Generate a poem based on the given title 23 | :return: The generated poem 24 | ''' 25 | return declarai.magic("poem", title) 26 | ``` 27 | """ 28 | 29 | start_time: float = None 30 | 31 | def before(self, _): 32 | """ 33 | Before execution of the task, set the start time. 34 | """ 35 | self.start_time = time() 36 | 37 | def after(self, task: TaskType): 38 | """ 39 | After execution of the task, log the task details. 40 | Args: 41 | task: the task to be logged 42 | 43 | Logs a record with the task name, model, template, compiled template, result and 44 | execution time; the method itself returns nothing.
45 | 46 | """ 47 | end_time = time() - self.start_time 48 | log_record = { 49 | "task_name": task.__name__, 50 | "llm_model": task.llm_response.model, 51 | "template": str(task.compile()), 52 | "call_kwargs": str(self._kwargs), 53 | "compiled_template": str(task.compile(**self._kwargs)), 54 | "result": task.llm_response.response, 55 | "time": end_time, 56 | } 57 | logger.info(log_record) 58 | print(log_record) 59 | -------------------------------------------------------------------------------- /docs/beginners-guide/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide: 3 | - footer 4 | --- 5 | 6 | # Tutorial - Beginners guide 7 | 8 | This tutorial is a step-by-step guide to using **Declarai**. It walks you through the most basic features of the library. 9 | 10 | Each section gradually builds on the previous one while sections are structured by topic, 11 | so that you can skip to whichever part is relevant to you. 12 | 13 | ## Before we start 14 | 15 | If you haven't already, install the Declarai library as follows: 16 | 17 | ```bash 18 | $ pip install declarai 19 | ``` 20 | !!! info 21 | 22 | For this tutorial you will need an openai token. This token is completely yours and is not shared, stored or managed 23 | anywhere but on your machine! You can see more information about obtaining a token here: [openai](/declarai/src/providers/openai/) 24 | 25 | After installation, open a Python file and start by setting up your Declarai app: 26 | 27 | Once completed, the rest of the examples in this module should be as simple as copy/paste. 28 | 29 | 30 | 31 | ```python title="declarai_tutorial.py" 32 | import declarai 33 | 34 | gpt_35 = declarai.openai(model="gpt-3.5-turbo", openai_token="") 35 | ``` 36 | 37 | 38 | !!! info 39 | 40 | Do your best to copy, run and edit the code in your editor to really understand how powerful Declarai is. 41 | 42 | 47 | 48 | ## Advanced 49 | 50 | If you feel this tutorial is too easy, feel free to jump to our [**Features**](../features/) section, which covers more complex 51 | topics like middlewares, running evaluations and building multi provider flows. 52 | 53 | We recommend you read the tutorial first, and then the advanced guide if you want to learn more.
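If you want a quick taste before diving in, here is a minimal task built on the `gpt_35` instance defined above (the function and its docstring are illustrative, not part of the tutorial's canonical examples):

```python
@gpt_35.task
def summarize(text: str) -> str:
    """
    Summarize the provided text in one sentence
    :param text: the text to summarize
    :return: a one-sentence summary
    """
    return declarai.magic(text=text)


print(summarize(text="Declarai turns documented Python functions into LLM tasks."))
```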
54 | -------------------------------------------------------------------------------- /src/declarai/evals/metadata_significance/simple_task_significance.py: -------------------------------------------------------------------------------- 1 | from declarai import Declarai 2 | 3 | simple_task_significance_kwargs = { 4 | "title": "Using LLMs is fun!", 5 | } 6 | 7 | 8 | def generate_a_poem_no_metadata(title: str): 9 | """ 10 | Write a 4 line poem based on the given title 11 | """ 12 | return Declarai.magic(title=title) 13 | 14 | 15 | def generate_a_poem_only_return_type(title: str) -> str: 16 | """ 17 | Write a 4 line poem based on the given title 18 | """ 19 | return Declarai.magic(title=title) 20 | 21 | 22 | def generate_a_poem_only_return_doc(title: str): 23 | """ 24 | Write a 4 line poem based on the given title 25 | :return: The generated poem 26 | """ 27 | return Declarai.magic(title=title) 28 | 29 | 30 | def generate_a_poem_only_return_magic(title: str): 31 | """ 32 | Write a 4 line poem based on the given title 33 | """ 34 | return Declarai.magic("poem", title=title) 35 | 36 | 37 | def generate_a_poem_return_type_return_doc(title: str) -> str: 38 | """ 39 | Write a 4 line poem based on the given title 40 | :return: The generated poem 41 | """ 42 | return Declarai.magic(title=title) 43 | 44 | 45 | def generate_a_poem_return_type_return_magic(title: str) -> str: 46 | """ 47 | Write a 4 line poem based on the given title 48 | """ 49 | return Declarai.magic("poem", title=title) 50 | 51 | 52 | def generate_a_poem_return_doc_return_magic(title: str): 53 | """ 54 | Write a 4 line poem based on the given title 55 | :return: The generated poem 56 | """ 57 | return Declarai.magic("poem", title=title) 58 | 59 | 60 | def generate_a_poem_return_all(title: str) -> str: 61 | """ 62 | Write a 4 line poem based on the given title 63 | :return: The generated poem 64 | """ 65 | return Declarai.magic("poem", title=title) 66 | -------------------------------------------------------------------------------- /docs/features/magic.md: -------------------------------------------------------------------------------- 1 | # Magic 2 | 3 | The Magic callable is an "empty" function that can be used for two main scenarios: 4 | 5 | - A placeholder for typing, to simplify interaction with static typing without having to mark all Declarai functions with `# type: ignore`. 6 | - A replacement for the docstring content, if for some reason you don't want to use the docstring for the task description. 7 | 8 | 9 | ### Magic as a placeholder for typing 10 | 11 | Without magic: 12 | ```python 13 | @openai.task 14 | def suggest_nickname(real_name: str) -> str: # (1)! 15 | """ 16 | Suggest a nickname for a person 17 | :param real_name: The person's real name 18 | :return: A nickname for the person 19 | """ 20 | ``` 21 | 22 | 1. The type checker warns about the unused argument `real_name` in the function. 23 | 24 | With magic: 25 | ```python 26 | @openai.task 27 | def suggest_nickname(real_name: str) -> str: 28 | """ 29 | Suggest a nickname for a person 30 | :param real_name: The person's real name 31 | :return: A nickname for the person 32 | """ 33 | return declarai.magic(real_name=real_name) # (1)! 34 | ``` 35 | 36 | 1. The type-hint warning is resolved. 37 | 38 | 39 | ### Replacement for docstring 40 | 41 | If you do not want to rely on the docstring for prompt generation, you can use the magic function to provide the description and parameters.
42 | 43 | ```python 44 | import declarai 45 | 46 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 47 | 48 | @gpt_35.task 49 | def suggest_nickname(real_name: str) -> str: 50 | return declarai.magic( 51 | real_name=real_name, 52 | description="Suggest a nickname for a person", 53 | params={"real_name": "The person's real name"}, 54 | returns="A nickname for the person", 55 | ) 56 | ``` 57 | 58 | This does take some of Declarai's magic out of the equation, but the result should be all the same. 59 | -------------------------------------------------------------------------------- /docs/features/chat/chat-memory/mongodb-memory.md: -------------------------------------------------------------------------------- 1 | # MongoDB Memory :material-database: 2 | 3 | For chat that requires a persistent and scalable message history, you can use a MongoDB database to store the 4 | conversation history. 5 | 6 | ## Set MongoDB memory 7 | 8 | ```py 9 | import declarai 10 | from declarai.memory import MongoDBMessageHistory 11 | 12 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 13 | 14 | 15 | @gpt_35.experimental.chat( 16 | chat_history=MongoDBMessageHistory( 17 | connection_string="mongodb://localhost:27017/mydatabase", 18 | session_id="unique_chat_id") 19 | ) # (1)! 20 | class SQLBot: 21 | """ 22 | You are a sql assistant. You help with SQL related questions with one-line answers. 23 | """ 24 | 25 | 26 | sql_bot = SQLBot() 27 | ``` 28 | 29 | 1. The `connection_string` parameter specifies the connection details for the MongoDB database. 30 | Replace `localhost`, `27017`, and `mydatabase` with your specific MongoDB connection details. The `session_id` 31 | parameter uniquely identifies the chat session for which the history is being stored. 32 | 33 | ## Set MongoDB memory at runtime 34 | 35 | In case you want to set the MongoDB memory at runtime, you can pass a `chat_history` argument when instantiating the chatbot. 36 | 37 | ```py 38 | import declarai 39 | from declarai.memory import MongoDBMessageHistory 40 | 41 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 42 | 43 | 44 | @gpt_35.experimental.chat 45 | class SQLBot: 46 | """ 47 | You are a sql assistant. You help with SQL related questions with one-line answers. 48 | """ 49 | 50 | 51 | sql_bot = SQLBot(chat_history=MongoDBMessageHistory(connection_string="mongodb://localhost:27017/mydatabase", 52 | session_id="unique_chat_id")) 53 | ``` 54 | 55 | ## Dependencies 56 | 57 | Make sure to install the following dependencies before using MongoDB memory. 58 | 59 | ```bash 60 | pip install declarai[mongodb] 61 | ``` 62 | -------------------------------------------------------------------------------- /docs/features/chat/chat-memory/postgresql-memory.md: -------------------------------------------------------------------------------- 1 | 2 | # PostgreSQL Memory :material-database: 3 | 4 | For chat that requires a persistent message history with the advantages of scalability and robustness, you can use a PostgreSQL database to store the conversation history. 5 | 6 | ## Set PostgreSQL memory 7 | 8 | ```py 9 | import declarai 10 | from declarai.memory import PostgresMessageHistory 11 | 12 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 13 | 14 | 15 | @gpt_35.experimental.chat( 16 | chat_history=PostgresMessageHistory( 17 | connection_string="postgresql://username:password@localhost:5432/mydatabase", 18 | session_id="unique_chat_id") 19 | ) # (1)! 20 | class SQLBot: 21 | """ 22 | You are a sql assistant. You help with SQL related questions with one-line answers.
23 | """ 24 | 25 | sql_bot = SQLBot() 26 | ``` 27 | 28 | 1. The `connection_string` parameter specifies the connection details for the PostgreSQL database. Replace `username`, `password`, `localhost`, `5432`, and `mydatabase` with your specific PostgreSQL connection details. The `session_id` parameter uniquely identifies the chat session for which the history is being stored. 29 | 30 | 31 | ## Set PostgreSQL memory at runtime 32 | 33 | In case you want to set the PostgreSQL memory at runtime, you can pass a `chat_history` argument when instantiating the chatbot. 34 | 35 | ```py 36 | import declarai 37 | from declarai.memory import PostgresMessageHistory 38 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 39 | 40 | @gpt_35.experimental.chat 41 | class SQLBot: 42 | """ 43 | You are a sql assistant. You help with SQL related questions with one-line answers. 44 | """ 45 | 46 | sql_bot = SQLBot(chat_history=PostgresMessageHistory(connection_string="postgresql://username:password@localhost:5432/mydatabase", session_id="unique_chat_id")) 47 | ``` 48 | 49 | ## Dependencies 50 | 51 | Make sure to install the following dependencies before using PostgreSQL memory. 52 | 53 | ```bash 54 | pip install declarai[postgresql] 55 | ``` 56 | -------------------------------------------------------------------------------- /tests/tasks/test_llm_chat.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from unittest.mock import MagicMock, patch 3 | 4 | from declarai import Declarai 5 | from declarai.operators import Message, LLMResponse 6 | 7 | 8 | @patch("declarai.declarai.resolve_llm") 9 | def test_chat(mock_resolve_llm): 10 | llm = MagicMock() 11 | llm.provider = "openai" 12 | llm.streaming = False 13 | llm.predict.return_value = LLMResponse( 14 | response='{"declarai_result": ["1", "2"]}' 15 | ) 16 | mock_resolve_llm.return_value = llm 17 | 18 | 19 | declarai = Declarai(provider="openai", model="gpt-3.5-turbo") 20 | 21 | @declarai.experimental.chat 22 | class MyChat: 23 | """ 24 | This is a test chat. 25 | """ 26 | greeting = "This is a greeting message" 27 | 28 | def send(self) -> List[str]: 29 | ... 30 | 31 | chat = MyChat() 32 | assert chat.system == "This is a test chat." 33 | assert chat.greeting == "This is a greeting message" 34 | assert chat.compile() == dict(messages=[ 35 | Message( 36 | message="This is a test chat./nYour responses should be a JSON structure with a single key named 'declarai_result', nothing else. The expected format is: \"declarai_result\": List[string]", 37 | role="system"), 38 | Message(message="This is a greeting message", role="assistant") 39 | ]) 40 | 41 | assert chat.send("return two string numbers in a list") == ["1", "2"] 42 | 43 | 44 | @patch("declarai.declarai.resolve_llm") 45 | def test_chat_jinja_system(mock_resolve_llm): 46 | llm = MagicMock() 47 | llm.provider = "openai" 48 | mock_resolve_llm.return_value = llm 49 | 50 | declarai = Declarai(provider="openai", model="gpt-3.5-turbo") 51 | 52 | @declarai.experimental.chat 53 | class MyJinjaChat: 54 | """ 55 | This is a test chat about {{ topic }}. 56 | """ 57 | 58 | chat = MyJinjaChat(topic="jinja2") 59 | assert chat.system == "This is a test chat about jinja2."
60 | -------------------------------------------------------------------------------- /tests/operators/shared/test_output_prompt.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from declarai.operators.openai_operators.task_operator import compile_output_prompt 4 | from declarai.operators.templates import ( 5 | StructuredOutputInstructionPrompt, 6 | compile_output_schema_template, 7 | ) 8 | 9 | 10 | @pytest.mark.parametrize( 11 | "return_name, return_type, return_doc, result", 12 | [ 13 | ("", "", "", ""), 14 | ("foo", "", "", '"foo": '), 15 | ("", "int", "", '"declarai_result": int'), 16 | ("", "", "the foo", '"declarai_result": # the foo'), 17 | ("foo", "int", "", '"foo": int'), 18 | ("foo", "", "the foo", '"foo": # the foo'), 19 | ("", "int", "the foo", '"declarai_result": int # the foo'), 20 | ("foo", "int", "the foo", '"foo": int # the foo'), 21 | ], 22 | ) 23 | def test_output_prompt( 24 | return_name: str, return_type: str, return_doc: str, result: str 25 | ): 26 | output_schema = compile_output_schema_template( 27 | return_name, return_type, return_doc, StructuredOutputInstructionPrompt 28 | ) 29 | replacement_prompt = StructuredOutputInstructionPrompt.format( 30 | return_name=return_name or "declarai_result", output_schema="" 31 | ) 32 | assert output_schema.replace(replacement_prompt, "") == result 33 | 34 | 35 | def test_compile_output_prompt(): 36 | return_name = "return_name" 37 | return_type = "Dict[str, str]" 38 | return_docstring = "The returned value from this function" 39 | 40 | compiled_output_prompt = compile_output_prompt( 41 | return_name, 42 | return_type, 43 | return_docstring, 44 | structured_template=StructuredOutputInstructionPrompt, 45 | ) 46 | formatted_output = ( 47 | '"return_name": Dict[str, str] # The returned value from this function' 48 | ) 49 | replacement_prompt = StructuredOutputInstructionPrompt.format( 50 | return_name=return_name or "declarai_result", output_schema="" 51 | ) 52 | assert compiled_output_prompt.replace(replacement_prompt, "") == formatted_output 53 | -------------------------------------------------------------------------------- /tests/api/test_chat_decorator.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock, patch 2 | 3 | from declarai import Declarai 4 | from declarai.operators import Message, MessageRole 5 | 6 | 7 | @patch("declarai.declarai.resolve_llm") 8 | @patch("declarai.chat.resolve_operator") 9 | def test_chat(mock_chat_resolve_operator, mock_resolve_llm): 10 | operator_class_mock = MagicMock() 11 | operator_instance_mock = MagicMock() 12 | 13 | # Set the .system attribute on the instance returned by the operator mock when called 14 | operator_instance_mock.system = "This is a test chat.\n" 15 | operator_instance_mock.greeting = "This is a greeting message" 16 | operator_class_mock.return_value = operator_instance_mock 17 | 18 | llm = MagicMock() 19 | mock_chat_resolve_operator.return_value = operator_class_mock 20 | mock_resolve_llm.return_value = llm 21 | declarai = Declarai(provider="test", model="test") 22 | 23 | @declarai.experimental.chat 24 | class TestChat: 25 | """ 26 | This is a test chat. 
27 | """ 28 | 29 | greeting = "This is a greeting message" 30 | 31 | chat = TestChat() 32 | 33 | assert chat.__name__ == "TestChat" 34 | assert chat.greeting == "This is a greeting message" 35 | assert chat.conversation == [ 36 | Message(message="This is a greeting message", role=MessageRole.assistant) 37 | ] 38 | assert chat.system == "This is a test chat.\n" 39 | 40 | @declarai.experimental.chat 41 | class OverrideChatParams: 42 | """ 43 | This is a test chat. 44 | """ 45 | 46 | greeting = "This is a greeting message" 47 | 48 | chat2 = OverrideChatParams(greeting="New Message") 49 | 50 | assert chat2.__name__ == "OverrideChatParams" 51 | assert chat2.greeting == "New Message" 52 | 53 | @declarai.experimental.chat(system="This is a decorated chat.\n", greeting="This is a greeting message") 54 | class ChatWithParamsDecorated: 55 | ... 56 | 57 | chat3 = ChatWithParamsDecorated() 58 | 59 | assert chat3.system == "This is a decorated chat.\n" 60 | assert chat3.greeting == "This is a greeting message" 61 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | files: src 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v4.0.1 5 | hooks: 6 | - id: debug-statements 7 | - id: end-of-file-fixer 8 | - id: check-merge-conflict 9 | - id: no-commit-to-branch 10 | - repo: https://github.com/jendrikseipp/vulture # finds unused code 11 | rev: v2.3 12 | hooks: 13 | - id: vulture 14 | args: [ "src", "--min-confidence", "100", "--exclude", "projects/backend/.venv,projects/backend/tests", "--ignore-names", "cls"] 15 | - repo: https://github.com/hadialqattan/pycln # Autoremoves unused imports 16 | rev: "v2.1.1" 17 | hooks: 18 | - id: pycln 19 | stages: [ manual ] 20 | - repo: https://github.com/PyCQA/flake8 21 | rev: 5.0.4 22 | hooks: 23 | - id: flake8 24 | args: 25 | - "--config=.flake8" 26 | #- repo: local 27 | # hooks: 28 | # - id: pylint 29 | # name: pylint 30 | # entry: pylint 31 | # language: system 32 | # types: [python] 33 | # args: ["-rn", "-sn", "--rcfile=.pylintrc", "--fail-on=I"] 34 | # exclude: tests(/\w*)*/functional/|tests/input|tests(/\w*)*data/|doc/ 35 | # - id: isort 36 | # name: isort 37 | # entry: isort 38 | # require_serial: true 39 | # language: python 40 | # types_or: [cython, pyi, python] 41 | # args: ['--filter-files', '--settings-path', 'pyproject.toml'] 42 | ## minimum_pre_commit_version: '2.9.2' 43 | 44 | - repo: https://github.com/psf/black 45 | rev: 22.3.0 46 | hooks: 47 | - id: black 48 | args: ["--config", "pyproject.toml" ] 49 | 50 | - repo: https://github.com/avilaton/add-msg-issue-prefix-hook 51 | rev: v0.0.5 52 | hooks: 53 | - id: add-msg-issue-prefix 54 | args: 55 | - ".git/COMMIT_EDITMSG" 56 | 57 | # Checks for common misspellings and typos 58 | - repo: https://github.com/codespell-project/codespell 59 | rev: "v2.2.1" 60 | hooks: 61 | - id: codespell 62 | 63 | - repo: https://github.com/gitleaks/gitleaks 64 | rev: v8.16.1 65 | hooks: 66 | - id: gitleaks 67 | - repo: https://github.com/codespell-project/codespell 68 | rev: "v2.2.1" 69 | hooks: 70 | - id: codespell 71 | args: 72 | - src/declarai/operators/shared/templates/ 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /src/declarai/python_parser/docstring_parsers/reST/parser.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from 
declarai.python_parser.docstring_parsers.types import BaseDocStringParser 4 | from declarai.python_parser.types import ( 5 | DocstringFreeform, 6 | DocstringParams, 7 | DocstringReturn, 8 | ) 9 | 10 | reST_PARAM_KEY: str = ":param" 11 | reST_RETURN_KEY: str = ":return" 12 | reST_FREEFORM_REGEX = rf"(?s)(.*?)(?=\n{reST_PARAM_KEY}|\n{reST_RETURN_KEY}|$)" 13 | reST_PARAMS_REGEX = ( 14 | rf"(?s)({reST_PARAM_KEY} .*?: .*?)(?=\n{reST_PARAM_KEY}|\n{reST_RETURN_KEY}|$)" 15 | ) 16 | reST_RETURN_REGEX = rf"(?s)({reST_RETURN_KEY}: .*?)($)" 17 | 18 | 19 | class ReSTDocstringParser(BaseDocStringParser): 20 | """ 21 | As recommended by (PEP 287)[https://peps.python.org/pep-0287/], 22 | the recommended docstring format is the reStructuredText format (shortform - reST). 23 | """ 24 | 25 | def __init__(self, docstring: str): 26 | self.docstring = docstring 27 | 28 | @property 29 | def freeform(self) -> DocstringFreeform: 30 | if not self.docstring: 31 | return "" 32 | freeform = re.search(reST_FREEFORM_REGEX, self.docstring).group().strip() 33 | return freeform 34 | 35 | @property 36 | def params(self) -> DocstringParams: 37 | params = [ 38 | param.group().strip() 39 | for param in re.finditer(reST_PARAMS_REGEX, self.docstring) 40 | ] 41 | params_dict = {} 42 | for param in params: 43 | param = param.replace(reST_PARAM_KEY, "").strip() 44 | param_name, doc = param.split(":") 45 | params_dict[param_name] = doc.strip() 46 | return params_dict 47 | 48 | @property 49 | def returns(self) -> DocstringReturn: 50 | if not self.docstring: 51 | return "", "" 52 | matched_returns = re.search(reST_RETURN_REGEX, self.docstring) 53 | if matched_returns: 54 | returns = matched_returns.group().strip() 55 | returns = returns.replace(reST_RETURN_KEY, "").strip() 56 | return_name, return_doc = returns.split(":") 57 | return return_name, return_doc.strip() 58 | 59 | return "", "" 60 | -------------------------------------------------------------------------------- /src/declarai/python_parser/magic_parser.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import textwrap 3 | 4 | 5 | class Magic: 6 | def __init__( 7 | self, 8 | return_name: str = None, 9 | task_desc: str = "", 10 | input_desc: dict = {}, 11 | output_desc: str = "", 12 | ): 13 | self.return_name = return_name 14 | self.task_desc = task_desc 15 | self.input_desc = input_desc 16 | self.output_desc = output_desc 17 | 18 | 19 | def extract_magic_args(code) -> Magic: 20 | # Parse the code into an abstract syntax tree 21 | code = textwrap.dedent(code) 22 | tree = ast.parse(code) 23 | 24 | for node in ast.walk(tree): 25 | if isinstance(node, ast.FunctionDef): 26 | function_node = node 27 | break 28 | else: 29 | raise ValueError("function not found") 30 | 31 | # Find the magic function call 32 | for node in ast.walk(function_node): 33 | if isinstance(node, ast.Call): 34 | if getattr(node.func, "id", None) == "magic": 35 | magic_call = node 36 | break 37 | if getattr(node.func, "attr", None) == "magic": 38 | magic_call = node 39 | break 40 | else: 41 | raise ValueError("magic function call not found") 42 | 43 | # Extract the arguments 44 | if len(magic_call.args) > 0: 45 | try: 46 | return_name = magic_call.args[0].s 47 | except: # noqa 48 | return_name = magic_call.args[0].id 49 | else: 50 | return_name = None 51 | 52 | task_desc = "" 53 | input_desc = {} 54 | output_desc = "" 55 | for kwarg in magic_call.keywords: 56 | if kwarg.arg == "task_desc": 57 | task_desc = kwarg.value.s 58 | elif kwarg.arg == "input_desc": 59 
| zipped = zip(kwarg.value.keys, kwarg.value.values) 60 | for k, v in zipped: 61 | input_desc[k.s] = v.s 62 | elif kwarg.arg == "output_desc": 63 | output_desc = kwarg.value.s 64 | 65 | return Magic( 66 | return_name=return_name, 67 | task_desc=task_desc, 68 | input_desc=input_desc, 69 | output_desc=output_desc, 70 | ) 71 | -------------------------------------------------------------------------------- /src/declarai/memory/file.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the FileMessageHistory class, which is used to store chat message history in a local file. 3 | """ 4 | import json 5 | import logging 6 | import tempfile 7 | from pathlib import Path 8 | from typing import Dict, List, Optional 9 | 10 | from ..operators import Message 11 | from .base import BaseChatMessageHistory 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | class FileMessageHistory(BaseChatMessageHistory): 17 | """ 18 | Chat message history that stores history in a local file. 19 | 20 | Args: 21 | file_path: path of the local file to store the messages. if not passed the messages will be stored in a temporary file, 22 | and a warning will be logged. 23 | """ 24 | 25 | def __init__(self, file_path: Optional[str] = None): 26 | super().__init__() 27 | if not file_path: 28 | # Create a temporary file and immediately close it to get its name. 29 | temp = tempfile.NamedTemporaryFile(delete=False) 30 | self.file_path = Path(temp.name) 31 | self.file_path.write_text(json.dumps([])) 32 | logger.warning( 33 | "No file path provided to store the messages. " 34 | f"Messages will be stored in a temporary file path: {self.file_path}" 35 | ) 36 | else: 37 | self.file_path = Path(file_path) 38 | 39 | if not self.file_path.exists(): 40 | self.file_path.touch() 41 | self.file_path.write_text(json.dumps([])) 42 | 43 | @property 44 | def history(self) -> List[Message]: 45 | """Retrieve the messages from the local file""" 46 | items: List[Dict] = json.loads(self.file_path.read_text()) 47 | messages = [Message.parse_obj(obj=dict_item) for dict_item in items] 48 | return messages 49 | 50 | def add_message(self, message: Message) -> None: 51 | """Append the message to the record in the local file""" 52 | messages = self.history.copy() 53 | messages.append(message) 54 | messages_dict = [msg.dict() for msg in messages] 55 | self.file_path.write_text(json.dumps(messages_dict)) 56 | 57 | def clear(self) -> None: 58 | """Clear session memory from the local file""" 59 | self.file_path.write_text(json.dumps([])) 60 | -------------------------------------------------------------------------------- /src/declarai/evals/logical_tasks/sequence.py: -------------------------------------------------------------------------------- 1 | # from typing import List 2 | # 3 | # from declarai import Declarai 4 | # from declarai.orchestrator.sequence import Sequence 5 | # 6 | # 7 | # def suggest_title(question: str) -> str: 8 | # """ 9 | # Given a question from our customer support, suggest a title for it 10 | # :param question: the provided question 11 | # :return: The title suggested for the question 12 | # """ 13 | # return Declarai.magic("question_title", question) 14 | # 15 | # 16 | # def route_to_department(title: str, departments: List[str]) -> str: 17 | # """ 18 | # Given a question title, route it to the relevant department 19 | # :param title: A title generated for the question 20 | # :param departments: The departments to route the question to 21 | # :return: The 
department that the question should be routed to 22 | # """ 23 | # return Declarai.magic("department", title, departments) 24 | # 25 | # 26 | # def suggest_department_answers(title: str, department: str) -> List[str]: 27 | # """ 28 | # Given a question and a department, suggest 2 answers from the department's knowledge base 29 | # :param title: The question title to suggest answers for 30 | # :param department: The department to suggest answers from 31 | # :return: The suggested answers 32 | # """ 33 | # return Declarai.magic("answers", title, department) 34 | # 35 | # 36 | # available_departments = ["sales", "support", "billing"] 37 | # 38 | # 39 | # def chain_of_thought(declarai: Declarai, question: str): 40 | # suggested_title_task = declarai.task(suggest_title) 41 | # selected_department_task = declarai.task(route_to_department) 42 | # department_answers_task = declarai.task(suggest_department_answers) 43 | # 44 | # suggested_title = suggested_title_task.plan(question=question) 45 | # selected_department = selected_department_task.plan( 46 | # title=suggested_title, departments=available_departments 47 | # ) 48 | # suggested_answers = department_answers_task.plan( 49 | # title=suggested_title, department=selected_department 50 | # ) 51 | # 52 | # return Sequence(suggested_answers, reduce_strategy="CoT") 53 | # 54 | # 55 | # chain_of_thought_kwargs = { 56 | # "question": "Hey, I'm not using my account anymore. " 57 | # "I've already talked to customer support and am not interested in it anymore. " 58 | # "Who should I talk to about this?" 59 | # } 60 | -------------------------------------------------------------------------------- /tests/python_parser/annotations/test_type_annotation_to_schema.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List 2 | 3 | import pytest 4 | from pydantic import BaseModel, Field 5 | 6 | from declarai.python_parser.type_annotation_to_schema import ( 7 | type_annotation_to_str_schema, 8 | ) 9 | 10 | 11 | class MockSimpleModel(BaseModel): 12 | name: str 13 | numbers: List[int] 14 | 15 | 16 | class MockComplexModelArray(BaseModel): 17 | name: str 18 | children: List[MockSimpleModel] 19 | 20 | 21 | class MockComplexModelDict(BaseModel): 22 | name: str 23 | id: str = Field(description="Describe this field") 24 | children: Dict[str, MockSimpleModel] 25 | 26 | 27 | @pytest.mark.parametrize( 28 | "type_, result", 29 | [ 30 | (str, "str"), 31 | (int, "int"), 32 | (float, "float"), 33 | (bool, "bool"), 34 | (List[str], "List[string]"), 35 | (Dict[str, str], "Dict[string, string]"), 36 | (Dict[str, int], "Dict[string, integer]"), 37 | (Dict[float, bool], "Dict[number, boolean]"), 38 | ( 39 | MockSimpleModel, 40 | '{{\n "name": "string",\n "numbers": [\n "integer"\n ]\n}}', 41 | ), 42 | (List[MockSimpleModel], "List[{{'name': 'string', 'numbers': ['integer']}}]"), 43 | ( 44 | Dict[str, MockSimpleModel], 45 | "Dict[string, {{'name': 'string', 'numbers': ['integer']}}]", 46 | ), 47 | ( 48 | MockComplexModelArray, 49 | "{{\n" 50 | ' "name": "string",\n' 51 | ' "children": [\n' 52 | " {{\n" 53 | ' "name": "string",\n' 54 | ' "numbers": [\n' 55 | ' "integer"\n' 56 | " ]\n" 57 | " }}\n" 58 | " ]\n" 59 | "}}", 60 | ), 61 | ( 62 | MockComplexModelDict, 63 | "{{\n" 64 | ' "name": "string",\n' 65 | ' "id": "string - Describe this field",\n' 66 | ' "children": {{\n' 67 | ' "name": "string",\n' 68 | ' "numbers": [\n' 69 | ' "integer"\n' 70 | " ]\n" 71 | " }}\n" 72 | "}}", 73 | ), 74 | ], 75 | ) 76 | def 
test_type_hint_resolver(type_: Any, result: str): 77 | assert type_annotation_to_str_schema(type_) == result 78 | -------------------------------------------------------------------------------- /docs/features/language-model-parameters.md: -------------------------------------------------------------------------------- 1 | # Control LLM params 2 | Language models have various parameters that can be tuned to control the output of the model. To see the parameters for a specific LLM, see the corresponding [provider](../providers/index.md). 3 | 4 | Here is an example of how to control these parameters in a declarai task/chat: 5 | 6 | 7 | ## Set at declaration 8 | 9 | ```python 10 | import declarai 11 | gpt_35 = declarai.openai(model="gpt-3.5-turbo", openai_token="<your-api-key>") 12 | 13 | 14 | @gpt_35.task(llm_params={"temperature": 0.5, "max_tokens": 1000}) 15 | def generate_song(): 16 | """ 17 | Generate a song about declarai 18 | """ 19 | 20 | ``` 21 | 22 | ## Set at runtime 23 | We can also pass parameters to the declarai task/chat interface at runtime: 24 | 25 | ```python 26 | import declarai 27 | gpt_35 = declarai.openai(model="gpt-3.5-turbo", openai_token="<your-api-key>") 28 | 29 | @gpt_35.task 30 | def generate_song(): 31 | """ 32 | Generate a song about declarai 33 | """ 34 | 35 | generate_song(llm_params={"temperature": 0.5, "max_tokens": 1000}) # (1)! 36 | ``` 37 | 38 | 1. The `llm_params` argument is passed at runtime instead of at declaration. 39 | 40 | 41 | ## Override at runtime 42 | Furthermore, we can pass parameters to the declarai task/chat interface at runtime and override the parameters passed at declaration: 43 | 44 | ```python 45 | import declarai 46 | gpt_35 = declarai.openai(model="gpt-3.5-turbo", openai_token="<your-api-key>") 47 | 48 | @gpt_35.task(llm_params={"temperature": 0.5, "max_tokens": 1000}) 49 | def generate_song(): 50 | """ 51 | Generate a song about declarai 52 | """ 53 | 54 | generate_song(llm_params={"temperature": 0.3, "max_tokens": 500}) 55 | ``` 56 | 57 | In this case, the `llm_params` argument passed at runtime will override the `llm_params` argument passed at declaration. 58 | 59 | 60 | ## Set for Chat interface 61 | Same as with tasks, we can pass parameters to the declarai chat interface at declaration, at runtime, or override the parameters passed at declaration at runtime. 62 | 63 | ```python 64 | import declarai 65 | gpt_35 = declarai.openai(model="gpt-3.5-turbo", openai_token="<your-api-key>") 66 | 67 | @gpt_35.experimental.chat(llm_params={"temperature": 0.5, "max_tokens": 1000}) 68 | class SQLAdvisor: 69 | """ 70 | You are a proficient SQL advisor. 71 | Your goal is to help users with SQL related questions. 72 | """ 73 | 74 | sql_advisor = SQLAdvisor() 75 | ``` 76 | In the case above, all messages sent to the chat interface will use the parameters passed at declaration. 77 | -------------------------------------------------------------------------------- /src/declarai/operators/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Operators are the main interface that interacts internally with the LLMs. 
3 | """ 4 | from typing import Type, Union, overload 5 | 6 | from typing_extensions import Literal 7 | 8 | from .llm import LLM, BaseLLM, BaseLLMParams, LLMParamsType, LLMResponse, LLMSettings 9 | from .message import Message, MessageRole 10 | from .openai_operators import ( 11 | AzureOpenAIChatOperator, 12 | AzureOpenAILLM, 13 | AzureOpenAITaskOperator, 14 | OpenAIChatOperator, 15 | OpenAILLM, 16 | OpenAITaskOperator, 17 | ) 18 | from .operator import BaseChatOperator, BaseOperator 19 | from .registry import llm_registry, operator_registry 20 | 21 | # Based on documentation from https://platform.openai.com/docs/models/overview 22 | ProviderOpenai = "openai" 23 | ProviderAzureOpenai = "azure-openai" 24 | ModelsOpenai = Literal[ 25 | "gpt-4", 26 | "gpt-3.5-turbo", 27 | "gpt-3.5-turbo-16k", 28 | "text-davinci-003", 29 | "text-davinci-002", 30 | "code-davinci-002", 31 | ] 32 | "All official OpenAI models" 33 | 34 | AllModels = Union[ModelsOpenai] 35 | 36 | 37 | def resolve_llm(provider: str, model: str = None, **kwargs) -> LLM: 38 | """ 39 | Resolves an LLM instance based on the provider and model name. 40 | 41 | Args: 42 | provider: Name of the provider 43 | model: Name of the model 44 | **kwargs: Additional arguments to pass to the LLM initialization 45 | 46 | Returns: 47 | llm (LLM): An initialized LLM instance 48 | """ 49 | if provider == ProviderOpenai: 50 | model = LLMSettings( 51 | provider=provider, 52 | model=model, 53 | version=kwargs.pop("version", None), 54 | **kwargs, 55 | ).model 56 | 57 | llm_instance = llm_registry.resolve(provider, model, **kwargs) 58 | return llm_instance 59 | 60 | 61 | @overload 62 | def resolve_operator( 63 | llm_instance: LLM, operator_type: Literal["task"] 64 | ) -> Type[BaseOperator]: 65 | ... 66 | 67 | 68 | @overload 69 | def resolve_operator( 70 | llm_instance: LLM, operator_type: Literal["chat"] 71 | ) -> Type[BaseChatOperator]: 72 | ... 73 | 74 | 75 | def resolve_operator(llm_instance: LLM, operator_type: str): 76 | """ 77 | Resolves an operator based on the LLM instance and the operator type. 78 | 79 | Args: 80 | llm_instance: instance of initialized LLM 81 | operator_type (str): either "task" or "chat" 82 | 83 | Returns: 84 | Operator type class 85 | 86 | """ 87 | return operator_registry.resolve(llm_instance, operator_type) 88 | -------------------------------------------------------------------------------- /src/declarai/operators/templates/output_prompt.py: -------------------------------------------------------------------------------- 1 | """ 2 | The logic for constructing the output prompt. 3 | These methods accept all "return" related properties of the python function and build a string 4 | output prompt from them. 
5 | """ 6 | from typing import Optional 7 | 8 | 9 | def compile_output_schema_template( 10 | return_name: str, return_type: str, return_doc: str, structured_template: str 11 | ) -> str: 12 | if not any([return_name, return_type, return_doc]): 13 | return "" 14 | return_name = return_name or "declarai_result" 15 | output_schema = f'"{return_name}": ' 16 | 17 | if return_type: 18 | output_schema += str(return_type) 19 | 20 | if return_doc: 21 | if not return_type and not return_name: 22 | return f"{return_doc}: " 23 | output_schema += f" # {return_doc}" 24 | 25 | if not output_schema: 26 | return "" 27 | 28 | return structured_template.format( 29 | output_schema=output_schema, return_name=return_name 30 | ) 31 | 32 | 33 | def compile_unstructured_template(return_type: str, return_docstring: str) -> str: 34 | """ 35 | Compiles the output prompt for unstructured output where a return type is still expected (for example int or float). 36 | Args: 37 | return_type: the type of the return value 38 | return_docstring: the description of the return value 39 | 40 | Returns: 41 | The compiled unstructured output prompt 42 | """ 43 | if return_type == "str": 44 | return "" 45 | output_prompt = "" 46 | if return_type: 47 | output_prompt += f"respond only with the value of type {return_type}:" 48 | if return_docstring: 49 | output_prompt += f" # {return_docstring}" 50 | 51 | return output_prompt 52 | 53 | 54 | def compile_output_prompt( 55 | str_schema: str, 56 | return_type: str, 57 | return_docstring: str, 58 | return_magic: str = None, 59 | structured: Optional[bool] = True, 60 | structured_template: Optional[str] = None, 61 | ) -> str: 62 | """ 63 | Compiles the output prompt for the given function properties. 64 | Args: 65 | str_schema: the string schema of the return value 66 | return_type: the type of the return value 67 | return_docstring: the description of the return value 68 | return_magic: the return name extracted from a magic call, used as a fallback schema 69 | structured: whether the output should be structured (JSON-like) or unstructured 70 | structured_template: the template used to render the structured output prompt 71 | 72 | Returns: 73 | The compiled output prompt 74 | """ 75 | str_schema = str_schema or return_magic 76 | 77 | if not structured: 78 | return compile_unstructured_template(return_type, return_docstring) 79 | 80 | return compile_output_schema_template( 81 | str_schema, return_type, return_docstring, structured_template 82 | ) 83 | -------------------------------------------------------------------------------- /src/declarai/memory/redis.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the RedisMessageHistory class, which is used to store chat message history in a Redis database. 3 | """ 4 | 5 | import json 6 | import logging 7 | from typing import List, Optional 8 | 9 | from ..operators import Message 10 | from .base import BaseChatMessageHistory 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | DEFAULT_TABLE_NAME = "message_store" 15 | """A table name for the Redis database.""" 16 | DEFAULT_URL = "redis://localhost:6379/0" 17 | """A URL for the Redis database.""" 18 | 19 | 20 | class RedisMessageHistory(BaseChatMessageHistory): 21 | """ 22 | Chat message history that stores history in a Redis database. 23 | 24 | Args: 25 | session_id: Arbitrary key that is used to store the messages for a single chat session. 26 | url: URL to connect to the Redis server. 27 | key_prefix: Prefix for the Redis key. 28 | ttl: Time-to-live for the message records. 
29 | """ 30 | 31 | def __init__( 32 | self, 33 | session_id: str, 34 | url: str = DEFAULT_URL, 35 | key_prefix: str = f"{DEFAULT_TABLE_NAME}:", 36 | ttl: Optional[int] = None, 37 | ): 38 | super().__init__() 39 | try: 40 | import redis # pylint: disable=import-outside-toplevel 41 | except ImportError: 42 | raise ImportError( 43 | "Could not import redis python package. " 44 | "Please install it with `pip install redis`." 45 | ) 46 | 47 | self.redis_client = redis.StrictRedis.from_url(url) 48 | self.session_id = session_id 49 | self.key_prefix = key_prefix 50 | self.ttl = ttl 51 | 52 | @property 53 | def key(self) -> str: 54 | """Construct the record key to use""" 55 | return self.key_prefix + self.session_id 56 | 57 | @property 58 | def history(self) -> List[Message]: 59 | """Retrieve the messages from Redis""" 60 | _items = self.redis_client.lrange(self.key, 0, -1) 61 | items = [json.loads(m.decode("utf-8")) for m in _items[::-1]] 62 | messages = [Message.parse_obj(obj=dict_item) for dict_item in items] 63 | return messages 64 | 65 | def add_message(self, message: Message) -> None: 66 | """Append the message to the record in Redis""" 67 | self.redis_client.lpush(self.key, json.dumps(message.dict())) 68 | if self.ttl: 69 | self.redis_client.expire(self.key, self.ttl) 70 | 71 | def clear(self) -> None: 72 | """Clear session memory from Redis""" 73 | self.redis_client.delete(self.key) 74 | -------------------------------------------------------------------------------- /src/declarai/middleware/base.py: -------------------------------------------------------------------------------- 1 | """ 2 | Base class for task middlewares. 3 | """ 4 | from abc import abstractmethod # pylint: disable=E0611 5 | from typing import Any, Dict, Iterator 6 | 7 | from declarai._base import TaskType 8 | 9 | 10 | class TaskMiddleware: 11 | """ 12 | Base class for task middlewares. Middlewares are used to wrap a task and perform some actions before and after the task is executed. 13 | They are mainly used for logging, but can be used for other purposes as well. 14 | Please see `LoggingMiddleware` for an example of a middleware. 15 | Args: 16 | task: The task to wrap 17 | kwargs: The keyword arguments to pass to the task 18 | Attributes: 19 | _task: The task to wrap 20 | _kwargs: The keyword arguments to pass to the task 21 | """ 22 | 23 | def __init__(self, task: TaskType, kwargs: Dict[str, Any] = None): 24 | self._task = task 25 | self._kwargs = kwargs 26 | 27 | def _stream(self) -> Iterator: 28 | """ 29 | Re-streams the streaming response, running the `after` side effects once the stream is exhausted 30 | Returns: 31 | An iterator over the streamed chunks 32 | """ 33 | for chunk in self._task._exec(self._kwargs): 34 | yield chunk 35 | self.after(self._task) 36 | 37 | def __call__(self) -> Any: 38 | """ 39 | Once the middleware is called, it executes the task and returns the result. 40 | Before it executes the task, it calls the `before` method, and after it executes the task, it calls the `after` method. 41 | Returns: 42 | The result of the task 43 | """ 44 | self.before(self._task) 45 | # If the task is streaming, handle it differently 46 | if self._task.operator.streaming: 47 | # Yield chunks from the task, then call the after method 48 | return self._stream() 49 | else: 50 | # Non-streaming tasks can be handled as before 51 | res = self._task._exec(self._kwargs) 52 | self.after(self._task) 53 | return res 54 | 55 | @abstractmethod 56 | def before(self, task: TaskType) -> None: 57 | """ 58 | Executed before the task is executed. 
Should be used to perform some actions before the task is executed. 59 | Args: 60 | task: the task to execute 61 | """ 62 | 63 | @abstractmethod 64 | def after(self, task: TaskType) -> None: 65 | """ 66 | Executed after the task is executed. Should be used to perform some actions after the task is executed. 67 | Args: 68 | task: the task to execute 69 | """ 70 | -------------------------------------------------------------------------------- /tests/api/test_task_decorator.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock, patch 2 | 3 | from declarai.declarai import magic 4 | from declarai.task import TaskDecorator 5 | 6 | 7 | @patch("declarai.task.PythonParser") 8 | @patch("declarai.task.resolve_operator") 9 | def test_task_decorator_no_args(mocked_resolve_operator, mocked_python_parser): 10 | operator_class_mock = MagicMock() 11 | operator_instance_mock = MagicMock() 12 | llm = MagicMock() 13 | middleware = MagicMock() 14 | 15 | # Setting up mocks 16 | mocked_python_parser.return_value = MagicMock() 17 | operator_instance_mock.parsed = mocked_python_parser.return_value 18 | operator_class_mock.return_value = operator_instance_mock 19 | mocked_resolve_operator.return_value = operator_class_mock 20 | 21 | middlewares = [middleware] 22 | 23 | task_decorator = TaskDecorator(llm=llm) 24 | decorator = task_decorator.task 25 | 26 | @decorator( 27 | middlewares=middlewares, 28 | ) 29 | def test_task(a: str, b: int) -> str: 30 | """ 31 | This is a test task 32 | :param a: this is a string 33 | :param b: this is an integer 34 | :return: returns a string 35 | """ 36 | return magic("return_name", a=a, b=b) 37 | 38 | assert task_decorator.llm == llm 39 | assert test_task.middlewares == middlewares 40 | assert test_task.__name__ == "test_task" 41 | assert test_task.operator.parsed == mocked_python_parser.return_value 42 | assert test_task.operator == operator_instance_mock 43 | passed_llm = operator_class_mock.call_args.kwargs["llm"] 44 | assert passed_llm == llm 45 | 46 | @decorator(middlewares=middlewares, llm_params={"temperature": 0.5}) 47 | def test_task(a: str, b: int) -> str: 48 | """ 49 | This is a test task 50 | :param a: this is a string 51 | :param b: this is an integer 52 | :return: returns a string 53 | """ 54 | 55 | passed_llm_params = operator_class_mock.call_args.kwargs["llm_params"] 56 | assert passed_llm_params == {"temperature": 0.5} 57 | assert test_task.__name__ == "test_task" 58 | assert test_task.middlewares == middlewares 59 | 60 | @decorator(llm_params={"temperature": 0.5}) 61 | def test_task(a: str, b: int) -> str: 62 | """ 63 | This is a test task 64 | :param a: this is a string 65 | :param b: this is an integer 66 | :return: returns a string 67 | """ 68 | 69 | test_task(llm_params={"temperature": 0.7}) 70 | passed_llm_params = operator_class_mock.call_args.kwargs["llm_params"] 71 | assert passed_llm_params == {"temperature": 0.5} 72 | operator_instance_mock.predict.assert_called_with(llm_params={"temperature": 0.7}) 73 | -------------------------------------------------------------------------------- /docs/css/termynal.css: -------------------------------------------------------------------------------- 1 | /** 2 | * termynal.js 3 | * 4 | * @author Ines Montani 5 | * @version 0.0.1 6 | * @license MIT 7 | */ 8 | 9 | :root { 10 | --color-bg: #252a33; 11 | --color-text: #eee; 12 | --color-text-subtle: #a2a2a2; 13 | } 14 | 15 | [data-termynal] { 16 | width: 750px; 17 | max-width: 100%; 18 | background: 
var(--color-bg); 19 | color: var(--color-text); 20 | /* font-size: 18px; */ 21 | font-size: 15px; 22 | /* font-family: 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; */ 23 | font-family: 'Roboto Mono', 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; 24 | border-radius: 4px; 25 | padding: 75px 45px 35px; 26 | position: relative; 27 | -webkit-box-sizing: border-box; 28 | box-sizing: border-box; 29 | } 30 | 31 | [data-termynal]:before { 32 | content: ''; 33 | position: absolute; 34 | top: 15px; 35 | left: 15px; 36 | display: inline-block; 37 | width: 15px; 38 | height: 15px; 39 | border-radius: 50%; 40 | /* A little hack to display the window buttons in one pseudo element. */ 41 | background: #d9515d; 42 | -webkit-box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930; 43 | box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930; 44 | } 45 | 46 | [data-termynal]:after { 47 | content: 'bash'; 48 | position: absolute; 49 | color: var(--color-text-subtle); 50 | top: 5px; 51 | left: 0; 52 | width: 100%; 53 | text-align: center; 54 | } 55 | 56 | a[data-terminal-control] { 57 | text-align: right; 58 | display: block; 59 | color: #aebbff; 60 | } 61 | 62 | [data-ty] { 63 | display: block; 64 | line-height: 2; 65 | } 66 | 67 | [data-ty]:before { 68 | /* Set up defaults and ensure empty lines are displayed. */ 69 | content: ''; 70 | display: inline-block; 71 | vertical-align: middle; 72 | } 73 | 74 | [data-ty="input"]:before, 75 | [data-ty-prompt]:before { 76 | margin-right: 0.75em; 77 | color: var(--color-text-subtle); 78 | } 79 | 80 | [data-ty="input"]:before { 81 | content: '$'; 82 | } 83 | 84 | [data-ty][data-ty-prompt]:before { 85 | content: attr(data-ty-prompt); 86 | } 87 | 88 | [data-ty-cursor]:after { 89 | content: attr(data-ty-cursor); 90 | font-family: monospace; 91 | margin-left: 0.5em; 92 | -webkit-animation: blink 1s infinite; 93 | animation: blink 1s infinite; 94 | } 95 | 96 | 97 | /* Cursor animation */ 98 | 99 | @-webkit-keyframes blink { 100 | 50% { 101 | opacity: 0; 102 | } 103 | } 104 | 105 | @keyframes blink { 106 | 50% { 107 | opacity: 0; 108 | } 109 | } -------------------------------------------------------------------------------- /src/declarai/python_parser/type_annotation_to_schema.py: -------------------------------------------------------------------------------- 1 | import json 2 | import typing 3 | from typing import Any, Dict, Optional 4 | 5 | import jsonref 6 | from pydantic import schema_json_of 7 | from pydantic.main import ModelMetaclass 8 | 9 | 10 | def resolve_pydantic_schema_recursive(schema_def: Dict[str, Any]) -> Any: 11 | obj_type = schema_def.get("type") 12 | if obj_type not in ("array", "object"): 13 | if "description" in schema_def: 14 | return f"{obj_type} - {schema_def.get('description')}" 15 | return obj_type 16 | 17 | schema = {} 18 | if obj_type == "object": 19 | if "properties" in schema_def: 20 | for k, v in schema_def["properties"].items(): 21 | schema[k] = resolve_pydantic_schema_recursive(v) 22 | elif "additionalProperties" in schema_def: 23 | return resolve_pydantic_schema_recursive(schema_def["additionalProperties"]) 24 | elif obj_type == "array": 25 | return [resolve_pydantic_schema_recursive(schema_def["items"])] 26 | 27 | return schema 28 | 29 | 30 | def resolve_to_json_schema(type_: Any) -> Dict: 31 | if isinstance(type_, ModelMetaclass): 32 | unresolved = type_.schema_json() 33 | else: 34 | unresolved = schema_json_of(type_) 35 | return jsonref.loads(unresolved) 36 | 37 | 38 | def 
schema_to_string_for_prompt(schema: str) -> str: 39 | schema = schema.replace("{", "{{").replace("}", "}}") 40 | return schema 41 | 42 | 43 | def type_annotation_to_str_schema(type_) -> Optional[str]: 44 | """ 45 | This method accepts arbitrary types defined in the return annotation of a function. 46 | It then creates a string representation of the annotation schema to be passed to the model. 47 | """ 48 | if type_.__module__ == "builtins": 49 | if type_ in (str, int, float, bool): 50 | return type_.__name__ 51 | 52 | if isinstance(type_, typing._GenericAlias): 53 | root_name = type_._name 54 | if not root_name: 55 | if type_.__origin__ == typing.Union: 56 | root_name = "Union" 57 | properties = [] 58 | for sub_type in type_.__args__: 59 | resolved_schema = resolve_to_json_schema(sub_type) 60 | properties.append(resolve_pydantic_schema_recursive(resolved_schema)) 61 | 62 | if len(properties) > 1: 63 | resolved_str_schema = f"{root_name}[{properties[0]}, {properties[1]}]" 64 | else: 65 | resolved_str_schema = f"{root_name}[{properties[0]}]" 66 | return schema_to_string_for_prompt(resolved_str_schema) 67 | 68 | resolved_schema = resolve_to_json_schema(type_) 69 | resolved_schema = resolve_pydantic_schema_recursive(resolved_schema) 70 | str_schema = json.dumps(resolved_schema, indent=4) 71 | return schema_to_string_for_prompt(str_schema) 72 | -------------------------------------------------------------------------------- /docs/features/jinja_templating.md: -------------------------------------------------------------------------------- 1 | ## Jinja Templating 2 | 3 | [Jinja](https://jinja.palletsprojects.com/en/2.11.x/) is a templating language for Python. 4 | 5 | We can use Jinja to create templates for our tasks. This is useful when: 6 | - The task has a lot of boilerplate code. 7 | - The task has a lot of parameters. 8 | - You want to control the task's prompt structure. 9 | 10 | For example, let's say we want to create a task that takes in a string and ranks its sentiment. We 11 | can use Jinja to create a template for this task: 12 | 13 | ```python 14 | import declarai 15 | from typing import List, Tuple 16 | 17 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 18 | 19 | 20 | @gpt_35.task 21 | def sentiment_classification(string: str, examples: List[Tuple[str, int]]) -> int: 22 | """ 23 | Classify the sentiment of the provided string, based on the provided examples. 24 | The sentiment is ranked on a scale of 1-5, with 5 being the most positive. 25 | {% for example in examples %} 26 | {{ example[0] }} // {{ example[1] }} 27 | {% endfor %} 28 | {{ string }} // 29 | """ 30 | 31 | 32 | sentiment_classification.compile(string="I love this product but there are some annoying bugs", 33 | examples=[["I love this product", 5], ["I hate this product", 1]]) 34 | 35 | >>> {'messages': [ 36 | system: respond only with the value of type int:, # (1)! 37 | user: Classify the sentiment of the provided string, based on the provided examples. The sentiment is ranked on a scale of 1-5, with 5 being the most positive. # (2)! 38 | I love this product // 5 39 | I hate this product // 1 40 | I love this product // 41 | ] 42 | } 43 | 44 | sentiment_classification(string="I love this product but there are some annoying bugs", 45 | examples=[["I love this product", 5], ["I hate this product", 1]]) 46 | 47 | >>> 4 48 | ``` 49 | 50 | 51 | 1. The system message is generated based on the return type `int` of the function. 52 | 2. The user message is generated based on the docstring of the function. 
The Jinja template is rendered with the provided parameters. 53 | 54 | 55 | Same thing can be done with the `chat` decorator: 56 | 57 | ```python 58 | import declarai 59 | 60 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 61 | 62 | 63 | @gpt_35.experimental.chat 64 | class TranslatorBot: 65 | """ 66 | You are a translator bot, 67 | You will translate the provided text from English to {{ language }}. 68 | Do not translate the following categories of words: {{ exclude_words }} 69 | """ 70 | 71 | 72 | bot = TranslatorBot(language="French", exclude_words=["bad words"]) 73 | 74 | bot.compile() 75 | 76 | >>> {'messages': [ 77 | system: You are a translator bot, You will translate the provided text from English to French. 78 | Do not translate the following categories of words: ['bad words'] 79 | ] 80 | } 81 | ``` -------------------------------------------------------------------------------- /tests/test_declarai.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock, patch 2 | 3 | import declarai 4 | 5 | 6 | @patch("declarai.declarai.resolve_llm") 7 | @patch("declarai.declarai.TaskDecorator") 8 | def test_declarai(mocked_task_decorator, mocked_resolve_llm): 9 | kwargs = {} 10 | mocked_resolve_llm.return_value = MagicMock() 11 | mocked_task_decorator.return_value.task = MagicMock() 12 | 13 | dec = declarai.Declarai(provider="test", model="test", **kwargs) 14 | 15 | assert dec.llm == mocked_resolve_llm.return_value 16 | assert dec.task == mocked_task_decorator.return_value.task 17 | 18 | # Test experimental apis 19 | assert dec.experimental 20 | 21 | 22 | def test_declarai_openai(): 23 | kwargs = {"model": "davinci", "openai_token": "test_token"} 24 | dec = declarai.openai(**kwargs) 25 | assert dec.llm.provider == "openai" 26 | assert dec.llm.model == "davinci" 27 | assert dec.llm.api_key == "test_token" 28 | 29 | 30 | def test_declarai_openai_back_compat(): 31 | from declarai import Declarai 32 | kwargs = {"model": "davinci", "openai_token": "test_token"} 33 | dec = Declarai.openai(**kwargs) 34 | assert dec.llm.provider == "openai" 35 | assert dec.llm.model == "davinci" 36 | assert dec.llm.api_key == "test_token" 37 | 38 | 39 | def test_declarai_openai_back_compat2(): 40 | from declarai import Declarai 41 | import declarai 42 | kwargs = {"model": "davinci", "openai_token": "test_token"} 43 | dec = declarai.openai(**kwargs) 44 | assert dec.llm.provider == "openai" 45 | assert dec.llm.model == "davinci" 46 | assert dec.llm.api_key == "test_token" 47 | 48 | kwargs = { 49 | "model": "davinci", 50 | "openai_token": "test_token", 51 | "stream": True, 52 | } 53 | declarai = Declarai.openai( 54 | **kwargs 55 | ) 56 | 57 | assert declarai.llm.streaming is True 58 | assert declarai.llm.provider == "openai" 59 | assert declarai.llm.model == "davinci" 60 | assert declarai.llm.api_key == "test_token" 61 | 62 | 63 | def test_declarai_azure_openai(): 64 | from declarai import Declarai 65 | import declarai 66 | kwargs = { 67 | "deployment_name": "test", 68 | "azure_openai_key": "123", 69 | "azure_openai_api_base": "456", 70 | "api_version": "789", 71 | } 72 | dec = declarai.azure_openai(**kwargs) 73 | 74 | assert dec.llm.provider == "azure-openai" 75 | assert dec.llm.model == "test" 76 | assert dec.llm.api_key == "123" 77 | assert dec.llm._kwargs["api_base"] == "456" 78 | assert dec.llm._kwargs["api_version"] == "789" 79 | 80 | declarai = Declarai.azure_openai( 81 | **kwargs 82 | ) 83 | 84 | assert declarai.llm.provider == "azure-openai" 
85 | assert declarai.llm.model == "test" 86 | assert declarai.llm.api_key == "123" 87 | assert declarai.llm._kwargs["api_base"] == "456" 88 | assert declarai.llm._kwargs["api_version"] == "789" 89 | -------------------------------------------------------------------------------- /src/declarai/memory/mongodb.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the MongoDBMessageHistory class, which is used to store chat message history in a MongoDB database. 3 | 4 | """ 5 | import json 6 | import logging 7 | from typing import List 8 | 9 | from ..operators import Message 10 | from .base import BaseChatMessageHistory 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | DEFAULT_DBNAME = "chat_history" 15 | """A database name for the MongoDB database.""" 16 | DEFAULT_COLLECTION_NAME = "message_store" 17 | """A collection name for the MongoDB database.""" 18 | DEFAULT_CONNECTION_STRING = "mongodb://localhost:27017" 19 | """A connection string for a MongoDB database.""" 20 | 21 | 22 | class MongoDBMessageHistory(BaseChatMessageHistory): 23 | """ 24 | Chat message history that stores history in MongoDB. 25 | 26 | Args: 27 | connection_string: connection string to connect to MongoDB 28 | session_id: Arbitrary key that is used to store the messages for a single chat session. 29 | database_name: name of the database to use 30 | collection_name: name of the collection to use 31 | """ 32 | 33 | def __init__( 34 | self, 35 | session_id: str, 36 | connection_string: str = DEFAULT_CONNECTION_STRING, 37 | database_name: str = DEFAULT_DBNAME, 38 | collection_name: str = DEFAULT_COLLECTION_NAME, 39 | ): 40 | try: 41 | from pymongo import MongoClient 42 | except ImportError: 43 | raise ImportError( 44 | "Could not import pymongo python package. " 45 | "Please install it with `pip install pymongo`." 46 | ) 47 | 48 | self.connection_string = connection_string 49 | self.session_id = session_id 50 | self.database_name = database_name 51 | self.collection_name = collection_name 52 | 53 | self.client: MongoClient = MongoClient(connection_string) 54 | self.db = self.client[database_name] 55 | self.collection = self.db[collection_name] 56 | self.collection.create_index("SessionId") 57 | 58 | @property 59 | def history(self) -> List[Message]: 60 | """Retrieve the messages from MongoDB""" 61 | cursor = self.collection.find({"SessionId": self.session_id}) 62 | 63 | if cursor: 64 | items = [json.loads(document["History"]) for document in cursor] 65 | else: 66 | items = [] 67 | 68 | messages = [Message.parse_obj(obj=dict_item) for dict_item in items] 69 | return messages 70 | 71 | def add_message(self, message: Message) -> None: 72 | """Append the message to the record in MongoDB""" 73 | self.collection.insert_one( 74 | { 75 | "SessionId": self.session_id, 76 | "History": json.dumps(message.dict()), 77 | } 78 | ) 79 | 80 | def clear(self) -> None: 81 | """Clear session memory from MongoDB""" 82 | self.collection.delete_many({"SessionId": self.session_id}) 83 | -------------------------------------------------------------------------------- /docs/examples/deployments/index.md: -------------------------------------------------------------------------------- 1 | # Deployments ⚒️ 2 | Ready to deploy your code? Here are some resources to help you get started: 3 | 4 | 5 | ## FastAPI 6 | Deploying business logic as a REST API is a common pattern, and **FastAPI** is an excellent fit for it. 
7 | Here's how you can deploy your Declarai code behind a REST API using FastAPI: 8 | 9 | ```python 10 | from typing import Dict 11 | from pydantic import BaseModel 12 | from fastapi import FastAPI, APIRouter 13 | import declarai 14 | app = FastAPI() 15 | router = APIRouter() 16 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 17 | 18 | 19 | @gpt_35.task 20 | def movie_recommender(user_input: str) -> Dict[str, str]: 21 | """ 22 | Recommend a selection of real movies to watch based on the user input 23 | For each movie provide its name and a short description as well. 24 | :param user_input: The user's input 25 | :return: A mapping between movie names and descriptions 26 | """ 27 | 28 | 29 | class MovieRecommendationRequest(BaseModel): 30 | user_input: str 31 | 32 | 33 | @router.post("/movie_recommender", response_model=Dict[str, str]) 34 | def run_movie_recommender(request: MovieRecommendationRequest) -> Dict[str, str]: 35 | """ 36 | Run the movie recommender task behind a post request 37 | """ 38 | return movie_recommender(user_input=request.user_input) 39 | 40 | 41 | app.include_router(router) 42 | 43 | if __name__ == "__main__": 44 | import uvicorn 45 | 46 | uvicorn.run(app) 47 | ``` 48 | 49 | You can now run the server with `python app.py` and send a POST request to `http://localhost:8000/movie_recommender`: 50 | 51 | 52 | ```python 53 | import requests 54 | 55 | res = requests.post("http://localhost:8000/movie_recommender", 56 | json={"user_input": "I want to watch a movie about space"}) 57 | >>> res.json() 58 | 59 | {'Gravity': 'Two astronauts work together to survive after an accident leaves ' 60 | 'them stranded in space.', 61 | 'Interstellar': 'A team of explorers travel through a wormhole in space in an ' 62 | "attempt to ensure humanity's survival.", 63 | 'The Martian': 'An astronaut is left stranded on Mars and must find a way to ' 64 | 'survive until rescue is possible.'} 65 | 66 | ``` 67 | 68 | ## Streamlit app 69 | Streamlit is a great tool for quickly building interactive web apps. 70 | Assuming you have deployed your Declarai code as a REST API, you can use the following snippet to build a Streamlit app that interacts with it: 71 | ```python 72 | import streamlit as st 73 | import requests 74 | 75 | BACKEND_URL = "http://localhost:8000" 76 | st.title("Welcome to Movie Recommender System") 77 | st.write("This is a demo of a movie recommender system built using Declarai") 78 | 79 | user_input = st.text_input("What kind of movies do you like?") 80 | button = st.button("Submit") 81 | if button: 82 | print(user_input) 83 | with st.spinner("Thinking.."): 84 | res = requests.post(f"{BACKEND_URL}/movie_recommender", json={"user_input": user_input}) 85 | st.write(res.json()) 86 | ``` 87 | ![img.png](streamlit_img.png) 88 | -------------------------------------------------------------------------------- /src/declarai/_base.py: -------------------------------------------------------------------------------- 1 | """ 2 | Base classes for declarai tasks. 3 | """ 4 | from abc import abstractmethod 5 | from typing import Any, TypeVar, Iterator 6 | 7 | from declarai.operators import ( 8 | BaseOperator, 9 | LLMParamsType, 10 | LLMResponse, 11 | ) 12 | 13 | 14 | class BaseTask: 15 | """ 16 | Base class for tasks. 
17 | """ 18 | 19 | operator: BaseOperator 20 | "The operator to use for the task" 21 | 22 | llm_response: LLMResponse 23 | "The response from the LLM" 24 | 25 | llm_stream_response: Iterator[LLMResponse] = None 26 | "The response from the LLM when streaming" 27 | 28 | @property 29 | def llm_params(self) -> LLMParamsType: 30 | """ 31 | Return the LLM parameters that are saved on the operator. These parameters are sent to the LLM when the task is 32 | executed. 33 | Returns: The LLM parameters 34 | 35 | """ 36 | return self.operator.llm_params 37 | 38 | @abstractmethod 39 | def _exec(self, kwargs: dict) -> Any: 40 | """ 41 | Execute the task 42 | Args: 43 | kwargs: the runtime keyword arguments that are used to compile the task prompt. 44 | 45 | Returns: The result of the task, which is the result of the operator. 46 | 47 | """ 48 | pass 49 | 50 | @abstractmethod 51 | def _exec_middlewares(self, kwargs) -> Any: 52 | """ 53 | Execute the task middlewares and the task itself 54 | Args: 55 | kwargs: the runtime keyword arguments that are used to compile the task prompt. 56 | 57 | Returns: The result of the task, which is the result of the operator. Same as `_exec`. 58 | 59 | """ 60 | pass 61 | 62 | @abstractmethod 63 | def compile(self, **kwargs) -> str: 64 | """ 65 | Compile the task to get the prompt sent to the LLM 66 | Args: 67 | **kwargs: the runtime keyword arguments that are placed within the prompt string. 68 | 69 | Returns: The prompt string that is sent to the LLM 70 | 71 | """ 72 | pass 73 | 74 | def __call__(self, *args, **kwargs): 75 | """ 76 | Orchestrates the execution of the task 77 | Args: 78 | *args: Depends on the inherited class 79 | **kwargs: Depends on the inherited class 80 | 81 | Returns: The result of the task, after parsing the result of the LLM. 82 | 83 | """ 84 | pass 85 | 86 | def stream_handler(self, stream: Iterator[LLMResponse]) -> Iterator[LLMResponse]: 87 | """ 88 | A generator that yields each chunk from the stream and collects them in a buffer. 89 | After the stream is exhausted, it runs the cleanup logic. 90 | """ 91 | response_buffer = [] 92 | for chunk in stream: 93 | response_buffer.append(chunk) 94 | yield chunk 95 | 96 | # After the stream is exhausted, run the cleanup logic 97 | self.stream_cleanup(response_buffer[-1]) 98 | 99 | def stream_cleanup(self, last_chunk: LLMResponse): 100 | self.llm_response = last_chunk 101 | 102 | 103 | TaskType = TypeVar("TaskType", bound=BaseTask) 104 | -------------------------------------------------------------------------------- /docs/features/evals/index.md: -------------------------------------------------------------------------------- 1 | # Evaluations 2 | 3 | The `evals` library is an addition over the base `declarai` library that provides tools to track and benchmark 4 | the performance of prompt strategies across models and providers. 5 | 6 | We understand that a major challenge in the field of prompt engineering is the lack of a standardised way to evaluate prompts, 7 | along with the continuously evolving nature of the field. As such, we have designed the `evals` library to be a lean 8 | wrapper over the `declarai` library that allows users to easily track and benchmark changes in prompts and models. 9 | 10 | ### Usage 11 | 12 | <div class="termy"> 
13 | 14 | ```console 15 | $ python -m declarai.evals.evaluator 16 | Running Extraction scenarios... 17 | single_value_extraction... 18 | ---> 100% 19 | multi_value_extraction... 20 | ---> 100% 21 | multi_value_multi_type_extraction... 22 | ---> 100% 23 | ... 24 | Done! 25 | ``` 26 | 27 |
28 | 29 | ### Evaluations 30 | The output table will allow you to review the performance of your task across models and provides and make an informed 31 | decision on which model and provider to use for your task. 32 | 33 | | Provider | Model | version | Scenario | runtime |
output
| 34 | |:---------|:--------------|:--------|:---------------------------------|:--------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 35 | | openai | gpt-3.5-turbo | latest | generate_a_poem_no_metadata | 1.235s | Using LLMs is fun! | 36 | | openai | gpt-3.5-turbo | 0301 | generate_a_poem_no_metadata | 0.891s | Using LLMs is fun! It's like playing with words Creating models that learn And watching them fly like birds | 37 | | openai | gpt-3.5-turbo | 0613 | generate_a_poem_no_metadata | 1.071s | Using LLMs is fun! | 38 | | openai | gpt-4 | latest | generate_a_poem_no_metadata | 3.494s | {'poem': 'Using LLMs, a joyous run,\nIn the world of AI, under the sun.\nWith every task, they stun,\nIndeed, using LLMs is fun!'} | 39 | | openai | gpt-4 | 0613 | generate_a_poem_no_metadata | 4.992s | {'title': 'Using LLMs is fun!', 'poem': "With LLMs, the fun's just begun, \nCoding and learning, second to none. \nComplex tasks become a simple run, \nOh, the joy when the work is done!"} | 40 | | openai | gpt-3.5-turbo | latest | generate_a_poem_only_return_type | 2.1s | Learning with LLMs, a delightful run, Exploring new knowledge, it's never done. With every challenge, we rise and we stun, Using LLMs, the learning is always fun! | 41 | -------------------------------------------------------------------------------- /docs/newsletter.md: -------------------------------------------------------------------------------- 1 | Subscribe to our newsletter to stay up to date with the latest news about Declarai, and other cool stuff 📬 2 | 3 | 
4 | <!-- Embedded Mailchimp signup form (markup not recoverable): an email field, a "Subscribe" button, an "* indicates required" note, and the Intuit Mailchimp badge. --> 
33 | -------------------------------------------------------------------------------- /docs/features/chat/chat-memory/index.md: -------------------------------------------------------------------------------- 1 | # Chat memory :brain: 2 | 3 | A chat instance saves the message history and uses it to generate future responses. 4 | Here is an example of a chatbot that retains conversation history across multiple `send` requests. 5 | ```py 6 | @declarai.experimental.chat 7 | class SQLBot: 8 | """ 9 | You are a sql assistant. You help with SQL related questions with one-line answers. 10 | """ 11 | 12 | sql_bot = SQLBot() 13 | 14 | sql_bot.send("When should I use a LEFT JOIN?") # (1)! 15 | > "You should use a LEFT JOIN when you want to retrieve all records from the left table and matching records from the right table." 16 | 17 | sql_bot.send("But how is it different from a RIGHT JOIN?") # (2)! 18 | > "A LEFT JOIN retrieves all records from the left table and matching records from the right table, while a RIGHT JOIN retrieves all records from the right table and matching records from the left table." 19 | ``` 20 | 21 | 1. The first message is sent with the system prompt. 22 | 2. The second message is sent with the previous conversation and therefore the model is aware of the first question. 23 | 24 | 25 | ## Conversation History 26 | You can view the conversation history by accessing the `conversation` attribute. 27 | 28 | ```py 29 | sql_bot.conversation 30 | 31 | > [ 32 | user: When should I use a LEFT JOIN?, 33 | assistant: You should use a LEFT JOIN when you want to retrieve all records from the left table and matching records from the right table., 34 | user: But how is it different from a RIGHT JOIN?, 35 | assistant: A LEFT JOIN retrieves all records from the left table and matching records from the right table, while a RIGHT JOIN retrieves all records from the right table and matching records from the left table. 36 | ] 37 | 38 | ``` 39 | 40 | !!! warning 41 | 42 | Keep in mind that the conversation history does not contain the system prompt. It only contains the user messages and the chatbot responses. 43 | 44 | If you want to access the system message, you can use the `system` attribute. 45 | 46 | ```py 47 | sql_bot.system 48 | 49 | > "system: You are a sql assistant. You help with SQL related questions with one-line answers.\n" 50 | ``` 51 | 52 | 53 | ## Default Memory 54 | 55 | **The default message history of a chat is a simple in-memory list**. This means that history exists only for the duration of the chatbot session. 56 | 57 | If you prefer to have a persistent history, you can use the `FileMessageHistory` class from the `declarai.memory` module. 58 | 59 | 60 | ## Setting up a memory 61 | Setting up a memory is done by passing `chat_history` as a keyword argument to the `declarai.experimental.chat` decorator. 62 | 63 | ```py 64 | import declarai 65 | from declarai.memory import FileMessageHistory 66 | 67 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 68 | 69 | @gpt_35.experimental.chat(chat_history=FileMessageHistory("sql_bot_history.txt")) # (1)! 70 | class SQLBot: 71 | """ 72 | You are a sql assistant. You help with SQL related questions with one-line answers. 73 | """ 74 | ``` 75 | 76 | 1. The file path is not mandatory. If you do not provide one, the history is stored in a file inside a tmp directory. 
77 | 78 | We can also initialize the chat_history at runtime: 79 | 80 | ```py 81 | import declarai 82 | from declarai.memory import FileMessageHistory 83 | 84 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 85 | 86 | @gpt_35.experimental.chat 87 | class SQLBot: 88 | """ 89 | You are a sql assistant. You help with SQL related questions with one-line answers. 90 | """ 91 | sql_bot = SQLBot(chat_history=FileMessageHistory("sql_bot_history.txt")) 92 | ``` 93 | -------------------------------------------------------------------------------- /src/declarai/memory/postgres.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the PostgresMessageHistory class, which is used to store chat message history in a PostgreSQL database. 3 | 4 | """ 5 | import json 6 | import logging 7 | from typing import List, Optional 8 | 9 | from ..operators import Message 10 | from .base import BaseChatMessageHistory 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | DEFAULT_TABLE_NAME = "message_store" 15 | "A table name for the PostgreSQL database." 16 | DEFAULT_CONNECTION_STRING = "postgresql://postgres:postgres@localhost:5432/postgres" 17 | "A connection string for a PostgreSQL database." 18 | 19 | 20 | class PostgresMessageHistory(BaseChatMessageHistory): 21 | """ 22 | Chat message history that stores history in a PostgreSQL database. 23 | 24 | Args: 25 | session_id: Arbitrary key that is used to store the messages for a single chat session. 26 | connection_string: Database connection string. 27 | table_name: Name of the table to use. 28 | """ 29 | 30 | def __init__( 31 | self, 32 | session_id: str, 33 | connection_string: Optional[str] = DEFAULT_CONNECTION_STRING, 34 | table_name: str = DEFAULT_TABLE_NAME, 35 | ): 36 | try: 37 | import psycopg2 # pylint: disable=import-outside-toplevel 38 | except ImportError: 39 | raise ImportError( 40 | "Cannot import psycopg2. " 41 | "Please install psycopg2 to use PostgresMessageHistory." 
42 | ) 43 | self.conn = psycopg2.connect(connection_string) 44 | self.cursor = self.conn.cursor() 45 | self.table_name = table_name 46 | self.session_id = session_id 47 | self._initialize_tables() 48 | 49 | def _initialize_tables(self): 50 | """Initialize the tables if they don't exist.""" 51 | create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} ( 52 | id SERIAL PRIMARY KEY, 53 | session_id TEXT NOT NULL, 54 | message JSONB NOT NULL 55 | );""" 56 | self.cursor.execute(create_table_query) 57 | self.conn.commit() 58 | 59 | @property 60 | def history(self) -> List[Message]: 61 | """Retrieve the messages from the database.""" 62 | query = ( 63 | f"SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;" 64 | ) 65 | self.cursor.execute(query, (self.session_id,)) 66 | rows = self.cursor.fetchall() 67 | messages = [Message.parse_obj(row[0]) for row in rows] 68 | return messages 69 | 70 | def add_message(self, message: Message) -> None: 71 | """Add a message to the database.""" 72 | from psycopg2 import sql 73 | 74 | query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format( 75 | sql.Identifier(self.table_name) 76 | ) 77 | self.cursor.execute(query, (self.session_id, json.dumps(message.dict()))) 78 | self.conn.commit() 79 | 80 | def clear(self) -> None: 81 | """Clear session memory from the database.""" 82 | query = f"DELETE FROM {self.table_name} WHERE session_id = %s;" 83 | self.cursor.execute(query, (self.session_id,)) 84 | self.conn.commit() 85 | 86 | def close(self): 87 | """Close cursor and connection.""" 88 | self.cursor.close() 89 | self.conn.close() 90 | 91 | def __del__(self): 92 | """Destructor to close cursor and connection.""" 93 | if hasattr(self, "cursor"): 94 | self.cursor.close() 95 | if hasattr(self, "conn"): 96 | self.conn.close() 97 | -------------------------------------------------------------------------------- /docs/features/chat/controlling-chat-behavior.md: -------------------------------------------------------------------------------- 1 | ## Greetings :material-human-greeting: 2 | 3 | Greetings are used to start the conversation with a bot message instead of a user message. 4 | The `greeting` attribute defines this first message and is added to the conversation on initialization. 5 | 6 | ```py 7 | import declarai 8 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 9 | 10 | @gpt_35.experimental.chat 11 | class SQLBot: 12 | """ 13 | You are a sql assistant. You help with SQL queries with one-line answers. 14 | """ 15 | greeting = "Hello, I am a SQL assistant. How can I assist you today?" 16 | ``` 17 | 18 | The greeting attribute is later available as a property of the chatbot instance to use when implementing your interface. 19 | ```py 20 | sql_bot = SQLBot() 21 | sql_bot.greeting 22 | 23 | > "Hello, I am a SQL assistant. How can I assist you today?" 24 | ``` 25 | 26 | ```py 27 | 28 | sql_bot.send("When should I use a LEFT JOIN?") 29 | 30 | > 'You should use a LEFT JOIN when you want to retrieve all records from the left table and matching records from the right table.' 31 | 32 | sql_bot.conversation 33 | 34 | > [ # (1)! 35 | assistant: Hello, I am a SQL assistant. How can I assist you today?, 36 | user: When should I use a LEFT JOIN?, 37 | assistant: You should use a LEFT JOIN when you want to retrieve all records from the left table and matching records from the right table. 38 | ] 39 | ``` 40 | 41 | 1. We can see here that the greeting, initiated by the assistant, is the first message in the conversation. 
42 | 43 | ## Inject a message to the memory 44 | 45 | Declarai enables injecting custom messages into the conversation history by using the `add_message` method. 46 | 47 | This is super useful when you want to intervene in the conversation flow without necessarily triggering another response from the model. 48 | 49 | Consider using it for: 50 | 51 | * Creating a prefilled conversation even before the user's interaction. 52 | * Modifying the chatbot memory after the chatbot has generated a response. 53 | * Modifying the chatbot system prompt. 54 | * Guiding the conversation flow when certain criteria are met in the user-bot interaction. 55 | 56 | ```py 57 | sql_bot = SQLBot() 58 | sql_bot.add_message("From now on, answer I DONT KNOW on any question asked by the user", role="system") 59 | # (1)! 60 | sql_bot.send("What is your favorite SQL operation?") 61 | 62 | > "I don't know." 63 | ``` 64 | 65 | 1. The chatbot's conversation history now contains the injected message and reacts accordingly. 66 | 67 | 68 | ## Dynamic system prompting 69 | In the following example, we will pass a parameter to the chatbot system prompt. 70 | This value will be populated at runtime and will allow us to easily create base chatbots with varying behaviors. 71 | 72 | ```py 73 | import declarai 74 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 75 | 76 | @gpt_35.experimental.chat 77 | class JokeGenerator: 78 | """ 79 | You are a joke generator. You generate jokes that a {character} would tell. 80 | """ # (1)! 81 | 82 | 83 | generator = JokeGenerator() 84 | favorite_joke = generator.send(character="Spongebob", message="What is your favorite joke?") 85 | squidward_joke = generator.send(message="What jokes can you tell about squidward?") 86 | 87 | print(favorite_joke) 88 | print(squidward_joke) 89 | ``` 90 | 91 | 1. The system prompt now contains the parameter `{character}`. This parameter will be replaced by the value passed to the `send` method. 92 | 93 | ```py 94 | > "Why did the jellyfish go to school? Because it wanted to improve its "sting-uage" skills!" 95 | > "Why did Squidward bring a ladder to work? Because he wanted to climb up the corporate "sour-cules"!" 96 | ``` 97 | -------------------------------------------------------------------------------- /src/declarai/middleware/third_party/wandb_monitor.py: -------------------------------------------------------------------------------- 1 | """ 2 | Wandb Monitor Middleware, used to monitor task execution on wandb. 3 | """ 4 | from __future__ import annotations 5 | 6 | import importlib 7 | from time import time 8 | 9 | from declarai.middleware.base import TaskMiddleware 10 | 11 | 12 | class WandDBMonitorCreator: 13 | """ 14 | Creates a WandDBMonitor middleware for a given task. 
15 | 16 | Args: 17 | name (str): The name of the run on wandb 18 | project (str): The name of the project on wandb 19 | key (str): The api key for wandb 20 | 21 | Returns: 22 | (WandDBMonitor): A WandDBMonitor middleware 23 | 24 | Example: 25 | ```py 26 | WandDBMonitor = WandDBMonitorCreator( 27 | name="<run-name>", 28 | project="<project-name>", 29 | key="<wandb-api-key>", 30 | ) 31 | 32 | @openai.task(middlewares=[WandDBMonitor]) 33 | def generate_a_poem(title: str): 34 | ''' 35 | Generate a poem based on the given title 36 | :return: The generated poem 37 | ''' 38 | return declarai.magic("poem", title) 39 | ``` 40 | """ 41 | 42 | def __new__(cls, name: str, project: str, key: str) -> "WandDBMonitor": # noqa 43 | if importlib.util.find_spec("wandb"): 44 | import wandb 45 | from wandb.sdk.data_types.trace_tree import Trace 46 | 47 | wandb.login(key=key) 48 | wandb.init(id=name, name=name, project=project, resume="allow") 49 | else: 50 | raise ImportError("wandb is not installed") 51 | 52 | class WandDBMonitor(TaskMiddleware): 53 | """ 54 | WandDBMonitor middleware. 55 | """ 56 | 57 | _start_time_ms: int = None 58 | 59 | def before(self, _): 60 | self._start_time_ms = int(time() * 1000) 61 | 62 | def after(self, task): 63 | status = "success" 64 | status_message = "" 65 | end_time_ms = int(time() * 1000) # logged in milliseconds 66 | root_span = Trace( 67 | name=task.__name__, 68 | kind="llms", 69 | status_code=status, 70 | status_message=status_message, 71 | metadata={ 72 | "structured": task.prompt_config.structured, 73 | "multi_results": task.prompt_config.multi_results, 74 | "return_name": task.prompt_config.return_name, 75 | "temperature": task.prompt_config.temperature, 76 | "max_tokens": task.prompt_config.max_tokens, 77 | "top_p": task.prompt_config.top_p, 78 | "frequency_penalty": task.prompt_config.frequency_penalty, 79 | "presence_penalty": task.prompt_config.presence_penalty, 80 | "response": task.llm_response.response, 81 | "model": task.llm.model, 82 | "prompt_tokens": task.llm_response.prompt_tokens, 83 | "completion_tokens": task.llm_response.completion_tokens, 84 | "total_tokens": task.llm_response.total_tokens, 85 | }, 86 | start_time_ms=self._start_time_ms, 87 | end_time_ms=end_time_ms, 88 | inputs={"query": task.compile(**self._kwargs)}, 89 | outputs={"response": task.llm_response.response}, 90 | ) 91 | 92 | # log the span to wandb 93 | root_span.log(name=task.__name__) 94 | 95 | return WandDBMonitor 96 | -------------------------------------------------------------------------------- /src/declarai/evals/runner.py: -------------------------------------------------------------------------------- 1 | from time import time 2 | from typing import Any, Callable, Dict 3 | 4 | from rich.progress import Progress 5 | from rich.table import Table 6 | 7 | from declarai import Declarai 8 | 9 | 10 | def evaluate_single_task_scenario( 11 | scenario_name: str, 12 | scenario: Callable, 13 | scenario_kwargs: Dict[str, Any], 14 | models: Dict[str, Declarai], 15 | table: Table, 16 | ): 17 | with Progress() as progress: 18 | evaluator = progress.add_task(f"[red]{scenario_name}...", total=len(models)) 19 | 20 | for model, declarai in models.items(): 21 | try: 22 | initialized_scenario = declarai.task(scenario) 23 | 24 | start_time = time() 25 | res = initialized_scenario(**scenario_kwargs) 26 | total_time = time() - start_time 27 | progress.update(evaluator, advance=1) 28 | 29 | try: 30 | input_tokens = str(initialized_scenario.llm_response.prompt_tokens) 31 | output_tokens = str( 32 | 
initialized_scenario.llm_response.completion_tokens 33 | ) 34 | except: # noqa 35 | input_tokens = "error" 36 | output_tokens = "error" 37 | 38 | table.add_row( 39 | declarai.llm_config.provider, 40 | declarai.llm_config.model, 41 | declarai.llm_config.version or "latest", 42 | scenario_name, 43 | f"{round(total_time, 3)}s", 44 | input_tokens, 45 | output_tokens, 46 | str(res), 47 | ) 48 | except Exception as e: 49 | print(f"Error: {e}") 50 | table.add_row( 51 | declarai.llm_config.provider, 52 | declarai.llm_config.model, 53 | declarai.llm_config.version or "latest", 54 | scenario_name, 55 | "error", 56 | "error", 57 | "error", 58 | repr(e), 59 | ) 60 | 61 | 62 | def evaluate_sequence_task_scenario( 63 | scenario_name: str, 64 | scenario: Callable, 65 | scenario_kwargs: Dict[str, Any], 66 | models: Dict[str, Declarai], 67 | table: Table, 68 | ): 69 | with Progress() as progress: 70 | evaluator = progress.add_task(f"[red]{scenario_name}...", total=len(models)) 71 | 72 | for model, declarai in models.items(): 73 | try: 74 | initialized_scenario = scenario(declarai, **scenario_kwargs) 75 | start_time = time() 76 | res = initialized_scenario() 77 | total_time = time() - start_time 78 | progress.update(evaluator, advance=1) 79 | 80 | try: 81 | input_tokens = str(initialized_scenario.llm_response.prompt_tokens) 82 | output_tokens = str( 83 | initialized_scenario.llm_response.completion_tokens 84 | ) 85 | except: # noqa 86 | input_tokens = "error" 87 | output_tokens = "error" 88 | 89 | table.add_row( 90 | declarai.llm_config.provider, 91 | declarai.llm_config.model, 92 | declarai.llm_config.version or "latest", 93 | scenario_name, 94 | f"{round(total_time, 3)}s", 95 | input_tokens, 96 | output_tokens, 97 | str(res), 98 | ) 99 | except Exception as e: 100 | print(f"Error: {e}") 101 | table.add_row( 102 | declarai.llm_config.provider, 103 | declarai.llm_config.model, 104 | declarai.llm_config.version or "latest", 105 | scenario_name, 106 | "error", 107 | "error", 108 | "error", 109 | repr(e), 110 | ) 111 | -------------------------------------------------------------------------------- /tests/tasks/test_llm_task.py: -------------------------------------------------------------------------------- 1 | # from typing import Dict 2 | # from unittest.mock import MagicMock 3 | # 4 | # from declarai.orchestrator.future_llm_task import FutureTask 5 | # from declarai.operators.base.types import LLMTask 6 | # 7 | # TEST_TASK_TEMPLATE = "{input} | {output}" 8 | # TEMPLATE_KWARGS = { 9 | # "input": "input-value: {input_val}", 10 | # "output": "output-value: {output_val}", 11 | # } 12 | # TASK_KWARGS = { 13 | # "input_val": "input-value", 14 | # "output_val": "output-value", 15 | # } 16 | # 17 | # 18 | # def test_llm_task(): 19 | # test_llm = MagicMock() 20 | # test_llm.predict.return_value = MagicMock() 21 | # test_llm.predict.return_value.response = '{"declarai_result": "output-value"}' 22 | # 23 | # llm_task = LLMTask( 24 | # template=TEST_TASK_TEMPLATE, 25 | # template_kwargs=TEMPLATE_KWARGS, 26 | # llm=test_llm, 27 | # prompt_kwargs={"return_type": str}, 28 | # ) 29 | # 30 | # compiled_task_template = "input-value: {input_val} | output-value: {output_val}" 31 | # compiled_task_with_values = "input-value: input-value | output-value: output-value" 32 | # 33 | # assert llm_task.compile() == compiled_task_template 34 | # assert llm_task.compile(**TASK_KWARGS) == compiled_task_with_values 35 | # 36 | # llm_res = llm_task(**TASK_KWARGS) 37 | # assert llm_res == "output-value" 38 | # assert 
test_llm.predict.called 39 | # 40 | # 41 | # def test_llm_task_result_name_override(): 42 | # test_llm = MagicMock() 43 | # test_llm.predict.return_value = MagicMock() 44 | # test_llm.predict.return_value.response = '{"result": "output-value"}' 45 | # 46 | # llm_task = LLMTask( 47 | # template=TEST_TASK_TEMPLATE, 48 | # template_kwargs=TEMPLATE_KWARGS, 49 | # llm=test_llm, 50 | # prompt_kwargs={ 51 | # "return_name": "result", 52 | # "return_type": str, 53 | # }, 54 | # ) 55 | # compiled_task_template = "input-value: {input_val} | output-value: {output_val}" 56 | # compiled_task_with_values = "input-value: input-value | output-value: output-value" 57 | # assert llm_task.compile() == compiled_task_template 58 | # assert llm_task.compile(**TASK_KWARGS) == compiled_task_with_values 59 | # 60 | # llm_res = llm_task(**TASK_KWARGS) 61 | # assert llm_res == "output-value" 62 | # assert test_llm.predict.called 63 | # 64 | # 65 | # def test_llm_task_unstructured_result(): 66 | # test_llm = MagicMock() 67 | # test_llm.predict.return_value = MagicMock() 68 | # test_llm.predict.return_value.response = "output-value" 69 | # 70 | # llm_task = LLMTask( 71 | # template=TEST_TASK_TEMPLATE, 72 | # template_kwargs=TEMPLATE_KWARGS, 73 | # llm=test_llm, 74 | # prompt_kwargs={"structured": False}, 75 | # ) 76 | # llm_res = llm_task(**TASK_KWARGS) 77 | # assert llm_res == "output-value" 78 | # assert test_llm.predict.called 79 | # 80 | # 81 | # def test_llm_task_multiple_results(): 82 | # test_llm = MagicMock() 83 | # test_llm.predict.return_value = MagicMock() 84 | # test_llm.predict.return_value.response = ( 85 | # '{"result1": "output-value1"}\n\n\n{"result2": "output-value2"}' 86 | # ) 87 | # 88 | # llm_task = LLMTask( 89 | # template=TEST_TASK_TEMPLATE, 90 | # template_kwargs=TEMPLATE_KWARGS, 91 | # llm=test_llm, 92 | # prompt_kwargs={"multi_results": True}, 93 | # ) 94 | # llm_res = llm_task(**TASK_KWARGS) 95 | # assert llm_res == {"result1": "output-value1", "result2": "output-value2"} 96 | # assert test_llm.predict.called 97 | # 98 | # 99 | # def test_future_llm_task(): 100 | # test_llm = MagicMock() 101 | # test_llm.predict.return_value = MagicMock() 102 | # test_llm.predict.return_value.response = '{"declarai_result": "output-value"}' 103 | # 104 | # llm_task = LLMTask( 105 | # template=TEST_TASK_TEMPLATE, 106 | # template_kwargs=TEMPLATE_KWARGS, 107 | # llm=test_llm, 108 | # prompt_kwargs={"return_type": str}, 109 | # ) 110 | # compiled_task_with_values = "input-value: input-value | output-value: output-value" 111 | # future_task = llm_task.plan(**TASK_KWARGS) 112 | # assert isinstance(future_task, FutureTask) 113 | # assert future_task.populated_prompt == compiled_task_with_values 114 | # assert future_task() == "output-value" 115 | -------------------------------------------------------------------------------- /docs/providers/openai.md: -------------------------------------------------------------------------------- 1 | # OpenAI 2 | 3 | To use OpenAI models, you can set the following configuration options: 4 | 5 | ```py 6 | import declarai 7 | 8 | openai_model = declarai.openai( 9 | model="", 10 | openai_token="", 11 | headers={"": ""}, 12 | timeout="", 13 | request_timeout="", 14 | stream="", 15 | ) 16 | ``` 17 | 18 | 19 | | Setting |
Env Variable
| Required? | 20 | |-----------------|---------------------------------------------|:---------:| 21 | | Model | | ✅ | 22 | | API key | `OPENAI_API_KEY` | ✅ | 23 | | Headers | | | 24 | | Timeout | | | 25 | | Request timeout | | | 26 | | Stream | | | 27 | 28 | ## Getting an API key 29 | 30 | To obtain an OpenAI API key, follow these steps: 31 | 32 | 1. [Log in](https://platform.openai.com/) to your OpenAI account (sign up if you don't have one). 33 | 2. Go to the "API Keys" [page](https://platform.openai.com/account/api-keys) under your account settings. 34 | 3. Click "Create new secret key." A new API key will be generated. Make sure to copy the key to your clipboard, as you 35 | will not be able to see it again. 36 | 37 | ## Setting the API key 38 | 39 | You can set your API key at runtime like this: 40 | 41 | ```python 42 | import declarai 43 | 44 | gpt4 = declarai.openai(model="gpt-4", openai_token="<your-api-key>") 45 | ``` 46 | 47 | However, it is preferable to pass sensitive settings as an environment variable: `OPENAI_API_KEY`. 48 | 49 | To establish your OpenAI API key as an environment variable, launch your terminal and execute the following command, 50 | substituting `<your-api-key>` with your actual key: 51 | 52 | ```shell 53 | export OPENAI_API_KEY=<your-api-key> 54 | ``` 55 | 56 | This will keep the key for the duration of your terminal session. To retain it across sessions, add the line to your 57 | terminal's profile or corresponding environment files. 58 | 59 | ## Control LLM Parameters 60 | 61 | OpenAI models have a number of parameters that can be tuned to control the output of the model. These parameters are 62 | passed to the declarai task/chat interface as a dictionary. The following parameters are supported: 63 | 64 | | Parameter | Type | Description | Default | 65 | |---------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------| 66 | | `temperature` | `float` | Controls the randomness of the model. Lower values make the model more deterministic and repetitive. Higher values make the model more random and creative. | `0` | 67 | | `max_tokens` | `int` | Controls the length of the output. | `3000` | 68 | | `top_p` | `float` | Controls the diversity of the model. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `1` | 69 | | `frequency_penalty` | `float` | Controls how often the model repeats itself. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `0` | 70 | | `presence_penalty` | `float` | Controls how often the model generates new topics. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `0` | 71 | 72 | Pass your custom parameters to the declarai task/chat interface as a dictionary: 73 | 74 | ```python 75 | import declarai 76 | 77 | gpt4 = declarai.openai(model="gpt-4", openai_token="<your-api-key>") 78 | 79 | 80 | @gpt4.task(llm_params={"temperature": 0.5, "max_tokens": 1000}) # (1)! 81 | def generate_song(): 82 | """ 83 | Generate a song about declarai 84 | """ 85 | 86 | ``` 87 | 88 | 1. Pass only the parameters you want to change. The rest will be set to their default values.
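Once decorated, the task is invoked like a plain Python function. A minimal sketch (the generated lyrics are of course illustrative):

```python
song = generate_song()  # runs with temperature=0.5 and max_tokens=1000; unspecified params keep their defaults
print(song)
```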
89 | -------------------------------------------------------------------------------- /docs/beginners-guide/debugging-tasks.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide: 3 | - footer 4 | --- 5 | 6 | # Debugging tasks 7 | 8 | So it all seems pretty magical up to this point, but what if you want to see what's going on behind the scenes? 9 | Being able to debug your tasks is a very important part of the development process, and **Declarai** makes it easy for you. 10 | 11 | ## Compiling tasks 12 | The first and simplest tool to better understand what's happening under the hood is the `compile` method.
13 | Declarai also has an `evals` module for advanced debugging and benchmarking, which you can review later here: [evals](../../features/evals/) 14 | 15 | Let's take the last task from the previous section and add a call to the `compile` method: 16 | ```python 17 | from typing import Dict 18 | import declarai 19 | 20 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 21 | 22 | @gpt_35.task 23 | def movie_recommender(user_input: str) -> Dict[str, str]: 24 | """ 25 | Recommend a selection of movies to watch based on the user input 26 | For each movie provide a short description as well 27 | :param user_input: The user's input 28 | :return: A dictionary of movie names and descriptions 29 | """ 30 | 31 | movie_recommender.compile() 32 | 33 | > { 34 | 'messages': [ # (1)! 35 | # (2)! 36 | system: You are a REST api endpoint. 37 | You only answer in JSON structures with a single key named 'declarai_result', nothing else. 38 | The expected format is: "declarai_result": Dict[string, string] # A dictionary of movie names and descriptions, 39 | # (3)! 40 | user: Recommend a selection of movies to watch based on the user input 41 | For each movie provide a short description as well. 42 | Inputs: user_input: {user_input} # (4)! 43 | ] 44 | } 45 | ``` 46 | 47 | 1. As we are working with the openai llm provider, which exposes a chat interface, we translate the task into **messages** as defined by openai's API. 48 | 2. In order to guide the task toward the correct output format, we provide a **system** message that explains the LLM's role and the expected responses. 49 | 3. The **user message** is the actual translation of the task at hand, with the user's input as a placeholder for the actual value. 50 | 4. **{user_input}** will be populated with the actual value when the task is called at runtime. 51 | 52 | What we're seeing here is the template for this specific task. It is built so that when called at runtime, 53 | it will be populated with the real values passed to our task. 54 | 55 | !!! warning 56 | 57 | As you can see, the actual prompt being sent to the model is a bit different from the original docstring. 58 | Even though Declarai incorporates best practices for prompt engineering while maintaining as little interference as possible with user prompts, 59 | it is still possible that the model will not generate the desired output. For this reason, it is important to be able to debug your tasks and understand what actually got sent to the model. 60 | 61 | ## Compiling tasks with real values 62 | The `compile` method can also be used to view the prompt with the real values provided to the task. 63 | This is useful when prompts might behave differently for different inputs. 64 | 65 | ```python hl_lines="10" 66 | print(movie_recommender.compile(user_input="I want to watch a movie about space")) 67 | 68 | > { 69 | 'messages': [ 70 | system: You are a REST api endpoint. 71 | You only answer in JSON structures with a single key named 'declarai_result', nothing else. 72 | The expected format is: "declarai_result": Dict[string, string] # A dictionary of movie names and descriptions, 73 | user: Recommend a selection of movies to watch based on the user input 74 | For each movie provide a short description as well. 75 | Inputs: user_input: I want to watch a movie about space # (1)! 76 | ]} 77 | ``` 78 | 79 | 1. The actual **value** of the parameter is now populated in the placeholder and we have our final prompt! 80 | 81 | 82 | !!! tip
83 | 84 | With the `compile` method, you can always take your prompts anywhere you like; 85 | whether it's for monitoring, debugging, or just documentation, we've got you covered! 86 | 87 | 88 | 96 | -------------------------------------------------------------------------------- /src/declarai/operators/llm.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module defines the base classes for the LLM interface. 3 | """ 4 | from __future__ import annotations 5 | 6 | from abc import abstractmethod 7 | from typing import Optional, TypedDict, TypeVar 8 | 9 | from pydantic.main import BaseModel 10 | 11 | 12 | class LLMResponse(BaseModel): 13 | """ 14 | The response from the LLM. 15 | 16 | Attributes: 17 | response: The raw response from the LLM 18 | model: The model that was used to generate the response 19 | prompt_tokens: The number of tokens in the prompt 20 | completion_tokens: The number of tokens in the completion 21 | total_tokens: The total number of tokens in the response 22 | """ 23 | 24 | response: str 25 | model: Optional[str] = None 26 | prompt_tokens: Optional[int] = None 27 | completion_tokens: Optional[int] = None 28 | total_tokens: Optional[int] = None 29 | role: str = "assistant" 30 | raw_response: Optional[dict] = None 31 | 32 | 33 | class BaseLLMParams(TypedDict): 34 | """ 35 | The base LLM params that are common to all LLMs. 36 | """ 37 | 38 | # Define any common/generic params here 39 | pass 40 | 41 | 42 | class LLMSettings: 43 | """ 44 | The settings for the LLM. Defines the model and version to use. 45 | 46 | Args: 47 | provider: The provider of the model (openai, cohere, etc.) 48 | model: The model to use (gpt-4, gpt-3.5-turbo, etc.) 49 | version: The version of the model to use (optional) 50 | **_: Any additional provider-specific params, which are ignored here. 51 | 52 | 53 | Attributes: 54 | provider (str): The provider of the model (openai, cohere, etc.) 55 | model: The full model name to use. 56 | version: The version of the model to use (optional) 57 | """ 58 | 59 | def __init__( 60 | self, 61 | provider: str, 62 | model: str, 63 | version: Optional[str] = None, 64 | **_, 65 | ): 66 | self.provider = provider 67 | self._model = model 68 | self.version = version 69 | 70 | @property 71 | def model(self, delimiter: Optional[str] = "-", with_version: bool = True) -> str: 72 | """ 73 | Some model providers allow defining a base model as well as a sub-model. 74 | Often the base model is an alias for the latest model served under that name. 75 | For example, when sending gpt-3.5-turbo to OpenAI, the actual model will be one of the 76 | publicly available snapshots or an internally exposed version as described on their website: 77 | as of 27/07/2023 - https://platform.openai.com/docs/models/continuous-model-upgrades 78 | | With the release of gpt-3.5-turbo, some of our models are now being continually updated. 79 | | We also offer static model versions that developers can continue using for at least 80 | | three months after an updated model has been introduced. 81 | 82 | Another use-case for sub-models is using your own fine-tuned models. 83 | As described in the documentation: 84 | https://platform.openai.com/docs/guides/fine-tuning/customize-your-model-name 85 | 86 | You will likely build your fine-tuned model names by concatenating the base model name 87 | with the fine-tuned model name, separated by a hyphen.
88 | For example 89 | gpt-3.5-turbo-declarai-text-classification-2023-03 90 | or 91 | gpt-3.5-turbo:declarai:text-classification-2023-03 92 | 93 | In any case you can always pass the full model name in the model parameter and leave the 94 | sub_model parameter empty if you prefer. 95 | """ 96 | if self.version and with_version: 97 | return f"{self._model}{delimiter}{self.version}" 98 | return self._model 99 | 100 | 101 | class BaseLLM: 102 | """ 103 | The base LLM class that all LLMs should inherit from. 104 | """ 105 | 106 | provider: str 107 | model: str 108 | 109 | @abstractmethod 110 | def predict(self, *args, **kwargs) -> LLMResponse: 111 | """ 112 | The predict method that all LLMs should implement. 113 | Args: 114 | *args: 115 | **kwargs: 116 | 117 | Returns: llm response object 118 | 119 | """ 120 | raise NotImplementedError() 121 | 122 | 123 | LLMParamsType = TypeVar("LLMParamsType", bound=BaseLLMParams) 124 | """Type variable for LLM params""" 125 | LLM = TypeVar("LLM", bound=BaseLLM) 126 | """Type variable for LLM""" 127 | -------------------------------------------------------------------------------- /docs/js/custom.js: -------------------------------------------------------------------------------- 1 | function setupTermynal() { 2 | document.querySelectorAll(".use-termynal").forEach(node => { 3 | node.style.display = "block"; 4 | new Termynal(node, { 5 | lineDelay: 500 6 | }); 7 | }); 8 | const progressLiteralStart = "---> 100%"; 9 | const promptLiteralStart = "$ "; 10 | const customPromptLiteralStart = "# "; 11 | const termynalActivateClass = "termy"; 12 | let termynals = []; 13 | 14 | function createTermynals() { 15 | document 16 | .querySelectorAll(`.${termynalActivateClass} .highlight`) 17 | .forEach(node => { 18 | const text = node.textContent; 19 | const lines = text.split("\n"); 20 | const useLines = []; 21 | let buffer = []; 22 | 23 | function saveBuffer() { 24 | if (buffer.length) { 25 | let isBlankSpace = true; 26 | buffer.forEach(line => { 27 | if (line) { 28 | isBlankSpace = false; 29 | } 30 | }); 31 | dataValue = {}; 32 | if (isBlankSpace) { 33 | dataValue["delay"] = 0; 34 | } 35 | if (buffer[buffer.length - 1] === "") { 36 | // A last single
<br> won't have effect 37 | // so put an additional one 38 | buffer.push(""); 39 | } 40 | const bufferValue = buffer.join("<br>
"); 41 | dataValue["value"] = bufferValue; 42 | useLines.push(dataValue); 43 | buffer = []; 44 | } 45 | } 46 | 47 | for (let line of lines) { 48 | if (line === progressLiteralStart) { 49 | saveBuffer(); 50 | useLines.push({ 51 | type: "progress" 52 | }); 53 | } else if (line.startsWith(promptLiteralStart)) { 54 | saveBuffer(); 55 | const value = line.replace(promptLiteralStart, "").trimEnd(); 56 | useLines.push({ 57 | type: "input", 58 | value: value 59 | }); 60 | } else if (line.startsWith("// ")) { 61 | saveBuffer(); 62 | const value = "💬 " + line.replace("// ", "").trimEnd(); 63 | useLines.push({ 64 | value: value, 65 | class: "termynal-comment", 66 | delay: 0 67 | }); 68 | } else if (line.startsWith(customPromptLiteralStart)) { 69 | saveBuffer(); 70 | const promptStart = line.indexOf(promptLiteralStart); 71 | if (promptStart === -1) { 72 | console.error("Custom prompt found but no end delimiter", line) 73 | } 74 | const prompt = line.slice(0, promptStart).replace(customPromptLiteralStart, "") 75 | let value = line.slice(promptStart + promptLiteralStart.length); 76 | useLines.push({ 77 | type: "input", 78 | value: value, 79 | prompt: prompt 80 | }); 81 | } else { 82 | buffer.push(line); 83 | } 84 | } 85 | saveBuffer(); 86 | const div = document.createElement("div"); 87 | node.replaceWith(div); 88 | const termynal = new Termynal(div, { 89 | lineData: useLines, 90 | noInit: true, 91 | lineDelay: 500 92 | }); 93 | termynals.push(termynal); 94 | }); 95 | } 96 | 97 | function loadVisibleTermynals() { 98 | termynals = termynals.filter(termynal => { 99 | if (termynal.container.getBoundingClientRect().top - innerHeight <= 0) { 100 | termynal.init(); 101 | return false; 102 | } 103 | return true; 104 | }); 105 | } 106 | 107 | window.addEventListener("scroll", loadVisibleTermynals); 108 | createTermynals(); 109 | loadVisibleTermynals(); 110 | } 111 | 112 | async function main() { 113 | setupTermynal(); 114 | } 115 | 116 | main(); 117 | -------------------------------------------------------------------------------- /src/declarai/evals/README.md: -------------------------------------------------------------------------------- 1 | # Evals 2 | The evals library is a companion to declarai and helps us, and you, monitor prompts across models and over time. 3 | We plan to run a suite of evaluations for every release of the package to ensure that changes in the prompt 4 | infrastructure will not reduce the quality of results. 5 | 6 | ## Running the evaluations 7 | To run the evaluations, you will need to install the `declarai` package. You can do this by running 8 | ```bash 9 | pip install declarai 10 | ``` 11 | 12 | Once you have installed the package, you can run the evaluations by running 13 | ```bash 14 | python -m evals.evaluator 15 | ``` 16 | 17 | After the evaluations have finished running, you should be able to view the results in your terminal: 18 | ```bash 19 | Running Metadata-Significance scenarios... 20 | generate_a_poem_no_metadata... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00 21 | generate_a_poem_only_return_type... 
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00 22 | ┏━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ 23 | ┃ Provider ┃ Model ┃ version ┃ Scenario ┃ runtime ┃ output ┃ 24 | ┡━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ 25 | │ openai │ gpt-3.5-turbo │ latest │ generate_a_poem_no_metadata │ 1.235s │ Using LLMs is fun! │ 26 | │ openai │ gpt-3.5-turbo │ 0301 │ generate_a_poem_no_metadata │ 0.891s │ Using LLMs is fun! │ 27 | │ │ │ │ │ │ It's like playing with words │ 28 | │ │ │ │ │ │ Creating models that learn │ 29 | │ │ │ │ │ │ And watching them fly like birds │ 30 | │ openai │ gpt-3.5-turbo │ 0613 │ generate_a_poem_no_metadata │ 1.071s │ Using LLMs is fun! │ 31 | │ openai │ gpt-4 │ latest │ generate_a_poem_no_metadata │ 3.494s │ {'poem': 'Using LLMs, a joyous run,\n In the world of AI, under the sun.\nWith every task, they │ 32 | │ │ │ │ │ │ stun,\nIndeed, using LLMs is fun!'} │ 33 | │ openai │ gpt-4 │ 0613 │ generate_a_poem_no_metadata │ 4.992s │ {'title': 'Using LLMs is fun!', 'poem': "With LLMs, the fun's just begun, \nCoding and learning, │ 34 | │ │ │ │ │ │ second to none. \nComplex tasks become a simple run, \nOh, the joy when the work is done!"} │ 35 | │ openai │ gpt-3.5-turbo │ latest │ generate_a_poem_only_return_type │ 2.1s │ Learning with LLMs, a delightful run, │ 36 | │ │ │ │ │ │ Exploring new knowledge, it's never done. │ 37 | │ │ │ │ │ │ With every challenge, we rise and we stun, │ 38 | │ │ │ │ │ │ Using LLMs, the learning is always fun! │ 39 | │... │... │... │... │... │ ... │ 40 | └─────────────────┴─────────────────┴─────────────────┴──────────────────────────────────────────┴────────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────┘ 41 | ``` 42 | -------------------------------------------------------------------------------- /src/declarai/operators/openai_operators/task_operator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Task implementation for openai operator. 3 | """ 4 | import logging 5 | 6 | from declarai.operators.registry import register_operator 7 | from declarai.operators.message import Message, MessageRole 8 | from declarai.operators.operator import BaseOperator, CompiledTemplate 9 | from ..utils import can_be_jinja 10 | from declarai.operators.templates import ( 11 | InstructFunctionTemplate, 12 | StructuredOutputInstructionPrompt, 13 | compile_output_prompt, 14 | ) 15 | 16 | from .openai_llm import AzureOpenAILLM, OpenAILLM 17 | 18 | logger = logging.getLogger("OpenAITaskOperator") 19 | 20 | INPUTS_TEMPLATE = "Inputs:\n{inputs}\n" 21 | INPUT_LINE_TEMPLATE = "{param}: {{{param}}}" 22 | NEW_LINE_INPUT_LINE_TEMPLATE = "\n{param}: {{{param}}}" 23 | 24 | 25 | @register_operator(provider="openai", operator_type="task") 26 | class OpenAITaskOperator(BaseOperator): 27 | """ 28 | Task implementation for openai operator. This is a child of the BaseOperator class. See the BaseOperator class for further documentation. 29 | Implements the compile method which compiles a parsed function into a message. 30 | Uses the OpenAILLM to generate a response based on the given template. 
31 | 32 | Attributes: 33 | llm: OpenAILLM 34 | """ 35 | 36 | llm: OpenAILLM 37 | 38 | def _compile_input_placeholder(self) -> str: 39 | """ 40 | Creates a placeholder for the input of the function. 41 | The input format is based on the function input schema. 42 | 43 | !!! example 44 | For example, a function signature of: 45 | ```py 46 | def foo(a: int, b: str, c: float = 1.0): 47 | ``` 48 | 49 | will result in the following placeholder: 50 | ```md 51 | Inputs: 52 | a: {a} 53 | b: {b} 54 | c: {c} 55 | ``` 56 | """ 57 | inputs = "" 58 | 59 | if not self.parsed.signature_kwargs.keys(): 60 | return inputs 61 | 62 | for i, param in enumerate(self.parsed.signature_kwargs.keys()): 63 | if i == 0: 64 | inputs += INPUT_LINE_TEMPLATE.format(param=param) 65 | continue 66 | inputs += NEW_LINE_INPUT_LINE_TEMPLATE.format(param=param) 67 | 68 | return INPUTS_TEMPLATE.format(inputs=inputs) 69 | 70 | def _compile_output_prompt(self, template) -> str: 71 | if not self.parsed.has_any_return_defs: 72 | logger.warning( 73 | "Couldn't create output schema for function %s. " 74 | "Falling back to unstructured output. " 75 | "Please add at least one of the following: return type, return doc, return name", 76 | self.parsed.name, 77 | ) 78 | return "" 79 | 80 | signature_return = self.parsed.signature_return 81 | return_name, return_doc = self.parsed.docstring_return 82 | return compile_output_prompt( 83 | return_type=signature_return.str_schema, 84 | str_schema=return_name, 85 | return_docstring=return_doc, 86 | return_magic=self.parsed.magic.return_name, 87 | structured=self.parsed.has_structured_return_type, 88 | structured_template=template, 89 | ) 90 | 91 | def compile_template(self) -> CompiledTemplate: 92 | """ 93 | Unique compilation method for the OpenAITaskOperator class. 94 | Uses the InstructFunctionTemplate and StructuredOutputInstructionPrompt templates to create the messages, 95 | and the _compile_input_placeholder method to create a placeholder for the function's input. 96 | Returns: 97 | List[Message]: The compiled list of messages. 98 | 99 | """ 100 | instruction_template = InstructFunctionTemplate 101 | structured_template = StructuredOutputInstructionPrompt 102 | output_schema = self._compile_output_prompt(structured_template) 103 | 104 | messages = [] 105 | if output_schema: 106 | messages.append(Message(message=output_schema, role=MessageRole.system)) 107 | 108 | if not can_be_jinja(self.parsed.docstring_freeform): 109 | instruction_message = instruction_template.format( 110 | input_instructions=self.parsed.docstring_freeform, 111 | input_placeholder=self._compile_input_placeholder(), 112 | ) 113 | else: 114 | instruction_message = self.parsed.docstring_freeform 115 | 116 | messages.append(Message(message=instruction_message, role=MessageRole.user)) 117 | return messages 118 | 119 | 120 | @register_operator(provider="azure-openai", operator_type="task") 121 | class AzureOpenAITaskOperator(OpenAITaskOperator): 122 | """ 123 | Task implementation for openai operator that uses Azure as the llm provider.
124 | 125 | Attributes: 126 | llm: AzureOpenAILLM 127 | """ 128 | 129 | llm: AzureOpenAILLM 130 | -------------------------------------------------------------------------------- /docs/beginners-guide/controlling-task-behavior.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide: 3 | - footer 4 | --- 5 | 6 | # Controlling task behavior :control_knobs: 7 | 8 | Task behavior can be controlled through the standard Python interfaces: function parameters, type hints, and docstrings. 9 | Controlling these parameters is key to achieving the desired results from the model. 10 | 11 | ### Passing parameters to the task :label: 12 | 13 | In the following example, we'll create a task that suggests movies to watch based on a given input. 14 | 15 | ```python 16 | import declarai 17 | 18 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 19 | 20 | 21 | @gpt_35.task 22 | def movie_recommender(user_input: str): # (1)! 23 | """ 24 | Recommend a movie to watch based on the user input 25 | :param user_input: The user's input 26 | """ # (2)! 27 | ``` 28 | 29 | 1. Notice how providing a type hint for the `user_input` parameter allows declarai to understand the expected input 30 | type. 31 | 2. Adding the param to the docstring allows declarai to communicate the **meaning** of this parameter to the model. 32 | 33 | ```python 34 | print(movie_recommender(user_input="I want to watch a movie about space")) 35 | > 'Interstellar' 36 | ``` 37 | 38 | ### Using return types to control the output :gear: 39 | 40 | This is a good start, 41 | but let's say we want to have a selection of movies instead of a single suggestion. 42 | 43 | ```python 44 | from typing import List 45 | import declarai 46 | 47 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 48 | 49 | 50 | @gpt_35.task 51 | def movie_recommender(user_input: str) -> List[str]: # (1)! 52 | """ 53 | Recommend a selection of movies to watch based on the user input 54 | :param user_input: The user's input 55 | :return: A list of movie recommendations 56 | """ # (2)! 57 | ``` 58 | 59 | 1. Adding a return type hint allows declarai to parse the output of the llm into the provided type, 60 | in our case a list of strings. 61 | 2. Explaining the return value aids the model in returning the expected output and avoiding hallucinations. 62 | 63 | ```python 64 | print(movie_recommender(user_input="I want to watch a movie about space")) 65 | > ['Interstellar', 'Gravity', 'The Martian', 'Apollo 13', '2001: A Space Odyssey', 'Moon', 'Sunshine', 'Contact', 66 | 'The Right Stuff', 'Hidden Figures'] 67 | ``` 68 | 69 | !!! info 70 | 71 | Notice how the text in our documentation has changed from singular to plural form. 72 | Maintaining consistency between the task's description and the return type is important for the model to understand the expected output.
73 | For more best-practices, see [here](../../best-practices). 74 | 75 | Awesome! 76 | 77 | Now we have a list of movies to choose from! 78 | 79 | But what if we want to go even further :thinking:?
80 | Let's say we want the model to also provide a short description of each movie. 81 | 82 | ```python 83 | from typing import Dict 84 | import declarai 85 | 86 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 87 | 88 | 89 | @gpt_35.task 90 | def movie_recommender(user_input: str) -> Dict[str, str]: # (1)! 91 | """ 92 | Recommend a selection of movies to watch based on the user input 93 | For each movie provide a short description as well 94 | :param user_input: The user's input 95 | :return: A dictionary of movie names and descriptions 96 | """ # (2)! 97 | ``` 98 | 99 | 1. We've updated the return value to allow for the creation of a dictionary of movie names and descriptions. 100 | 2. We reinforce the description of the return value to ensure the model understands the expected output. 101 | 102 | ```python 103 | print(movie_recommender(user_input="I want to watch a movie about space")) 104 | > { 105 | 'Interstellar': "A team of explorers travel through a wormhole in space in an attempt to ensure humanity's survival.", 106 | 'Gravity': 'Two astronauts work together to survive after an accident leaves them stranded in space.', 107 | 'The Martian': 'An astronaut is left behind on Mars after his team assumes he is dead and must find a way to survive and signal for rescue.', 108 | 'Apollo 13': 'The true story of the Apollo 13 mission, where an explosion in space jeopardizes the lives of the crew and their safe return to Earth.', 109 | '2001: A Space Odyssey': "A journey through human evolution and the discovery of a mysterious black monolith that may hold the key to humanity's future." 110 | } 111 | ``` 112 | 113 | !!! info 114 | 115 | A good practice for code readability, as well as for well-performing models, is to use type hints and context in the docstrings. 116 | The better you describe the task and the `:param`/`:return` sections within the docstring, the better the results will be. 117 | 118 | !!! tip 119 | 120 | Try experimenting with various descriptions and see how far you can push the model's understanding! 121 | Who knows what you'll find :open_mouth:! 122 | 131 | -------------------------------------------------------------------------------- /docs/providers/azure_openai.md: -------------------------------------------------------------------------------- 1 | # Azure OpenAI 2 | 3 | To use Azure OpenAI models, you can set the following configuration options: 4 | 5 | ```py 6 | import declarai 7 | 8 | azure_model = declarai.azure_openai( 9 | azure_openai_key="<your-api-key>", 10 | azure_openai_api_base="<your-api-base>", 11 | deployment_name="<your-deployment-name>", 12 | api_version="<api-version>", 13 | headers={"<header-name>": "<header-value>"}, 14 | timeout="<timeout>", 15 | request_timeout="<request-timeout>", 16 | stream="<stream>", 17 | ) 18 | ``` 19 | 20 | | Argument |
Env Variable
| Required? | 21 | |-----------------------|---------------------------------------------|:---------:| 22 | | azure_openai_key | `DECLARAI_AZURE_OPENAI_KEY` | ✅ | 23 | | azure_openai_api_base | `DECLARAI_AZURE_OPENAI_API_BASE` | ✅ | 24 | | deployment_name | `DECLARAI_AZURE_OPENAI_DEPLOYMENT_NAME` | ✅ | 25 | | api_version | `DECLARAI_AZURE_OPENAI_API_VERSION` | | 26 | | headers | | | 27 | | timeout | | | 28 | | request_timeout | | | 29 | | stream | | | 30 | 31 | ## Getting an API key, API base, and Deployment name 32 | 33 | To obtain the above settings, you will need to create an account on the [Azure OpenAI](https://azure.microsoft.com/en-us/services/cognitive-services/) 35 | website. Once you have created an account, you will need to create a resource. 36 | 37 | Please follow the instructions in the [Azure OpenAI quickstart](https://learn.microsoft.com/en-us/azure/ai-services/openai/quickstart?tabs=command-line&pivots=programming-language-python). 39 | 40 | ## Setting the API key 41 | 42 | You can set your API key at runtime like this: 43 | 44 | ```python 45 | import declarai 46 | 47 | my_azure_model = declarai.azure_openai( 48 | deployment_name="my-model", 49 | azure_openai_key="<your-api-key>", 50 | azure_openai_api_base="https://<your-resource-name>.openai.azure.com", 51 | ) 52 | ``` 53 | 54 | However, it is preferable to pass sensitive settings as an environment variable: `DECLARAI_AZURE_OPENAI_KEY`. 55 | 56 | To establish your Azure OpenAI API key as an environment variable, launch your terminal and execute the following command, 58 | substituting `<your-api-key>` with your actual key: 59 | 60 | ```shell 61 | export DECLARAI_AZURE_OPENAI_KEY=<your-api-key> 62 | ``` 63 | 64 | This will keep the key for the duration of your terminal session. To retain it across sessions, add the line to your 65 | terminal's profile or corresponding environment files. 66 | 67 | ## Control LLM Parameters 68 | 69 | OpenAI models have a number of parameters that can be tuned to control the output of the model. These parameters are passed to the declarai task/chat interface as a dictionary. The following parameters are supported: 71 | 72 | | Parameter | Type | Description | Default | 73 | |---------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------| 74 | | `temperature` | `float` | Controls the randomness of the model. Lower values make the model more deterministic and repetitive. Higher values make the model more random and creative. | `0` | 75 | | `max_tokens` | `int` | Controls the length of the output. | `3000` | 76 | | `top_p` | `float` | Controls the diversity of the model. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `1` | 77 | | `frequency_penalty` | `float` | Controls how often the model repeats itself. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `0` | 78 | | `presence_penalty` | `float` | Controls how often the model generates new topics. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative.
| `0` | 79 | 80 | Pass your custom parameters to the declarai task/chat interface as a dictionary: 81 | 82 | ```python 83 | import declarai 84 | 85 | azure_model = declarai.azure_openai( 86 | deployment_name="my-model", 87 | azure_openai_key="", 88 | azure_openai_api_base="https://.com", 89 | headers="" 90 | ) 91 | 92 | 93 | @azure_model.task(llm_params={"temperature": 0.5, "max_tokens": 1000}) # (1)! 94 | def generate_song(): 95 | """ 96 | Generate a song about declarai 97 | """ 98 | 99 | ``` 100 | 101 | 1. Pass only the parameters you want to change. The rest will be set to their default values. 102 | -------------------------------------------------------------------------------- /src/declarai/python_parser/parser.py: -------------------------------------------------------------------------------- 1 | """PythonParser 2 | An interface to extract different parts of the provided python code into a simple metadata object. 3 | """ 4 | 5 | import inspect 6 | from functools import lru_cache as memoized 7 | from typing import Any, Dict, Optional 8 | 9 | from pydantic import parse_obj_as, parse_raw_as 10 | from pydantic.error_wrappers import ValidationError 11 | 12 | from declarai.python_parser.magic_parser import Magic, extract_magic_args 13 | from declarai.python_parser.type_annotation_to_schema import ( 14 | type_annotation_to_str_schema, 15 | ) 16 | from declarai.python_parser.types import ( 17 | ArgName, 18 | ArgType, 19 | DocstringFreeform, 20 | DocstringParams, 21 | DocstringReturn, 22 | SignatureReturn, 23 | ) 24 | 25 | from .docstring_parsers.reST import ReSTDocstringParser 26 | 27 | 28 | class OutputParsingError(Exception): 29 | pass 30 | 31 | 32 | class PythonParser: 33 | """ 34 | A unified interface for accessing python parsed data. 35 | """ 36 | 37 | is_func: bool 38 | is_class: bool 39 | decorated: Any 40 | name: str 41 | signature_return_type: Any 42 | docstring_freeform: DocstringFreeform 43 | docstring_params: DocstringParams 44 | docstring_return: DocstringReturn 45 | 46 | def __init__(self, decorated: Any): 47 | self.is_func = inspect.isfunction(decorated) 48 | self.is_class = inspect.isclass(decorated) 49 | self.decorated = decorated 50 | 51 | # Static attributes: 52 | self.name = self.decorated.__name__ 53 | 54 | self._signature = inspect.signature(self.decorated) 55 | self.signature_return_type = self.signature_return.type_ 56 | 57 | docstring = inspect.getdoc(self.decorated) 58 | self._parsed_docstring = ReSTDocstringParser(docstring or "") 59 | self.docstring_freeform = self._parsed_docstring.freeform 60 | self.docstring_params = self._parsed_docstring.params 61 | self.docstring_return = self._parsed_docstring.returns 62 | 63 | @property 64 | @memoized(maxsize=1) 65 | def signature_kwargs(self) -> Dict[ArgName, ArgType]: 66 | return { 67 | param.name: param.annotation 68 | for param in dict(self._signature.parameters).values() 69 | if param.name != "self" 70 | } 71 | 72 | @property 73 | @memoized(maxsize=1) 74 | def signature_return(self) -> Optional[SignatureReturn]: 75 | return_annotation = self._signature.return_annotation 76 | if return_annotation == inspect._empty: 77 | return SignatureReturn() 78 | string_schema = type_annotation_to_str_schema(self._signature.return_annotation) 79 | return SignatureReturn( 80 | name=str(self._signature.return_annotation), 81 | str_schema=string_schema, 82 | type_=self._signature.return_annotation, 83 | ) 84 | 85 | @property 86 | @memoized(maxsize=1) 87 | def magic(self) -> Magic: 88 | func_str = inspect.getsource(self.decorated) 89 
| if "magic(" not in func_str: 90 | return Magic() 91 | return extract_magic_args(func_str) 92 | 93 | @property 94 | @memoized(maxsize=1) 95 | def return_name(self) -> str: 96 | return self.magic.return_name or self.docstring_return[0] or "declarai_result" 97 | 98 | @property 99 | @memoized(maxsize=1) 100 | def has_any_return_defs(self) -> bool: 101 | """ 102 | A return definition is any of the following: 103 | - return type annotation 104 | - return reference in docstring 105 | - return referenced in magic placeholder # TODO: Address magic reference as well. 106 | """ 107 | return any( 108 | [ 109 | self.docstring_return[0], 110 | self.docstring_return[1], 111 | self.signature_return, 112 | ] 113 | ) 114 | 115 | @property 116 | @memoized(maxsize=1) 117 | def has_structured_return_type(self) -> bool: 118 | """ 119 | Except for the following types, a dedicated output parsing 120 | behavior is required to return the expected return type of the task. 121 | """ 122 | return any( 123 | [ 124 | self.docstring_return[0], 125 | self.signature_return.name 126 | not in ( 127 | None, 128 | "", 129 | "", 130 | "", 131 | "", 132 | ), 133 | ] 134 | ) 135 | 136 | def parse(self, raw_result: str): 137 | if self.has_structured_return_type: 138 | parsed_result = parse_raw_as(dict, raw_result) 139 | root_key = self.return_name or "declarai_result" 140 | parsed_result = parsed_result[root_key] 141 | else: 142 | parsed_result = raw_result 143 | 144 | if self.signature_return_type: 145 | try: 146 | return parse_obj_as(self.signature_return_type, parsed_result) 147 | except ValidationError: 148 | raise OutputParsingError( 149 | f"\nFailed parsing result into type:\n" 150 | f"{self.signature_return_type}\n" 151 | "----------------------------------\n" 152 | f"raw_result:\n" 153 | f"{raw_result}" 154 | ) 155 | else: 156 | return parsed_result 157 | -------------------------------------------------------------------------------- /docs/best-practices/index.md: -------------------------------------------------------------------------------- 1 | # Best practices 2 | 3 | Prompt engineering is no simple task and there are various things to consider when creating a prompt. 4 | In this page we provide our view and understanding of the best practices for prompt engineering. 5 | These will help you create reliably performing tasks and chatbots that won't surprise you when deploying in production. 6 | 7 | !!! warning 8 | 9 | While this guide will should help in creating reliable prompts for most cases, it is still possible that the model will not generate the desired output. 10 | For this reason we strongly recommend you test your tasks and bots on various inputs before deploying to production.
11 | You can achieve this by writing integration tests or using our provided `evals` library to discover which models and which 12 | versions perform best for your specific use case. 13 | 14 | 15 | ### Explicit is better than implicit 16 | 17 | When creating a prompt, it is important to be as explicit as possible. 18 | Declarai provides various interfaces for giving context and guidance to the model. 19 | 20 | Reviewing the movie recommender example from the beginner's guide, we can see a collection of techniques to provide context to the model: 21 | ```python 22 | from typing import Dict 23 | import declarai 24 | 25 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 26 | @gpt_35.task 27 | def movie_recommender(user_input: str) -> Dict[str, str]: 28 | """ 29 | Recommend a selection of movies to watch based on the user input 30 | For each movie provide a short description as well 31 | :param user_input: The user's input 32 | :return: A dictionary of movie names and descriptions 33 | """ 34 | ``` 35 | 36 | **Using type annotations** in the input and output creates predictability in software and enforces a strict interface with the model. 37 | The types are read and enforced by Declarai at runtime, so a result of the wrong type raises an error instead of being returned and causing unexpected behavior down the line.
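For instance, here is a minimal sketch of what that enforcement looks like from the caller's side (`movie_recommender` is the task defined above; `OutputParsingError` is the exception raised by Declarai's python parser, in `declarai.python_parser.parser`, when the raw output cannot be coerced into the annotated type):

```python
from declarai.python_parser.parser import OutputParsingError

try:
    recommendations = movie_recommender(user_input="a feel-good space adventure")
    # On success, recommendations is a real Dict[str, str]: safe to iterate without extra validation
    for title, description in recommendations.items():
        print(f"{title}: {description}")
except OutputParsingError:
    # The model produced something that could not be parsed into Dict[str, str]
    recommendations = {}
```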
39 | 40 | **Docstrings** are used to provide context to the model and to the user. 41 | 42 | - **Task description** - The first part of the docstring is the task itself; make sure to address the expected inputs and how to use them. 43 | You can implement various popular techniques into the prompt such as `few-shot`, which means providing example inputs and outputs for the model to learn from. 44 | 45 | - **Param descriptions** - Explaining the meaning of the input parameters helps the model better perform with the provided inputs. 46 | For example, when passing an argument called `input`, if you know that the expected input will be an email or a user message, it is best to explain this to the model. 47 | 48 | - **Return description** - While type annotations are a great base layer for declaring the expected output, 49 | explaining the exact structure and the logic behind this structure will help the model better perform. 50 | For example, given a return type of `Dict[str, str]`, explaining that this object will contain a mapping of movie names to their respective descriptions 51 | will help the model properly populate the resulting object. 52 | 53 | ### Language consistency and ambiguity 54 | 55 | When providing prompts to the model, it is best practice to use language that correlates with the expected input and output. 56 | For example, in the following, the prompt is written in singular form, while the resulting output is in plural form (i.e. a list). 57 | ```python 58 | from typing import List 59 | import declarai 60 | 61 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 62 | 63 | @gpt_35.task 64 | def movie_recommender(user_input: str) -> List[str]: 65 | """ 66 | Recommend a movie to watch based on the user input 67 | :param user_input: The user's input 68 | :return: Recommended movie 69 | """ 70 | ``` 71 | This may easily confuse the model and cause it to produce unexpected results that will fail when the results are parsed. 72 | Instead, we could write the prompt as follows: 73 | ```python 74 | from typing import List 75 | import declarai 76 | 77 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 78 | @gpt_35.task 79 | def movie_recommender(user_input: str) -> List[str]: 80 | """ 81 | Recommend a selection of movies to watch based on the user input 82 | :param user_input: The user's input 83 | :return: A list of recommended movies 84 | """ 85 | ``` 86 | This way it is clear to the model that we are expecting a list of movies and not a single movie. 87 | 88 | 89 | ### Falling back to string 90 | 91 | In some cases, you might be working on a task or chat that has a mixture of behaviors that may not be consistent. 92 | For example, in this implementation of a calculator bot, the bot usually returns numbers, but when an error occurs, it returns a string. 93 | ```python 94 | from typing import Union 95 | import declarai 96 | 97 | gpt_35 = declarai.openai(model="gpt-3.5-turbo") 98 | 99 | @gpt_35.experimental.chat 100 | class CalculatorBot: 101 | """ 102 | You are a calculator bot; 103 | given a request, you will return the result of the calculation. 104 | If you have a problem with the provided input, you should return an error explaining the problem.
105 | For example, for the input: "1 + a" where 'a' is unknown to you, you should return: "Unknown symbol 'a'" 106 | """ 107 | def send(self, message: str) -> Union[str, int]: 108 | ... 109 | 110 | Using the created bot then looks like this: 111 | ```python 112 | calc_bot = CalculatorBot() 113 | print(calc_bot.send(message="1 + 3")) 114 | #> 4 115 | print(calc_bot.send(message="34 * b")) 116 | #> Unknown symbol 'b' 117 | ``` 118 | This way, instead of raising an error, the bot returns a string that explains the problem and allows the user to recover from the 'broken' state.
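Because the annotated return type is `Union[str, int]`, downstream code can simply branch on the parsed type instead of wrapping every call in try/except. A minimal sketch building on the bot above:

```python
result = calc_bot.send(message="7 * 6")
if isinstance(result, int):
    print(f"computed: {result}")  # happy path: the response was parsed into an integer
else:
    print(f"bot reported a problem: {result}")  # fallback path: the explanatory string
```
--------------------------------------------------------------------------------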