35 |
--------------------------------------------------------------------------------
/src/declarai/operators/utils.py:
--------------------------------------------------------------------------------
1 | import jinja2
2 |
3 |
4 | def can_be_jinja(string: str) -> bool:
5 | """
6 | Checks if a string can be compiled using the jinja2 template engine.
7 | """
8 | if "{{" in string or "{%" in string or "{#" in string:
9 | try:
10 | jinja2.Template(string)
11 | return True
12 | except jinja2.exceptions.TemplateSyntaxError:
13 | return False
14 | else:
15 | return False
16 |
17 |
18 | def format_prompt_msg(_string: str, **kwargs) -> str:
19 | """
20 | Formats a string using the jinja2 template engine if possible, otherwise uses the python string format.
21 | Args:
22 | _string: The string to format
23 | **kwargs: The kwargs to pass to the template
24 |
25 | Returns: The formatted string
26 | """
27 | if can_be_jinja(_string):
28 | return jinja2.Template(_string).render(**kwargs)
29 | else:
30 | return _string.format(**kwargs)
31 |
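32 | # Illustrative examples of the helpers above (comments only, values are made up):
33 | #   format_prompt_msg("Hello {{ name }}", name="Ada")  # -> "Hello Ada"   (jinja2 path)
34 | #   format_prompt_msg("Hello {name}", name="Ada")      # -> "Hello Ada"   (str.format path)
35 | #   format_prompt_msg("Hello {{ name", name="Ada")     # -> "Hello { name" (invalid jinja falls back)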
--------------------------------------------------------------------------------
/tests/operators/test_utils.py:
--------------------------------------------------------------------------------
1 | from declarai.operators.utils import can_be_jinja, format_prompt_msg
2 |
3 |
4 | # Tests for can_be_jinja function
5 | def test_can_be_jinja_valid():
6 |     assert can_be_jinja("Hello {{ name }}")
7 |
8 |
9 | def test_can_be_jinja_invalid():
10 |     assert not can_be_jinja("Hello {{ name")
11 |
12 |
13 | def test_can_be_jinja_no_jinja_syntax():
14 |     assert not can_be_jinja("Hello name")
15 |
16 |
17 | # Tests for format_prompt_msg function
18 | def test_format_prompt_msg_valid_jinja():
19 | assert format_prompt_msg("Hello {{ name }}", name="John") == "Hello John"
20 |
21 |
22 | def test_format_prompt_msg_invalid_jinja():
23 | assert format_prompt_msg("Hello {{ name", name="John") == "Hello { name"
24 |
25 |
26 | def test_format_prompt_msg_python_format():
27 | assert format_prompt_msg("Hello {name}", name="John") == "Hello John"
28 |
29 |
30 | def test_format_prompt_msg_no_format():
31 | assert format_prompt_msg("Hello name") == "Hello name"
32 |
--------------------------------------------------------------------------------
/docs/features/chat/debugging-chat.md:
--------------------------------------------------------------------------------
1 | # Debugging Chat :bug:
2 |
3 | As with debugging tasks, understanding the prompts being sent to the llm is crucial for debugging chatbots.
4 | Declarai exposes the `compile` method for chat instances as well!
5 |
6 | ## Compiling chat
7 | ```py
8 | import declarai
9 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
10 |
11 | @gpt_35.experimental.chat
12 | class SQLBot:
13 | """
14 | You are a sql assistant. You help with SQL queries with one-line answers.
15 | """
16 | greeting = "Hello, I am a SQL assistant. How can I assist you today?"
17 |
18 | sql_bot = SQLBot()
19 | print(sql_bot.compile())
20 | ```
21 | ```py
22 | > {
23 | 'messages':
24 | [
25 | "system: You are a sql assistant. You help with SQL queries with one-line answers.",
26 | "assistant: Hello, I am a SQL assistant. How can I assist you today?"
27 | ]
28 | }
29 | ```
30 | Wonderful, right? We can view the chatbot's messages in the exact format in which they will be sent to the language model.
31 |
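32 | If your chat keeps memory, `compile` is also handy after a few `send` calls. A minimal sketch,
33 | assuming the compiled messages reflect the history accumulated by the chat memory:
34 | ```py
35 | sql_bot.send("When should I use a LEFT JOIN?")
36 | print(sql_bot.compile())  # the user question and the assistant reply should now appear as well
37 | ```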
--------------------------------------------------------------------------------
/src/declarai/memory/base.py:
--------------------------------------------------------------------------------
1 | """
2 | Base class for the memory module.
3 | """
4 | from abc import ABC, abstractmethod # pylint: disable=E0611
5 | from typing import List
6 |
7 | from declarai.operators import Message
8 |
9 |
10 | class BaseChatMessageHistory(ABC):
11 | """
12 | Abstract class to store the chat message history.
13 |
14 |     See `InMemoryMessageHistory` for the default implementation.
15 |
16 | """
17 |
18 | @property
19 | @abstractmethod
20 | def history(self) -> List[Message]:
21 | """
22 | Return the chat message history
23 |
24 | Returns:
25 | List of Message objects
26 | """
27 |
28 | @abstractmethod
29 | def add_message(self, message: Message) -> None:
30 | """
31 | Add a Message object to the state.
32 |
33 | Args:
34 | message: Message object to add to the state
35 | """
36 |
37 | @abstractmethod
38 | def clear(self) -> None:
39 | """
40 | Remove all messages from the state
41 | """
42 |
--------------------------------------------------------------------------------
/src/declarai/memory/in_memory.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains the in-memory implementation of the chat message history.
3 | """
4 | from typing import List
5 |
6 | from pydantic.main import BaseModel
7 |
8 | from declarai.operators import Message
9 |
10 | from .base import BaseChatMessageHistory
11 |
12 |
13 | class InMemoryMessageHistory(BaseChatMessageHistory, BaseModel):
14 | """
15 | This memory implementation stores all messages in memory in a list.
16 | """
17 |
18 | messages: List[Message] = []
19 |
20 | @property
21 | def history(self) -> List[Message]:
22 | """
23 | Returns the list of messages stored in memory.
24 | :return: List of messages
25 | """
26 | return self.messages
27 |
28 | def add_message(self, message: Message) -> None:
29 | """
30 | Adds a message to the list of messages stored in memory.
31 | :param message: the message content and role
32 | """
33 | self.messages.append(message)
34 |
35 | def clear(self) -> None:
36 | self.messages = []
37 |
--------------------------------------------------------------------------------
/tests/orchestrator/test_future_llm_task.py:
--------------------------------------------------------------------------------
1 | # from unittest.mock import MagicMock
2 | #
3 | # from declarai.orchestrator.future_llm_task import FutureTask
4 | #
5 | #
6 | # def test_future_llm_task():
7 | # exec_func = MagicMock()
8 | # exec_func.return_value = "output-value"
9 | # kwargs = {
10 | # "input": "input-value",
11 | # "output": "output-value",
12 | # }
13 | # compiled_template = "{input} | {output}"
14 | # populated_prompt = "input-value | output-value"
15 | #
16 | # future_llm_task = FutureTask(
17 | # exec_func=exec_func,
18 | # kwargs=kwargs,
19 | # compiled_template=compiled_template,
20 | # populated_prompt=populated_prompt,
21 | # )
22 | #
23 | # task_res = future_llm_task()
24 | # assert task_res == "output-value"
25 | # exec_func.assert_called_with(populated_prompt)
26 | #
27 | # assert future_llm_task.populated_prompt == populated_prompt
28 | #
29 | # assert future_llm_task.compiled_template == compiled_template
30 | #
31 | # assert future_llm_task.task_kwargs == kwargs
32 |
--------------------------------------------------------------------------------
/tests/python_parser/test_magic_parser.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | from unittest.mock import MagicMock
3 |
4 | from declarai.python_parser.magic_parser import Magic, extract_magic_args
5 |
6 | magic = MagicMock()
7 |
8 |
9 | def test_magic_parser():
10 | """
11 | TODO: This doesn't currently support aliases in the magic function, only string literals
12 | """
13 |
14 | def mock_magic_parser_function(arg: str):
15 | return magic(
16 | "return_name",
17 | task_desc="This is a task description",
18 | input_desc={"arg": "This is an argument desc"},
19 | output_desc="This is an output desc",
20 | arg=arg,
21 | )
22 |
23 | code = inspect.getsource(mock_magic_parser_function)
24 | _magic = extract_magic_args(code)
25 |
26 | assert isinstance(_magic, Magic)
27 | assert _magic.return_name == "return_name"
28 | assert _magic.task_desc == "This is a task description"
29 | assert _magic.input_desc == {"arg": "This is an argument desc"}
30 | assert _magic.output_desc == "This is an output desc"
31 |
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | # .coveragerc to control coverage.py
2 |
3 | [run]
4 | omit =
5 | src/declarai/operators/shared/templates/**
6 | **/test_*
7 | **/types.py
8 | **/types/*.py
9 |
10 | # This is currently in experimental and not properly covered with tests yet
11 | src/declarai/evals/**
12 | src/declarai/middlewares/**
13 | src/declarai/orchestrator/sequence.py
14 |
15 | [report]
16 | # Regexes for lines to exclude from consideration
17 | exclude_lines =
18 | # Have to re-enable the standard pragma
19 | pragma: no cover
20 | @overload
21 | pass
22 |
23 | # Don't complain about missing debug-only code:
24 | def __repr__
25 | if self\.debug
26 |
27 | # Don't complain if tests don't hit defensive assertion code:
28 | raise AssertionError
29 | raise NotImplementedError
30 |
31 | # Don't complain if non-runnable code isn't run:
32 | if 0:
33 | if __name__ == .__main__.:
34 |
35 | # Don't complain about abstract methods, they aren't run:
36 | @(abc\.)?abstractmethod
37 |
38 | ignore_errors = True
39 |
40 | [html]
41 | directory = coverage_html_report
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 vendi-ai
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/docs/features/chat/customizing-chat-response.md:
--------------------------------------------------------------------------------
1 | # Customizing the Chat Response
2 |
3 | The default response type of the language model messages is `str`. However, you can override the `send` method to return a different type.
4 | Just like tasks, you can control the type hints by declaring the return type of the `send` method.
5 |
6 | ```py
7 | from typing import List
8 | import declarai
9 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
10 |
11 | @gpt_35.experimental.chat
12 | class SQLBot:
13 | """
14 | You are a sql assistant."""
15 | ...
16 |
17 |     def send(self, message: str) -> List[str]:
18 | ...
19 |
20 | sql_bot = SQLBot()
21 | print(sql_bot.send(message="Offer two sql queries that use the 'SELECT' operation"))
22 | > [
23 | "SELECT * FROM table_name;",
24 | "SELECT column_name FROM table_name;"
25 | ]
26 | ```
27 |
28 | !!! warning
29 |
30 | As with tasks, the message is sent along with the expected return types.
31 |     This means that, if you are not careful, a message that conflicts with the expected return type could cause weird behavior in the llm responses.
32 | For more best-practices, see [here](../../../best-practices).
33 |
--------------------------------------------------------------------------------
/src/declarai/operators/openai_operators/chat_operator.py:
--------------------------------------------------------------------------------
1 | """
2 | Chat implementation of OpenAI operator.
3 | """
4 | import logging
5 | from declarai.operators.openai_operators.openai_llm import AzureOpenAILLM, OpenAILLM
6 | from declarai.operators.operator import BaseChatOperator
7 | from declarai.operators.registry import register_operator
8 |
9 | logger = logging.getLogger("OpenAIChatOperator")
10 |
11 |
12 | @register_operator(provider="openai", operator_type="chat")
13 | class OpenAIChatOperator(BaseChatOperator):
14 | """
15 | Chat implementation of OpenAI operator. This is a child of the BaseChatOperator class. See the BaseChatOperator class for further documentation.
16 |
17 | Attributes:
18 | llm: OpenAILLM
19 | """
20 |
21 | llm: OpenAILLM
22 |
23 |
24 | @register_operator(provider="azure-openai", operator_type="chat")
25 | class AzureOpenAIChatOperator(OpenAIChatOperator):
26 | """
27 |     Chat implementation of the Azure OpenAI operator. This is a child of the OpenAIChatOperator class. See the BaseChatOperator class for further documentation.
28 |
29 | Attributes:
30 | llm: AzureOpenAILLM
31 | """
32 |
33 | llm: AzureOpenAILLM
34 |
--------------------------------------------------------------------------------
/tests/python_parser/test_function_parser.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from declarai.python_parser.parser import PythonParser, SignatureReturn
4 |
5 |
6 | def test_output_prompt():
7 | def my_func(a_param: str, b_param: int) -> List[str]:
8 | """
9 | This is the method docstring
10 |         :param a_param: this param is a string
11 | :param b_param: this param is an integer
12 | :return: This returns a list of strings
13 | """
14 |
15 | parsed_func = PythonParser(my_func)
16 | assert parsed_func.name == "my_func"
17 | assert parsed_func.signature_kwargs == {"a_param": str, "b_param": int}
18 | return_signature = SignatureReturn(
19 | name="typing.List[str]",
20 | str_schema="List[string]",
21 | type_=List[str],
22 | )
23 | assert parsed_func.signature_return.name == return_signature.name
24 | assert parsed_func.signature_return.str_schema == return_signature.str_schema
25 | assert parsed_func.signature_return.type_ == return_signature.type_
26 | assert parsed_func.docstring_freeform == "This is the method docstring"
27 | assert parsed_func.docstring_params == {
28 |         "a_param": "this param is a string",
29 | "b_param": "this param is an integer",
30 | }
31 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp==3.8.5 ; python_version >= "3.8" and python_version < "4.0"
2 | aiosignal==1.3.1 ; python_version >= "3.8" and python_version < "4.0"
3 | async-timeout==4.0.2 ; python_version >= "3.8" and python_version < "4.0"
4 | attrs==23.1.0 ; python_version >= "3.8" and python_version < "4.0"
5 | certifi==2023.7.22 ; python_version >= "3.8" and python_version < "4.0"
6 | charset-normalizer==3.2.0 ; python_version >= "3.8" and python_version < "4.0"
7 | colorama==0.4.6 ; python_version >= "3.8" and python_version < "4.0" and platform_system == "Windows"
8 | frozenlist==1.3.3 ; python_version >= "3.8" and python_version < "4.0"
9 | idna==3.4 ; python_version >= "3.8" and python_version < "4.0"
10 | jsonref==1.1.0 ; python_version >= "3.8" and python_version < "4.0"
11 | multidict==6.0.4 ; python_version >= "3.8" and python_version < "4.0"
12 | openai==0.27.8 ; python_version >= "3.8" and python_version < "4.0"
13 | pydantic==1.10.12 ; python_version >= "3.8" and python_version < "4.0"
14 | requests==2.31.0 ; python_version >= "3.8" and python_version < "4.0"
15 | tqdm==4.65.0 ; python_version >= "3.8" and python_version < "4.0"
16 | typing-extensions==4.7.1 ; python_version >= "3.8" and python_version < "4.0"
17 | urllib3==2.0.4 ; python_version >= "3.8" and python_version < "4.0"
18 | yarl==1.9.2 ; python_version >= "3.8" and python_version < "4.0"
19 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "declarai"
3 | version = "0.1.13"
4 | description = "Declarai, turning Python code into LLM tasks, easy to use, and production-ready."
5 | authors = ["Aviv Almashanu "]
6 | readme = "README.md"
7 | packages = [{include = "declarai", from = "src"}]
8 |
9 | [tool.poetry.dependencies]
10 | python = "^3.8"
11 | openai = "^0.27.8"
12 | pydantic = "^1.8.2"
13 | jsonref = "^1.1.0"
14 | wandb = {version = "^0.15.8", optional = true}
15 | jinja2 = "^3.1.2"
16 |
17 |
18 | [tool.poetry.group.dev.dependencies]
19 | pylint = "^2.13.9"
20 | black = "^23.3.0"
21 | isort = "^5.11.5"
22 | pytest = "^7.4.0"
23 | pytest-cov = "^4.1.0"
24 | rich = "^13.4.2"
25 | mkdocs-material = "9.2.0b1"
26 | mkdocstrings = {version = "^0.22.0"}
27 | mkdocs-autorefs = "^0.5.0"
28 | mkdocstrings-python = "^1.5.0"
29 | mkdocstrings-crystal = "0.3.6"
30 | mkdocs-gen-files = "^0.5.0"
31 | mkdocs-literate-nav = "^0.6.0"
32 | mkdocs-section-index = "^0.3.5"
33 | pylint-pydantic = "^0.2.4"
34 |
35 | [tool.poetry.extras]
36 | wandb = ["wandb"]
37 | postgresql = ["psycopg2"]
38 | redis = ["redis"]
39 | mongo = ["pymongo"]
40 |
41 | [build-system]
42 | requires = ["poetry-core"]
43 | build-backend = "poetry.core.masonry.api"
44 |
45 | [tool.isort]
46 | profile = "black"
47 | line_length = 150
48 |
49 | [tool.pylint.format]
50 | max-line-length = "150"
51 |
--------------------------------------------------------------------------------
/docs/features/chat/chat-memory/file-memory.md:
--------------------------------------------------------------------------------
1 | # File Memory :material-file:
2 |
3 | For chats that require a persistent message history, you can use a file to store the conversation history.
4 |
5 | ## Set file memory
6 |
7 | ```py
8 | import declarai
9 | from declarai.memory import FileMessageHistory
10 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
11 |
12 | @gpt_35.experimental.chat(chat_history=FileMessageHistory("sql_bot_history.txt")) # (1)!
13 | class SQLBot:
14 | """
15 | You are a sql assistant. You help with SQL related questions with one-line answers.
16 | """
17 |
18 | sql_bot = SQLBot()
19 | ```
20 |
21 |
22 | 1. The file path is not mandatory. If you do not provide one, the history is stored by default in a temporary directory.
23 |
24 | We can also initialize the `FileMessageHistory` class with a custom file path.
25 |
26 |
27 | ## Set file memory at runtime
28 | If you want to set the file memory at runtime, pass `chat_history` when instantiating the chatbot, as shown below.
29 |
30 | ```py
31 | import declarai
32 | from declarai.memory import FileMessageHistory
33 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
34 |
35 | @gpt_35.experimental.chat
36 | class SQLBot:
37 | """
38 | You are a sql assistant. You help with SQL related questions with one-line answers.
39 | """
40 |
41 | sql_bot = SQLBot(chat_history=FileMessageHistory("sql_bot_history.txt"))
42 | ```
43 |
--------------------------------------------------------------------------------
/tests/python_parser/docstring_parsers/reST/test_parser.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from declarai.python_parser.docstring_parsers.reST.parser import ReSTDocstringParser
4 |
5 | multiline_docstring = """This is the documentation\nwith multiple lines
6 | :param param1: This is the first parameter
7 | with additional description
8 | :param param2: This is the second parameter
9 | with more details
10 | :return: This is the return value\n with multiple lines
11 | """
12 |
13 |
14 | @pytest.mark.parametrize(
15 | "docstring, freeform, params, returns",
16 | [
17 | (
18 | multiline_docstring,
19 | "This is the documentation\nwith multiple lines",
20 | {
21 | "param1": "This is the first parameter\n with additional description",
22 | "param2": "This is the second parameter\n with more details",
23 | },
24 | ("", "This is the return value\n with multiple lines"),
25 | ),
26 | ],
27 | )
28 | def test_reST_docstring_parser(docstring, freeform, params, returns):
29 | parsed_docstring = ReSTDocstringParser(docstring)
30 | assert parsed_docstring.freeform == freeform
31 | for param_name, param_doc in parsed_docstring.params.items():
32 | assert param_name in params
33 | assert param_doc == params[param_name]
34 | assert parsed_docstring.returns == returns
35 |
--------------------------------------------------------------------------------
/docs/features/chat/advanced-initialization.md:
--------------------------------------------------------------------------------
1 | # Initialization :beginner:
2 |
3 | Although using the docstring and class properties is the recommended way to initialize a chatbot, it is not the only way.
4 | In cases where relying on the class docstring and properties is problematic, we allow manually passing the chat arguments to the class constructor.
5 | This takes away some of the magic that Declarai provides, but we are aware that not everyone is comfortable with it.
6 |
7 |
8 | ## Initialization by passing parameters
9 | Let's see how we can initialize a chatbot by passing the `system` and `greeting` parameters as arguments.
10 |
11 | ```py
12 | import declarai
13 |
14 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
15 | @gpt_35.experimental.chat
16 | class SQLBot:
17 | ...
18 |
19 |
20 | sql_bot = SQLBot(
21 | system="You are a sql assistant. You help with SQL queries with one-line answers.",
22 | greeting="Hello, I am a SQL assistant. How can I assist you today?",
23 | )
24 |
25 | print(sql_bot.send("Tell me your preferred SQL operation"))
26 | ```
27 |
28 | ```py
29 | > "As an SQL assistant, I don't have a preferred SQL operation. I am here to assist with any SQL operation you need help with."
30 | ```
31 |
32 |
33 | ## Next steps
34 |
35 | You are welcome to explore our [**Features**](../../../features/) section, where you can find the full list of supported features and how to use them.
36 |
--------------------------------------------------------------------------------
/src/declarai/operators/message.py:
--------------------------------------------------------------------------------
1 | """
2 | Message definition for the operators.
3 | """
4 | from enum import Enum
5 |
6 | from pydantic import BaseModel
7 |
8 |
9 | class MessageRole(str, Enum):
10 | """
11 | Message role enum for the Message class to indicate the role of the message in the chat.
12 |
13 | Attributes:
14 | system: The message is the system message, usually used as the first message in the chat.
15 | user: Every message that is sent by the user.
16 | assistant: Every message that is sent by the assistant.
17 | function: Every message that is sent by the assistant that is a function call.
18 | """
19 |
20 | system: str = "system"
21 | user: str = "user"
22 | assistant: str = "assistant"
23 | function: str = "function"
24 |
25 |
26 | class Message(BaseModel):
27 | """
28 | Represents a message in the chat.
29 |
30 | Args:
31 | message: The message string
32 | role: The role of the message in the chat
33 |
34 | Attributes:
35 | message: The message string
36 | role: The role of the message in the chat
37 | """
38 |
39 | message: str
40 | role: MessageRole
41 |
42 | def __str__(self):
43 | return self.message
44 |
45 | def __repr__(self):
46 | return f"{self.role.value}: {self.message}"
47 |
48 | def __eq__(self, other):
49 | return self.message == other.message and self.role == other.role
50 |
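51 |
52 | # A minimal usage sketch (illustrative only):
53 | #   msg = Message(message="Hello!", role=MessageRole.user)
54 | #   str(msg)   # -> "Hello!"
55 | #   repr(msg)  # -> "user: Hello!"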
--------------------------------------------------------------------------------
/src/declarai/evals/generation/structured_strict_complex.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from pydantic import BaseModel
4 |
5 | from declarai import Declarai
6 |
7 |
8 | class TimeFrame(BaseModel):
9 | start: int
10 | end: Optional[int]
11 |
12 |
13 | class BusinessTrait(BaseModel):
14 | time_frame: TimeFrame
15 | title: str
16 | description: str
17 | experience: str
18 |
19 |
20 | class Recommendation(BaseModel):
21 | recommender: str
22 | recommendation: str
23 |
24 |
25 | class BusinessProfile(BaseModel):
26 | bio: str
27 | traits: List[BusinessTrait]
28 | previous_jobs: List[str]
29 | recommendations: List[Recommendation]
30 |
31 |
32 | def structured_strict_complex(name: str, skills: List[str]) -> BusinessProfile:
33 | """
34 | Generate a business profile based on the given name and skills
35 | Produce a short bio and a mapping of the skills and where they can be used
36 | for fields with missing data, you can make up data to fill in the gaps
37 | :param name: The name of the person
38 | :param skills: The skills of the person
39 | :return: The generated business profile
40 | """
41 | return Declarai.magic(name=name, skills=skills)
42 |
43 |
44 | structured_strict_complex_kwargs = {
45 | "name": "Bob grapes",
46 | "skills": [
47 | "Management",
48 | "entrepreneurship",
49 | "programming",
50 | "investing",
51 | "Machine Learning",
52 | ],
53 | }
54 |
--------------------------------------------------------------------------------
/src/declarai/operators/openai_operators/settings.py:
--------------------------------------------------------------------------------
1 | """
2 | Environment level configurations for working with openai and Azure openai providers.
3 | """
4 | import os
5 | from declarai.core.core_settings import DECLARAI_PREFIX
6 |
7 | OPENAI_API_KEY: str = os.getenv(
8 | f"{DECLARAI_PREFIX}_OPENAI_API_KEY", os.getenv("OPENAI_API_KEY", "")
9 | ) # pylint: disable=E1101
10 | "API key for openai provider."
11 |
12 | OPENAI_MODEL: str = os.getenv(
13 | f"{DECLARAI_PREFIX}_OPENAI_MODEL", "gpt-3.5-turbo"
14 | ) # pylint: disable=E1101
15 | "Model name for openai provider."
16 |
17 | # Azure specific configurations
18 | AZURE_OPENAI_KEY: str = os.getenv(
19 | f"{DECLARAI_PREFIX}_AZURE_OPENAI_KEY", os.getenv("AZURE_OPENAI_KEY", "")
20 | ) # pylint: disable=E1101
21 | "API key for Azure openai provider."
22 |
23 | AZURE_OPENAI_API_BASE: str = os.getenv(
24 | f"{DECLARAI_PREFIX}_AZURE_OPENAI_API_BASE",
25 | os.getenv("AZURE_OPENAI_API_BASE", ""),
26 | ) # pylint: disable=E1101
27 | "Endpoint for Azure openai provider."
28 |
29 | AZURE_API_VERSION: str = os.getenv(
30 | f"{DECLARAI_PREFIX}_AZURE_API_VERSION",
31 | os.getenv("AZURE_API_VERSION", "2023-05-15"),
32 | ) # pylint: disable=E1101
33 | "API version for Azure openai provider."
34 |
35 |
36 | DEPLOYMENT_NAME: str = os.getenv(
37 | f"{DECLARAI_PREFIX}_AZURE_OPENAI_DEPLOYMENT_NAME",
38 | os.getenv("DEPLOYMENT_NAME", ""),
39 | ) # pylint: disable=E1101
40 | "Deployment name for the model in Azure openai provider."
41 |
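42 | # Example (assumption: DECLARAI_PREFIX resolves to "DECLARAI"): either of these
43 | # environment variables would be picked up, with the prefixed one taking precedence:
44 | #   export DECLARAI_OPENAI_API_KEY=sk-...
45 | #   export OPENAI_API_KEY=sk-...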
--------------------------------------------------------------------------------
/tests/operators/openai_operators/test_chat_operator.py:
--------------------------------------------------------------------------------
1 | from declarai.operators import OpenAILLM, OpenAIChatOperator
2 | from declarai.python_parser.parser import PythonParser
3 |
4 |
5 | def test_chat_openai_operator():
6 | openai_operator_class = OpenAIChatOperator
7 | llm = OpenAILLM(
8 | openai_token="test-token",
9 | model="test-model",
10 | )
11 |
12 | class MyChat:
13 | """
14 | This is my beloved chat
15 | """
16 |
17 | parsed = PythonParser(MyChat)
18 | openai_operator_instance = openai_operator_class(parsed=parsed, llm=llm)
19 | assert openai_operator_instance.parsed.name == MyChat.__name__
20 | compiled = openai_operator_instance.compile(messages=[])
21 | assert isinstance(compiled, dict)
22 | messages = list(compiled["messages"])
23 | assert len(messages) == 1
24 | assert messages[0].message == "This is my beloved chat"
25 | assert messages[0].role == "system"
26 |
27 | # def openai_task():
28 | # ...
29 | #
30 | # parsed = PythonParser(openai_task)
31 | # openai_operator_instance = openai_operator_class(parsed=parsed, llm=llm)
32 | # assert openai_operator_instance.parsed.name == openai_task.__name__
33 | # compiled = openai_operator_instance.compile()
34 | # assert isinstance(compiled, dict)
35 | # messages = list(compiled["messages"])
36 | # assert len(messages) == 1
37 | # assert messages[0].message == "\n\n"
38 | # assert messages[0].role == "user"
39 |
--------------------------------------------------------------------------------
/tests/operators/openai_operators/test_operator.py:
--------------------------------------------------------------------------------
1 | from declarai.operators import OpenAILLM, OpenAITaskOperator
2 | from declarai.python_parser.parser import PythonParser
3 |
4 |
5 | def test_openai_operator():
6 | openai_operator_class = OpenAITaskOperator
7 | llm = OpenAILLM(
8 | openai_token="test-token",
9 | model="test-model",
10 | )
11 |
12 | def openai_task(argument: str) -> str:
13 | """
14 | This is a test task
15 | :param argument: this is a test argument
16 | :return: this is a test return
17 | """
18 |
19 | parsed = PythonParser(openai_task)
20 | openai_operator_instance = openai_operator_class(parsed=parsed, llm=llm)
21 | assert openai_operator_instance.parsed.name == openai_task.__name__
22 | compiled = openai_operator_instance.compile()
23 | assert isinstance(compiled, dict)
24 | messages = list(compiled["messages"])
25 | assert len(messages) == 1
26 | assert (
27 | messages[0].message == "This is a test task\nInputs:\nargument: {argument}\n\n"
28 | )
29 | assert messages[0].role == "user"
30 |
31 | def openai_task():
32 | ...
33 |
34 | parsed = PythonParser(openai_task)
35 | openai_operator_instance = openai_operator_class(parsed=parsed, llm=llm)
36 | assert openai_operator_instance.parsed.name == openai_task.__name__
37 | compiled = openai_operator_instance.compile()
38 | assert isinstance(compiled, dict)
39 | messages = list(compiled["messages"])
40 | assert len(messages) == 1
41 | assert messages[0].message == "\n\n"
42 | assert messages[0].role == "user"
43 |
--------------------------------------------------------------------------------
/scripts/gen_ref_pages.py:
--------------------------------------------------------------------------------
1 | """Generate the code reference pages and navigation."""
2 |
3 | from pathlib import Path
4 |
5 | import mkdocs_gen_files
6 |
7 | nav = mkdocs_gen_files.Nav()
8 | exclude_patterns = [Path("src", "declarai", "evals")]
9 | mod_symbol = ''
10 | for path in sorted(Path("src").rglob("*.py")):
11 | skip = False
12 | for pattern in exclude_patterns:
13 | try:
14 | if path.relative_to(pattern):
15 | print(f"Skipping {path}")
16 | skip = True
17 | break
18 | except ValueError:
19 | pass
20 | if skip:
21 | continue
22 |
23 | module_path = path.relative_to("src").with_suffix("")
24 | doc_path = path.relative_to("src").with_suffix(".md")
25 | full_doc_path = Path("reference", doc_path)
26 |
27 | parts = tuple(module_path.parts)
28 |
29 | if parts[-1] == "__init__":
30 | parts = parts[:-1]
31 | doc_path = doc_path.with_name("index.md")
32 | full_doc_path = full_doc_path.with_name("index.md")
33 | elif parts[-1] == "__main__":
34 | continue
35 |
36 | nav_parts = [f"{mod_symbol} {part}" for part in parts]
37 | nav[tuple(nav_parts)] = doc_path.as_posix()
38 |
39 | with mkdocs_gen_files.open(full_doc_path, "w") as fd:
40 | ident = ".".join(parts)
41 | fd.write(f"::: {ident}")
42 |
43 | mkdocs_gen_files.set_edit_path(full_doc_path, ".." / path)
44 |
45 | with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file:
46 | nav_file.writelines(nav.build_literate_nav())
47 |
--------------------------------------------------------------------------------
/docs/contribute.md:
--------------------------------------------------------------------------------
1 | # Contribute :rocket:
2 |
3 | Do you like **Declarai**?
4 |
5 | Spread the word!
6 |
7 | - **Star** :star: the repository
8 | - **Share** the [link](https://github.com/vendi-ai/declarai) to the repository with your friends and colleagues
9 | - **Watch** the github repository to get notified about new releases.
10 |
11 | ## Development :material-source-pull:
12 | Once you have cloned the repository, install the requirements using either Poetry or `venv`:
13 |
15 |
16 | === "Poetry"
17 |
18 |
19 | ```console
20 | poetry install
21 | ```
22 |
23 |
24 | === "Venv"
25 |
26 |
27 | ```console
28 | python -m venv env
29 | source env/bin/activate
30 | python -m pip install --upgrade pip
31 | pip install -r requirements.txt
32 | ```
33 |
34 |
35 | ## Documentation :material-book-open-variant:
36 |
37 | The documentation is built using [MkDocs](https://www.mkdocs.org/).
38 | To view the documentation locally, run the following command:
39 |
40 |
41 |
42 | ```console
43 | $ cd docs
44 | $ mkdocs serve
45 | INFO - [11:37:30] Serving on http://127.0.0.1:8000/
46 | ```
47 |
48 |
49 | ## Testing
50 | The testing framework used is [pytest](https://docs.pytest.org/en/stable/).
51 | To run the tests, run the following command:
52 |
53 |
54 | ```bash
55 | pytest --cov=src
56 | ```
57 |
58 | ## Pull Requests
59 | It should be extremely easy to contribute to this project.
60 | If you have any ideas, just open a pull request and we will discuss it.
61 |
62 | ```bash
63 | git checkout -b my-new-feature
64 | git commit -am 'Add some feature'
65 | git push origin my-new-feature
66 | ```
67 |
--------------------------------------------------------------------------------
/docs/features/chat/index.md:
--------------------------------------------------------------------------------
1 | # Chatbots :speech_balloon:
2 |
3 | Unlike tasks, chatbots are meant to keep the conversation going.
4 | Instead of executing a single operation, they are built to manage conversation context over time.
5 |
6 | Declarai can be used to create chatbots. The simplest way to do this is to use the `@declarai.experimental.chat` decorator.
7 |
8 | We declare a "system prompt" in the docstring of the class definition.
9 | The system prompt is the initial command that instructs the bot on who they are and what's expected in the conversation.
10 |
11 |
12 | ```py
13 | import declarai
14 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
15 |
16 | @gpt_35.experimental.chat
17 | class SQLBot:
18 | """
19 | You are a sql assistant. You help with SQL related questions
20 | """ # (1)!
21 | ```
22 |
23 | 1. The docstring represents the chatbot's description and is used to generate the prompt.
24 |
25 | ```py
26 | sql_bot = SQLBot()
27 | sql_bot.send("When should I use a LEFT JOIN?") # (1)!
28 |
29 | > "You should use a LEFT JOIN when you want to return all rows from the left table, and the matched rows from the right table."
30 | ```
31 |
32 | 1. The created bot exposes a `send` method, through which you can interact with the bot and send messages.
33 |    Every call to `send` returns a response from the bot.
34 |
35 |
36 | !!! tip
37 | You can also declare the chatbot system prompt by doing the following
38 | ```py
39 | @declarai.experimental.chat
40 | class SQLBot:
41 | pass
42 | sql_bot = SQLBot(system="You are a sql assistant. You help with SQL related questions with one-line answers.")
43 | ```
44 |
--------------------------------------------------------------------------------
/tests/operators/test_operator_resolver.py:
--------------------------------------------------------------------------------
1 | import os
2 | from unittest.mock import patch
3 |
4 | import pytest
5 |
6 | from declarai.operators import (
7 | LLMSettings,
8 | resolve_operator,
9 | resolve_llm,
10 | AzureOpenAITaskOperator,
11 | )
12 | from declarai.operators.openai_operators import OpenAIError, OpenAITaskOperator
13 |
14 |
15 | def test_resolve_openai_operator_with_token():
16 | kwargs = {"openai_token": "test_token"}
17 | llm = resolve_llm(provider="openai", model="davinci", **kwargs)
18 | operator = resolve_operator(llm_instance=llm, operator_type="task")
19 | assert operator == OpenAITaskOperator
20 | assert llm.model == "davinci"
21 | assert llm.api_key == kwargs["openai_token"]
22 |
23 |
24 | @patch(
25 | "declarai.operators.openai_operators.openai_llm.OPENAI_API_KEY",
26 | "test_token",
27 | )
28 | def test_resolve_openai_operator_without_token():
29 | llm = resolve_llm(provider="openai", model="davinci")
30 | operator = resolve_operator(llm, operator_type="task")
31 | assert operator == OpenAITaskOperator
32 |
33 |
34 | def test_resolve_openai_operator_no_token_raises_error():
35 | with pytest.raises(OpenAIError):
36 | llm = resolve_llm(provider="openai", model="davinci")
37 | resolve_operator(llm, operator_type="task")
38 |
39 |
40 | def test_resolve_azure_operator():
41 | llm = resolve_llm(
42 | provider="azure-openai",
43 | model="test",
44 | azure_openai_key="123",
45 | azure_openai_api_base="456",
46 | )
47 | operator = resolve_operator(llm, operator_type="task")
48 | assert operator == AzureOpenAITaskOperator
49 |
--------------------------------------------------------------------------------
/docs/features/planning-future-tasks.md:
--------------------------------------------------------------------------------
1 | ## Plan task :material-airplane-clock:
2 | Once you have defined your task, you can create a plan for it that is already populated with the real values of the parameters.
3 |
4 | The plan is an object that you call to get the results. This is very helpful when you want to populate the task with the real values of the parameters but delay its execution.
5 |
6 | ```py
7 | import declarai
8 |
9 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
10 |
11 | @gpt_35.task
12 | def say_something_about_movie(movie: str) -> str:
13 | """
14 | Say something short about the following movie
15 | :param movie: The movie name
16 | """
17 |
18 | return declarai.magic(movie)
19 |
20 | plan = say_something_about_movie.plan(movie="Avengers")
21 |
22 | print(plan)
23 | > #
24 |
25 |
26 | # Execute the task by calling the plan
27 | plan()
28 | > ['I liked the action-packed storyline and the epic battle scenes.',
29 | "I didn't like the lack of character development for some of the Avengers."]
30 | ```
31 |
32 |
33 | !!! warning "Important"
34 |     The plan is an object that you call to get the results. It is very helpful when you want to populate the task with the real values of the parameters but delay its execution.
35 | If you just want to execute the task, you can call the task directly.
36 |
37 | ```py
38 | res = say_something_about_movie(movie="Avengers")
39 |
40 | > ['I liked the action-packed storyline and the epic battle scenes.',
41 | "I didn't like the lack of character development for some of the Avengers."]
42 | ```
43 |
--------------------------------------------------------------------------------
/.github/workflows/test.yaml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on: [pull_request]
4 |
5 | jobs:
6 | pre-commit:
7 | permissions:
8 | pull-requests: write
9 |
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/checkout@v2
14 |
15 | - name: Set up Python 3.8
16 | uses: actions/setup-python@v2
17 | with:
18 | python-version: 3.8
19 |
20 | - name: Install dependencies
21 | run: |
22 | python -m pip install --upgrade pip
23 | pip install poetry pre-commit
24 | poetry install
25 |
26 | - name: Run pre-commit
27 | run: pre-commit run --all-files
28 |
29 | test:
30 | permissions:
31 | pull-requests: write
32 |
33 | runs-on: ubuntu-latest
34 |
35 | steps:
36 | - uses: actions/checkout@v2
37 |
38 | - name: Set up Python 3.8
39 | uses: actions/setup-python@v2
40 | with:
41 | python-version: 3.8
42 |
43 | - name: Install dependencies
44 | run: |
45 | python -m pip install --upgrade pip
46 | pip install poetry
47 | poetry install
48 |
49 | - name: Run tests with pytest
50 | run: |
51 |         poetry run pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov-report=xml --cov=src tests | tee pytest-coverage.txt
52 |
53 | - name: Upload coverage to GitHub Artifacts
54 | uses: actions/upload-artifact@v2
55 | with:
56 | name: coverage
57 | path: coverage.xml
58 |
59 | - name: Pytest coverage comment
60 | if: github.event.pull_request.head.repo.full_name == github.repository
61 | uses: MishaKav/pytest-coverage-comment@main
62 | with:
63 | pytest-coverage-path: ./pytest-coverage.txt
64 | junitxml-path: ./pytest.xml
65 |
--------------------------------------------------------------------------------
/docs/integrations/index.md:
--------------------------------------------------------------------------------
1 | # Integrations
2 |
3 | Declarai comes with minimal dependencies out of the box, to keep the core of the library clean and simple.
4 | If you would like to extend the functionality of Declarai, you can install one of the following integrations.
5 |
6 | ## [Wandb](https://wandb.ai/site)
7 |
8 | Weights & Biases is a popular tool for tracking machine learning experiments.
9 | Recently they released an API for tracking prompts in their platform.
10 | The platform has a free tier which you can use to experiment!
11 |
12 | ```bash
13 | pip install declarai[wandb]
14 | ```
15 |
16 | !!! info
17 |
18 | To use this integration you will need to create an account at wandb. Once created,
19 | you can create a new project and get your API key from the settings page.
20 |
21 |
22 | Once set up, you can use the `WandDBMonitorCreator` to track your prompts in the platform.
23 |
24 | ```python
25 | from typing import Dict
26 | import declarai
27 | from declarai.middleware import WandDBMonitorCreator
28 |
29 |
30 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
31 |
32 | WandDBMonitor = WandDBMonitorCreator(
33 | name="",
34 | project="",
35 | key="",
36 | )
37 |
38 |
39 | @gpt_35.task(middlewares=[WandDBMonitor])
40 | def extract_info(text: str) -> Dict[str, str]:
41 | """
42 | Extract the phone number, name and email from the provided text
43 | :param text: content to extract the info from
44 | :return: The info extracted from the text
45 | """
46 | return declarai.magic(text=text)
47 | ```
48 | The tracked prompts should look like this:
49 |
50 |
51 |
52 |
--------------------------------------------------------------------------------
/docs/css/mkdocstrings.css:
--------------------------------------------------------------------------------
1 | /* Indentation. */
2 | div.doc-contents:not(.first) {
3 | padding-left: 25px;
4 | border-left: .05rem solid var(--md-typeset-table-color);
5 | }
6 |
7 | /* Mark external links as such. */
8 | a.external::after,
9 | a.autorefs-external::after {
10 | /* https://primer.style/octicons/arrow-up-right-24 */
11 | mask-image: url('data:image/svg+xml,');
12 | content: ' ';
13 |
14 | display: inline-block;
15 | vertical-align: middle;
16 | position: relative;
17 |
18 | height: 1em;
19 | width: 1em;
20 | background-color: var(--md-typeset-a-color);
21 | }
22 |
23 | a.external:hover::after,
24 | a.autorefs-external:hover::after {
25 | background-color: var(--md-accent-fg-color);
26 | }
27 |
28 | /* Mark external links as such (also in nav) */
29 | a.external:hover::after, a.md-nav__link[href^="https:"]:hover::after {
30 | /* https://primer.style/octicons/link-external-16 */
31 | background-image: url('data:image/svg+xml,');
32 | height: 0.8em;
33 | width: 0.8em;
34 | margin-left: 0.2em;
35 | content: ' ';
36 | display: inline-block;
37 | }
38 |
--------------------------------------------------------------------------------
/tests/orchestrator/test_task_orchestrator.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import MagicMock
2 |
3 | from declarai.python_parser.parser import PythonParser
4 | from declarai.task import Task
5 |
6 |
7 | def test_task():
8 | operator = MagicMock()
9 | instantiated_operator = MagicMock()
10 | instantiated_operator.streaming = False
11 | operator.return_value = instantiated_operator
12 |
13 | instantiated_operator.compile.return_value = "compiled_result"
14 | llm_response = MagicMock()
15 | llm_response.response = "predicted_result"
16 | instantiated_operator.predict.return_value = llm_response
17 |
18 | def test_task() -> str:
19 | pass
20 |
21 | instantiated_operator.parse_output.return_value = PythonParser(test_task).parse(
22 | llm_response.response
23 | )
24 |
25 | task = Task(instantiated_operator)
26 | assert task.compile() == "compiled_result"
27 |
28 | # TODO: Implement test when plan is implemented
29 | # task_orchestrator.plan()
30 |
31 | res = task()
32 | assert res == "predicted_result"
33 |
34 | res = task(llm_params={"temperature": 0.5})
35 | instantiated_operator.predict.assert_called_with(llm_params={"temperature": 0.5})
36 |
37 |
38 | def test_task_streaming():
39 | operator = MagicMock()
40 | instantiated_operator = MagicMock()
41 | instantiated_operator.streaming = True
42 | operator.return_value = instantiated_operator
43 |
44 | instantiated_operator.compile.return_value = "compiled_result"
45 | llm_response = MagicMock()
46 | llm_response.response = "predicted_result"
47 | instantiated_operator.predict.return_value = [llm_response]
48 |
49 | def test_task() -> str:
50 | pass
51 |
52 | task = Task(instantiated_operator)
53 | assert list(task()) == [llm_response]
54 |
--------------------------------------------------------------------------------
/docs/features/chat/chat-memory/redis-memory.md:
--------------------------------------------------------------------------------
1 |
2 | # Redis Memory :material-database:
3 |
4 | For chats that require a fast and scalable message history, you can use a Redis database to store the conversation history.
5 |
6 | ## Set Redis memory
7 |
8 | ```py
9 | import declarai
10 | from declarai.memory import RedisMessageHistory
11 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
12 |
13 | @gpt_35.experimental.chat(
14 | chat_history=RedisMessageHistory(
15 | session_id="unique_chat_id",
16 | url="redis://localhost:6379/0"
17 | )
18 | ) # (1)!
19 | class SQLBot:
20 | """
21 | You are a sql assistant. You help with SQL related questions with one-line answers.
22 | """
23 |
24 | sql_bot = SQLBot()
25 | ```
26 |
27 | 1. The `url` parameter specifies the connection details for the Redis server. Replace `localhost` and `6379` with your specific Redis connection details. The `session_id` parameter uniquely identifies the chat session for which the history is being stored.
28 |
29 | We can also initialize the `RedisMessageHistory` class with custom connection details.
30 |
31 | ## Set Redis memory at runtime
32 |
33 | If you want to set the Redis memory at runtime, pass `chat_history` when instantiating the chatbot, as shown below.
34 |
35 | ```py
36 | import declarai
37 | from declarai.memory import RedisMessageHistory
38 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
39 |
40 | @gpt_35.experimental.chat
41 | class SQLBot:
42 | """
43 | You are a sql assistant. You help with SQL related questions with one-line answers.
44 | """
45 |
46 | sql_bot = SQLBot(chat_history=RedisMessageHistory(session_id="unique_chat_id", url="redis://localhost:6379/0"))
47 | ```
48 |
49 | ## Dependencies
50 |
51 | Make sure to install the following dependencies before using Redis memory.
52 |
53 | ```bash
54 | pip install declarai[redis]
55 | ```
56 |
--------------------------------------------------------------------------------
/src/declarai/middleware/internal/log_middleware.py:
--------------------------------------------------------------------------------
1 | """
2 | Logger Middleware
3 | """
4 | import logging
5 | from time import time
6 | from typing import Optional
6 |
7 | from declarai._base import TaskType
8 | from declarai.middleware.base import TaskMiddleware
9 |
10 | logger = logging.getLogger("PromptLogger")
11 |
12 |
13 | class LoggingMiddleware(TaskMiddleware):
14 | """
15 | Creates a Simple logging middleware for a given task.
16 |
17 | Example:
18 | ```py
19 | @openai.task(middlewares=[LoggingMiddleware])
20 | def generate_a_poem(title: str):
21 | '''
22 | Generate a poem based on the given title
23 | :return: The generated poem
24 | '''
25 | return declarai.magic("poem", title)
26 | ```
27 | """
28 |
29 |     start_time: Optional[float] = None
30 |
31 | def before(self, _):
32 | """
33 | Before execution of the task, set the start time.
34 | """
35 | self.start_time = time()
36 |
37 | def after(self, task: TaskType):
38 | """
39 | After execution of the task, log the task details.
40 | Args:
41 | task: the task to be logged
42 |
43 |         The logged record includes the task name, llm model, template, compiled
44 |         template, call kwargs, result, and execution time.
45 |
46 | """
47 | end_time = time() - self.start_time
48 | log_record = {
49 | "task_name": task.__name__,
50 | "llm_model": task.llm_response.model,
51 | "template": str(task.compile()),
52 | "call_kwargs": str(self._kwargs),
53 | "compiled_template": str(task.compile(**self._kwargs)),
54 | "result": task.llm_response.response,
55 | "time": end_time,
56 | }
57 | logger.info(log_record)
58 | print(log_record)
59 |
--------------------------------------------------------------------------------
/docs/beginners-guide/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide:
3 | - footer
4 | ---
5 |
6 | # Tutorial - Beginners guide
7 |
8 | This tutorial is a step-by-step guide to using **Declarai**. It walks you through the most basic features of the library.
9 |
10 | Each section gradually builds on the previous one, while sections are also structured by topic,
11 | so that you can skip to whichever part is relevant to you.
12 |
13 | ## Before we start
14 |
15 | If you haven't already, install the Declarai library as follows:
16 |
17 | ```bash
18 | $ pip install declarai
19 | ```
20 | !!! info
21 |
22 |     For this tutorial you will need an openai token. This token is completely yours and is not shared, stored or managed
23 |     anywhere but on your machine! You can find more information about obtaining a token here: [openai](/declarai/src/providers/openai/)
24 |
25 | After installation, open a Python file and set up your Declarai app as shown below.
26 |
27 | Once completed, the rest of the examples in this module should be as simple as copy/paste.
28 |
29 |
30 |
31 | ```python title="declarai_tutorial.py"
32 | import declarai
33 |
34 | gpt_35 = declarai.openai(model="gpt-3.5-turbo", openai_token="")
35 | ```
36 |
37 |
38 | !!! info
39 |
40 | Do your best to copy, run and edit the code in your editor to really understand how powerful Declarai is.
41 |
42 |
28 |
29 | ### Evaluations
30 | The output table allows you to review the performance of your task across models and providers and make an informed
31 | decision on which model and provider to use for your task.
32 |
33 | | Provider | Model | version | Scenario | runtime | output |
34 | |:---------|:--------------|:--------|:---------------------------------|:--------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
35 | | openai | gpt-3.5-turbo | latest | generate_a_poem_no_metadata | 1.235s | Using LLMs is fun! |
36 | | openai | gpt-3.5-turbo | 0301 | generate_a_poem_no_metadata | 0.891s | Using LLMs is fun! It's like playing with words Creating models that learn And watching them fly like birds |
37 | | openai | gpt-3.5-turbo | 0613 | generate_a_poem_no_metadata | 1.071s | Using LLMs is fun! |
38 | | openai | gpt-4 | latest | generate_a_poem_no_metadata | 3.494s | {'poem': 'Using LLMs, a joyous run,\nIn the world of AI, under the sun.\nWith every task, they stun,\nIndeed, using LLMs is fun!'} |
39 | | openai | gpt-4 | 0613 | generate_a_poem_no_metadata | 4.992s | {'title': 'Using LLMs is fun!', 'poem': "With LLMs, the fun's just begun, \nCoding and learning, second to none. \nComplex tasks become a simple run, \nOh, the joy when the work is done!"} |
40 | | openai | gpt-3.5-turbo | latest | generate_a_poem_only_return_type | 2.1s | Learning with LLMs, a delightful run, Exploring new knowledge, it's never done. With every challenge, we rise and we stun, Using LLMs, the learning is always fun! |
41 |
--------------------------------------------------------------------------------
/docs/newsletter.md:
--------------------------------------------------------------------------------
1 | Subscribe to our newsletter to stay up to date with the latest news about Declarai, and other cool stuff 📬
2 |
--------------------------------------------------------------------------------
/docs/features/chat/chat-memory/index.md:
--------------------------------------------------------------------------------
1 | # Chat memory :brain:
2 |
3 | A chat instance saves the message history and uses it to inform future responses.
4 | Here is an example of a chatbot that retains conversation history across multiple `send` requests.
5 | ```py
6 | @declarai.experimental.chat
7 | class SQLBot:
8 | """
9 | You are a sql assistant. You help with SQL related questions with one-line answers.
10 | """
11 |
12 | sql_bot = SQLBot()
13 |
14 | sql_bot.send("When should I use a LEFT JOIN?") # (1)!
15 | > "You should use a LEFT JOIN when you want to retrieve all records from the left table and matching records from the right table."
16 |
17 | sql_bot.send("But how is it different from a RIGHT JOIN?") # (2)!
18 | > "A LEFT JOIN retrieves all records from the left table and matching records from the right table, while a RIGHT JOIN retrieves all records from the right table and matching records from the left table."
19 | ```
20 |
21 | 1. The first message is sent with the system prompt.
22 | 2. The second message is sent with the previous conversation and therefore the model is aware of the first question.
23 |
24 |
25 | ## Conversation History
26 | You can view the conversation history by accessing the `conversation` attribute.
27 |
28 | ```py
29 | sql_bot.conversation
30 |
31 | > [
32 | user: When should I use a LEFT JOIN?,
33 | assistant: You should use a LEFT JOIN when you want to retrieve all records from the left table and matching records from the right table.,
34 | user: But how is it different from a RIGHT JOIN?,
35 | assistant: A LEFT JOIN retrieves all records from the left table and matching records from the right table, while a RIGHT JOIN retrieves all records from the right table and matching records from the left table.
36 | ]
37 |
38 | ```
39 |
40 | !!! warning
41 |
42 | Keep in mind that the conversation history does not contain the system prompt. It only contains the user messages and the chatbot responses.
43 |
44 | If you want to access the system message, you can use the `system` attribute.
45 |
46 | ```py
47 | sql_bot.system
48 |
49 | > "system: You are a sql assistant. You help with SQL related questions with one-line answers.\n"
50 | ```
51 |
52 |
53 | ## Default Memory
54 |
55 | **The default message history of a chat is a simple in-memory list**. This means that history exists only for the duration of the chatbot session.
56 |
57 | If you prefer to have a persistent history, you can use the `FileMessageHistory` class from the `declarai.memory` module.
58 |
59 |
60 | ## Setting up a memory
61 | Setting up a memory is done by passing `chat_history` as a keyword argument to the `declarai.experimental.chat` decorator.
62 |
63 | ```py
64 | import declarai
65 | from declarai.memory import FileMessageHistory
66 |
67 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
68 |
69 | @gpt_35.experimental.chat(chat_history=FileMessageHistory("sql_bot_history.txt")) # (1)!
70 | class SQLBot:
71 | """
72 | You are a sql assistant. You help with SQL related questions with one-line answers.
73 | """
74 | ```
75 |
76 | 1. The file path is not mandatory. If you do not provide one, the history is stored by default in a temporary directory.
77 |
78 | We can also initialize the `chat_history` at runtime:
79 |
80 | ```py
81 | import declarai
82 | from declarai.memory import FileMessageHistory
83 |
84 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
85 |
86 | @gpt_35.experimental.chat
87 | class SQLBot:
88 | """
89 | You are a sql assistant. You help with SQL related questions with one-line answers.
90 | """
91 | sql_bot = SQLBot(chat_history=FileMessageHistory("sql_bot_history.txt"))
92 | ```
93 |
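94 | Whichever backend you choose, every chat history implements the same small interface
95 | defined in `declarai.memory`: a `history` property, `add_message`, and `clear`.
96 | A minimal sketch of the default in-memory implementation (the import paths are
97 | assumptions based on the module layout):
98 |
99 | ```py
100 | from declarai.memory.in_memory import InMemoryMessageHistory
101 | from declarai.operators.message import Message, MessageRole
102 |
103 | history = InMemoryMessageHistory()
104 | history.add_message(Message(message="When should I use a LEFT JOIN?", role=MessageRole.user))
105 | print(history.history)  # [user: When should I use a LEFT JOIN?]
106 | history.clear()
107 | ```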
--------------------------------------------------------------------------------
/src/declarai/memory/postgres.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains the PostgresMessageHistory class, which is used to store chat message history in a PostgreSQL database.
3 |
4 | """
5 | import json
6 | import logging
7 | from typing import List, Optional
8 |
9 | from ..operators import Message
10 | from .base import BaseChatMessageHistory
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 | DEFAULT_TABLE_NAME = "message_store"
15 | "The default table name for the PostgreSQL message store."
16 | DEFAULT_CONNECTION_STRING = "postgresql://postgres:postgres@localhost:5432/postgres"
17 | "The default connection string for a local PostgreSQL database."
18 |
19 |
20 | class PostgresMessageHistory(BaseChatMessageHistory):
21 | """
22 | Chat message history that stores history in a PostgreSQL database.
23 |
24 | Args:
25 | session_id: Arbitrary key that is used to store the messages for a single chat session.
26 | connection_string: Database connection string.
27 | table_name: Name of the table to use.
28 | """
29 |
30 | def __init__(
31 | self,
32 | session_id: str,
33 | connection_string: Optional[str] = DEFAULT_CONNECTION_STRING,
34 | table_name: str = DEFAULT_TABLE_NAME,
35 | ):
36 | try:
37 | import psycopg2 # pylint: disable=import-outside-toplevel
38 | except ImportError:
39 | raise ImportError(
40 |                 "Cannot import psycopg2. "
41 |                 "Please install psycopg2 to use PostgresMessageHistory."
42 | )
43 | self.conn = psycopg2.connect(connection_string)
44 | self.cursor = self.conn.cursor()
45 | self.table_name = table_name
46 | self.session_id = session_id
47 | self._initialize_tables()
48 |
49 | def _initialize_tables(self):
50 | """Initialize the tables if they don't exist."""
51 | create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
52 | id SERIAL PRIMARY KEY,
53 | session_id TEXT NOT NULL,
54 | message JSONB NOT NULL
55 | );"""
56 | self.cursor.execute(create_table_query)
57 | self.conn.commit()
58 |
59 | @property
60 | def history(self) -> List[Message]:
61 | """Retrieve the messages from the database."""
62 | query = (
63 | f"SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;"
64 | )
65 | self.cursor.execute(query, (self.session_id,))
66 | rows = self.cursor.fetchall()
67 | messages = [Message.parse_obj(row[0]) for row in rows]
68 | return messages
69 |
70 | def add_message(self, message: Message) -> None:
71 | """Add a message to the database."""
72 | from psycopg2 import sql
73 |
74 | query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format(
75 | sql.Identifier(self.table_name)
76 | )
77 | self.cursor.execute(query, (self.session_id, json.dumps(message.dict())))
78 | self.conn.commit()
79 |
80 | def clear(self) -> None:
81 | """Clear session memory from the database."""
82 | query = f"DELETE FROM {self.table_name} WHERE session_id = %s;"
83 | self.cursor.execute(query, (self.session_id,))
84 | self.conn.commit()
85 |
86 | def close(self):
87 | """Close cursor and connection."""
88 | self.cursor.close()
89 | self.conn.close()
90 |
91 | def __del__(self):
92 | """Destructor to close cursor and connection."""
93 | if hasattr(self, "cursor"):
94 | self.cursor.close()
95 | if hasattr(self, "conn"):
96 | self.conn.close()
97 |
--------------------------------------------------------------------------------
/docs/features/chat/controlling-chat-behavior.md:
--------------------------------------------------------------------------------
1 | ## Greetings :material-human-greeting:
2 |
3 | Greetings are used to start the conversation with a bot message instead of a user message.
4 | The `greeting` attribute defines this first message and is added to the conversation on initialization.
5 |
6 | ```py
7 | import declarai
8 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
9 |
10 | @gpt_35.experimental.chat
11 | class SQLBot:
12 | """
13 | You are a sql assistant. You help with SQL queries with one-line answers.
14 | """
15 | greeting = "Hello, I am a SQL assistant. How can I assist you today?"
16 | ```
17 |
18 | The greeting attribute is later available as a property of the chatbot instance to use when implementing your interface.
19 | ```py
20 | sql_bot = SQLBot()
21 | sql_bot.greeting
22 |
23 | > "Hello, I am a SQL assistant. How can I assist you today?"
24 | ```
25 |
26 | ```py
27 |
28 | sql_bot.send("When should I use a LEFT JOIN?")
29 |
30 | > 'You should use a LEFT JOIN when you want to retrieve all records from the left table and matching records from the right table.'
31 |
32 | sql_bot.conversation
33 |
34 | > [ # (1)!
35 | assistant: Hello, I am a SQL assistant. How can I assist you today?,
36 | user: When should I use a LEFT JOIN?,
37 | assistant: You should use a LEFT JOIN when you want to retrieve all records from the left table and matching records from the right table.
38 | ]
39 | ```
40 |
41 | 1. We can see here that the greeting, initiated by the assistant, is the first message in the conversation.
42 |
43 | ## Inject a message to the memory
44 |
45 | Declarai enables injecting custom messages into the conversation history by using the `add_message` method.
46 |
47 | This is super useful when you want to intervene in the conversation flow without necessarily triggering another response from the model.
48 |
49 | Consider using it for:
50 |
51 | * Creating a prefilled conversation even before the user's interaction.
52 | * Modifying the chatbot memory after the chatbot has generated a response.
53 | * Modifying the chatbot system prompt.
54 | * Guiding the conversation flow given certain criteria met in the user-bot interaction.
55 |
56 | ```py
57 | sql_bot = SQLBot()
58 | sql_bot.add_message("From now on, answer I DONT KNOW on any question asked by the user", role="system")
59 | # (1)!
60 | sql_bot.send("What is your favorite SQL operation?")
61 |
62 | > "I don't know."
63 | ```
64 |
65 | 1. The chatbot's conversation history now contains the injected message and reacts accordingly.
66 |
67 |
68 | ## Dynamic system prompting
69 | In the following example, we will pass a parameter to the chatbot system prompt.
70 | This value will be populated at runtime and will allow us to easily create base chatbots with varying behaviors.
71 |
72 | ```py
73 | import declarai
74 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
75 |
76 | @gpt_35.experimental.chat
77 | class JokeGenerator:
78 | """
79 | You are a joke generator. You generate jokes that a {character} would tell.
80 | """ # (1)!
81 |
82 |
83 | generator = JokeGenerator()
84 | favorite_joke = generator.send(character="Spongebob", message="What is your favorite joke?")
85 | squidward_joke = generator.send(message="What jokes can you tell about squidward?")
86 |
87 | print(favorite_joke)
88 | print(squidward_joke)
89 | ```
90 |
91 | 1. The system prompt now contains the parameter `{character}`. This parameter will be replaced by the value passed to the `send` method.
92 |
93 | ```py
94 | > "Why did the jellyfish go to school? Because it wanted to improve its "sting-uage" skills!"
95 | > "Why did Squidward bring a ladder to work? Because he wanted to climb up the corporate "sour-cules"!"
96 | ```
97 |
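98 | Because the system prompt is populated at runtime, a single class can back several differently-behaving bots. A minimal sketch (the characters are illustrative):
99 | 
100 | ```py
101 | vader_jokes = JokeGenerator()
102 | print(vader_jokes.send(character="Darth Vader", message="Tell me a joke about the light side"))
103 | 
104 | pirate_jokes = JokeGenerator()
105 | print(pirate_jokes.send(character="a pirate", message="Tell me a joke about treasure"))
106 | ```
107 | 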
--------------------------------------------------------------------------------
/src/declarai/middleware/third_party/wandb_monitor.py:
--------------------------------------------------------------------------------
1 | """
2 | Wandb Monitor Middleware used to monitor the execution on wandb.
3 | """
4 | from __future__ import annotations
5 |
6 | import importlib
7 | from time import time
8 |
9 | from declarai.middleware.base import TaskMiddleware
10 |
11 |
12 | class WandDBMonitorCreator:
13 | """
14 | Creates a WandDBMonitor middleware for a given task.
15 |
16 | Args:
17 | name (str): The name of the run on wandb
18 | project (str): The name of the project on wandb
19 | key (str): The api key for wandb
20 |
21 | Returns:
22 | (WandDBMonitor): A WandDBMonitor middleware
23 |
24 | Example:
25 | ```py
26 | WandDBMonitor = WandDBMonitorCreator(
27 | name="",
28 | project="",
29 | key="",
30 | )
31 |
32 | @openai.task(middlewares=[WandDBMonitor])
33 | def generate_a_poem(title: str):
34 | '''
35 | Generate a poem based on the given title
36 | :return: The generated poem
37 | '''
38 | return declarai.magic("poem", title)
39 | ```
40 | """
41 |
42 | def __new__(cls, name: str, project: str, key: str) -> "WandDBMonitor": # noqa
43 | if importlib.util.find_spec("wandb"):
44 | import wandb
45 | from wandb.sdk.data_types.trace_tree import Trace
46 |
47 | wandb.login(key=key)
48 | wandb.init(id=name, name=name, project=project, resume="allow")
49 | else:
50 | raise ImportError("wandb is not installed")
51 |
52 | class WandDBMonitor(TaskMiddleware):
53 | """
54 | WandDBMonitor middleware.
55 | """
56 |
57 |             _start_time_ms: int = None
58 |
59 | def before(self, _):
60 |                 self._start_time_ms = int(time() * 1000)  # wall-clock start in milliseconds
61 |
62 | def after(self, task):
63 | status = "success"
64 | status_message = ""
65 |                 end_time_ms = int(time() * 1000)  # logged in milliseconds
66 | root_span = Trace(
67 | name=task.__name__,
68 | kind="llms",
69 | status_code=status,
70 | status_message=status_message,
71 | metadata={
72 | "structured": task.prompt_config.structured,
73 | "multi_results": task.prompt_config.multi_results,
74 | "return_name": task.prompt_config.return_name,
75 | "temperature": task.prompt_config.temperature,
76 | "max_tokens": task.prompt_config.max_tokens,
77 | "top_p": task.prompt_config.top_p,
78 | "frequency_penalty": task.prompt_config.frequency_penalty,
79 | "presence_penalty": task.prompt_config.presence_penalty,
80 | "response": task.llm_response.response,
81 | "model": task.llm.model,
82 | "prompt_tokens": task.llm_response.prompt_tokens,
83 | "completion_tokens": task.llm_response.completion_tokens,
84 | "total_tokens": task.llm_response.total_tokens,
85 | },
86 | start_time_ms=self._start_time_ms,
87 | end_time_ms=end_time_ms,
88 | inputs={"query": task.compile(**self._kwargs)},
89 | outputs={"response": task.llm_response.response},
90 | )
91 |
92 | # log the span to wandb
93 | root_span.log(name=task.__name__)
94 |
95 | return WandDBMonitor
96 |
--------------------------------------------------------------------------------
/src/declarai/evals/runner.py:
--------------------------------------------------------------------------------
1 | from time import time
2 | from typing import Any, Callable, Dict
3 |
4 | from rich.progress import Progress
5 | from rich.table import Table
6 |
7 | from declarai import Declarai
8 |
9 |
10 | def evaluate_single_task_scenario(
11 | scenario_name: str,
12 | scenario: Callable,
13 | scenario_kwargs: Dict[str, Any],
14 | models: Dict[str, Declarai],
15 | table: Table,
16 | ):
17 | with Progress() as progress:
18 | evaluator = progress.add_task(f"[red]{scenario_name}...", total=len(models))
19 |
20 | for model, declarai in models.items():
21 | try:
22 | initialized_scenario = declarai.task(scenario)
23 |
24 | start_time = time()
25 | res = initialized_scenario(**scenario_kwargs)
26 | total_time = time() - start_time
27 | progress.update(evaluator, advance=1)
28 |
29 | try:
30 | input_tokens = str(initialized_scenario.llm_response.prompt_tokens)
31 | output_tokens = str(
32 | initialized_scenario.llm_response.completion_tokens
33 | )
34 | except: # noqa
35 | input_tokens = "error"
36 | output_tokens = "error"
37 |
38 | table.add_row(
39 | declarai.llm_config.provider,
40 | declarai.llm_config.model,
41 | declarai.llm_config.version or "latest",
42 | scenario_name,
43 | f"{round(total_time, 3)}s",
44 | input_tokens,
45 | output_tokens,
46 | str(res),
47 | )
48 | except Exception as e:
49 | print(f"Error: {e}")
50 | table.add_row(
51 | declarai.llm_config.provider,
52 | declarai.llm_config.model,
53 | declarai.llm_config.version or "latest",
54 | scenario_name,
55 | "error",
56 | "error",
57 | "error",
58 | repr(e),
59 | )
60 |
61 |
62 | def evaluate_sequence_task_scenario(
63 | scenario_name: str,
64 | scenario: Callable,
65 | scenario_kwargs: Dict[str, Any],
66 | models: Dict[str, Declarai],
67 | table: Table,
68 | ):
69 | with Progress() as progress:
70 | evaluator = progress.add_task(f"[red]{scenario_name}...", total=len(models))
71 |
72 | for model, declarai in models.items():
73 | try:
74 | initialized_scenario = scenario(declarai, **scenario_kwargs)
75 | start_time = time()
76 | res = initialized_scenario()
77 | total_time = time() - start_time
78 | progress.update(evaluator, advance=1)
79 |
80 | try:
81 | input_tokens = str(initialized_scenario.llm_response.prompt_tokens)
82 | output_tokens = str(
83 | initialized_scenario.llm_response.completion_tokens
84 | )
85 | except: # noqa
86 | input_tokens = "error"
87 | output_tokens = "error"
88 |
89 | table.add_row(
90 | declarai.llm_config.provider,
91 | declarai.llm_config.model,
92 | declarai.llm_config.version or "latest",
93 | scenario_name,
94 | f"{round(total_time, 3)}s",
95 | input_tokens,
96 | output_tokens,
97 | str(res),
98 | )
99 | except Exception as e:
100 | print(f"Error: {e}")
101 | table.add_row(
102 | declarai.llm_config.provider,
103 | declarai.llm_config.model,
104 | declarai.llm_config.version or "latest",
105 | scenario_name,
106 | "error",
107 | "error",
108 | "error",
109 | repr(e),
110 | )
111 |
--------------------------------------------------------------------------------
/tests/tasks/test_llm_task.py:
--------------------------------------------------------------------------------
1 | # from typing import Dict
2 | # from unittest.mock import MagicMock
3 | #
4 | # from declarai.orchestrator.future_llm_task import FutureTask
5 | # from declarai.operators.base.types import LLMTask
6 | #
7 | # TEST_TASK_TEMPLATE = "{input} | {output}"
8 | # TEMPLATE_KWARGS = {
9 | # "input": "input-value: {input_val}",
10 | # "output": "output-value: {output_val}",
11 | # }
12 | # TASK_KWARGS = {
13 | # "input_val": "input-value",
14 | # "output_val": "output-value",
15 | # }
16 | #
17 | #
18 | # def test_llm_task():
19 | # test_llm = MagicMock()
20 | # test_llm.predict.return_value = MagicMock()
21 | # test_llm.predict.return_value.response = '{"declarai_result": "output-value"}'
22 | #
23 | # llm_task = LLMTask(
24 | # template=TEST_TASK_TEMPLATE,
25 | # template_kwargs=TEMPLATE_KWARGS,
26 | # llm=test_llm,
27 | # prompt_kwargs={"return_type": str},
28 | # )
29 | #
30 | # compiled_task_template = "input-value: {input_val} | output-value: {output_val}"
31 | # compiled_task_with_values = "input-value: input-value | output-value: output-value"
32 | #
33 | # assert llm_task.compile() == compiled_task_template
34 | # assert llm_task.compile(**TASK_KWARGS) == compiled_task_with_values
35 | #
36 | # llm_res = llm_task(**TASK_KWARGS)
37 | # assert llm_res == "output-value"
38 | # assert test_llm.predict.called
39 | #
40 | #
41 | # def test_llm_task_result_name_override():
42 | # test_llm = MagicMock()
43 | # test_llm.predict.return_value = MagicMock()
44 | # test_llm.predict.return_value.response = '{"result": "output-value"}'
45 | #
46 | # llm_task = LLMTask(
47 | # template=TEST_TASK_TEMPLATE,
48 | # template_kwargs=TEMPLATE_KWARGS,
49 | # llm=test_llm,
50 | # prompt_kwargs={
51 | # "return_name": "result",
52 | # "return_type": str,
53 | # },
54 | # )
55 | # compiled_task_template = "input-value: {input_val} | output-value: {output_val}"
56 | # compiled_task_with_values = "input-value: input-value | output-value: output-value"
57 | # assert llm_task.compile() == compiled_task_template
58 | # assert llm_task.compile(**TASK_KWARGS) == compiled_task_with_values
59 | #
60 | # llm_res = llm_task(**TASK_KWARGS)
61 | # assert llm_res == "output-value"
62 | # assert test_llm.predict.called
63 | #
64 | #
65 | # def test_llm_task_unstructured_result():
66 | # test_llm = MagicMock()
67 | # test_llm.predict.return_value = MagicMock()
68 | # test_llm.predict.return_value.response = "output-value"
69 | #
70 | # llm_task = LLMTask(
71 | # template=TEST_TASK_TEMPLATE,
72 | # template_kwargs=TEMPLATE_KWARGS,
73 | # llm=test_llm,
74 | # prompt_kwargs={"structured": False},
75 | # )
76 | # llm_res = llm_task(**TASK_KWARGS)
77 | # assert llm_res == "output-value"
78 | # assert test_llm.predict.called
79 | #
80 | #
81 | # def test_llm_task_multiple_results():
82 | # test_llm = MagicMock()
83 | # test_llm.predict.return_value = MagicMock()
84 | # test_llm.predict.return_value.response = (
85 | # '{"result1": "output-value1"}\n\n\n{"result2": "output-value2"}'
86 | # )
87 | #
88 | # llm_task = LLMTask(
89 | # template=TEST_TASK_TEMPLATE,
90 | # template_kwargs=TEMPLATE_KWARGS,
91 | # llm=test_llm,
92 | # prompt_kwargs={"multi_results": True},
93 | # )
94 | # llm_res = llm_task(**TASK_KWARGS)
95 | # assert llm_res == {"result1": "output-value1", "result2": "output-value2"}
96 | # assert test_llm.predict.called
97 | #
98 | #
99 | # def test_future_llm_task():
100 | # test_llm = MagicMock()
101 | # test_llm.predict.return_value = MagicMock()
102 | # test_llm.predict.return_value.response = '{"declarai_result": "output-value"}'
103 | #
104 | # llm_task = LLMTask(
105 | # template=TEST_TASK_TEMPLATE,
106 | # template_kwargs=TEMPLATE_KWARGS,
107 | # llm=test_llm,
108 | # prompt_kwargs={"return_type": str},
109 | # )
110 | # compiled_task_with_values = "input-value: input-value | output-value: output-value"
111 | # future_task = llm_task.plan(**TASK_KWARGS)
112 | # assert isinstance(future_task, FutureTask)
113 | # assert future_task.populated_prompt == compiled_task_with_values
114 | # assert future_task() == "output-value"
115 |
--------------------------------------------------------------------------------
/docs/providers/openai.md:
--------------------------------------------------------------------------------
1 | # OpenAI
2 |
3 | To use OpenAI models, you can set the following configuration options:
4 |
5 | ```py
6 | import declarai
7 |
8 | openai_model = declarai.openai(
9 |     model="<model-name>",
10 |     openai_token="<your-api-key>",
11 |     headers={"<header-name>": "<header-value>"},
12 |     timeout=<timeout-in-seconds>,
13 |     request_timeout=<request-timeout-in-seconds>,
14 |     stream=<True/False>,
15 | )
16 | ```
17 |
18 |
19 | | Setting         | Env Variable                                | Required? |
20 | |-----------------|---------------------------------------------|:---------:|
21 | | Model | | ✅ |
22 | | API key | `OPENAI_API_KEY` | ✅ |
23 | | Headers | | |
24 | | Timeout | | |
25 | | Request timeout | | |
26 | | Stream | | |
27 |
28 | ## Getting an API key
29 |
30 | To obtain an OpenAI API key, follow these steps:
31 |
32 | 1. [Log in](https://platform.openai.com/) to your OpenAI account (sign up if you don't have one)
33 | 2. Go to the "API Keys" [page](https://platform.openai.com/account/api-keys) under your account settings.
34 | 3. Click "Create new secret key." A new API key will be generated. Make sure to copy the key to your clipboard, as you
35 | will not be able to see it again.
36 |
37 | ## Setting the API key
38 |
39 | You can set your API key at runtime like this:
40 |
41 | ```python
42 | import declarai
43 |
44 | gpt4 = declarai.openai(model="gpt-4", openai_token="<your-api-key>")
45 | ```
46 |
47 | However, it is preferable to pass sensitive settings as an environment variable: `OPENAI_API_KEY`.
48 |
49 | To establish your OpenAI API key as an environment variable, launch your terminal and execute the following command,
50 | substituting `<your-api-key>` with your actual key:
51 |
52 | ```shell
53 | export OPENAI_API_KEY=<your-api-key>
54 | ```
55 |
56 | This action will maintain the key for the duration of your terminal session. To ensure a longer retention, modify your
57 | terminal's settings or corresponding environment files.
58 |
59 | ## Control LLM Parameters
60 |
61 | OpenAI models have a number of parameters that can be tuned to control the output of the model. These parameters are
62 | passed to the declarai task/chat interface as a dictionary. The following parameters are supported:
63 |
64 | | Parameter | Type | Description | Default |
65 | |---------------------|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|
66 | | `temperature` | `float` | Controls the randomness of the model. Lower values make the model more deterministic and repetitive. Higher values make the model more random and creative. | `0` |
67 | | `max_tokens` | `int` | Controls the length of the output. | `3000` |
68 | | `top_p` | `float` | Controls the diversity of the model. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `1` |
69 | | `frequency_penalty` | `float` | Controls how often the model repeats itself. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `0` |
70 | | `presence_penalty` | `float` | Controls how often the model generates new topics. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `0` |
71 |
72 | Pass your custom parameters to the declarai task/chat interface as a dictionary:
73 |
74 | ```python
75 | import declarai
76 |
77 | gpt4 = declarai.openai(model="gpt-4", openai_token="")
78 |
79 |
80 | @gpt4.task(llm_params={"temperature": 0.5, "max_tokens": 1000}) # (1)!
81 | def generate_song():
82 | """
83 | Generate a song about declarai
84 | """
85 |
86 | ```
87 |
88 | 1. Pass only the parameters you want to change. The rest will be set to their default values.
89 |
--------------------------------------------------------------------------------
/docs/beginners-guide/debugging-tasks.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide:
3 | - footer
4 | ---
5 |
6 | # Debugging tasks
7 |
8 | So it all seems pretty magical up to this point, but what if you want to see what's going on behind the scenes?
9 | Being able to debug your tasks is a very important part of the development process, and **Declarai** makes it easy for you.
10 |
11 | ## Compiling tasks
12 | The first and simplest tool to better understand what's happening under the hood is the `compile` method.
13 | Declarai also has an `evals` module for advanced debugging and benchmarking, which you can review later here: [evals](../../features/evals/)
14 |
15 | Let's take the last task from the previous section and add a call to the `compile` method:
16 | ```python
17 | from typing import Dict
18 | import declarai
19 |
20 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
21 |
22 | @gpt_35.task
23 | def movie_recommender(user_input: str) -> Dict[str, str]:
24 | """
25 | Recommend a selection of movies to watch based on the user input
26 | For each movie provide a short description as well
27 | :param user_input: The user's input
28 | :return: A dictionary of movie names and descriptions
29 | """
30 |
31 | movie_recommender.compile()
32 |
33 | > {
34 | 'messages': [ # (1)!
35 | # (2)!
36 | system: You are a REST api endpoint.
37 | You only answer in JSON structures with a single key named 'declarai_result', nothing else.
38 | The expected format is: "declarai_result": Dict[string, string] # A dictionary of movie names and descriptions,
39 | # (3)!
40 | user: Recommend a selection of movies to watch based on the user input
41 | For each movie provide a short description as well.
42 | Inputs: user_input: {user_input} # (4)!
43 | ]
44 | }
45 | ```
46 |
47 | 1. As we are working with the openai llm provider, which exposes a chat interface, we translate the task into **messages** as defined by openai's API.
48 | 2. In order to guide the task toward the correct output format, we provide a **system** message that explains the LLM's role and the expected responses
49 | 3. The **user message** is the actual translation of the task at hand, with the user's input as a placeholder for the actual value.
50 | 4. **{user_input}** will be populated with the actual value when the task is being called at runtime.
51 |
52 | What we're seeing here is the template for this specific task. It is built so that when called at runtime,
53 | it will be populated with the real values passed to our task.
54 |
55 | !!! warning
56 |
57 | As you can see, the actual prompt sent to the model differs somewhat from the original docstring.
58 | Even though Declarai incorporates best practices for prompt engineering while interfering as little as possible with user prompts,
59 | it is still possible that the model will not generate the desired output. For this reason it is important to be able to debug your tasks and understand what was actually sent to the model.
60 |
61 | ## Compiling tasks with real values
62 | The `compile` method can also be used to view the prompt with the real values provided to the task.
63 | This is useful when prompts might behave differently for different inputs.
64 |
65 | ```python hl_lines="10"
66 | print(movie_recommender.compile(user_input="I want to watch a movie about space"))
67 |
68 | > {
69 | 'messages': [
70 | system: You are a REST api endpoint.
71 | You only answer in JSON structures with a single key named 'declarai_result', nothing else.
72 | The expected format is: "declarai_result": Dict[string, string] # A dictionary of movie names and descriptions,
73 | user: Recommend a selection of movies to watch based on the user input
74 | For each movie provide a short description as well.
75 | Inputs: user_input: I want to watch a movie about space # (1)!
76 | ]}
77 | ```
78 |
79 | 1. The actual **value** of the parameter is now populated in the placeholder and we have our final prompt!
80 |
81 |
82 | !!! tip
83 |
84 | With the `compile` method, you can always take your prompts anywhere you like,
85 | if it's for monitoring, debugging or just for documentation, we've got you covered!
86 |
87 |
88 |
96 |
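97 | Since `compile` returns the prompt as plain data, wiring it into your existing tooling is straightforward. A minimal sketch using standard logging (the logger name is illustrative):
98 | 
99 | ```python
100 | import logging
101 | 
102 | logging.basicConfig(level=logging.INFO)
103 | logger = logging.getLogger("prompt-audit")
104 | 
105 | # Log the fully-populated prompt before (or instead of) executing the task
106 | logger.info(movie_recommender.compile(user_input="I want to watch a movie about space"))
107 | ```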
--------------------------------------------------------------------------------
/src/declarai/operators/llm.py:
--------------------------------------------------------------------------------
1 | """
2 | This module defines the base classes for the LLM interface.
3 | """
4 | from __future__ import annotations
5 |
6 | from abc import abstractmethod
7 | from typing import Optional, TypedDict, TypeVar
8 |
9 | from pydantic.main import BaseModel
10 |
11 |
12 | class LLMResponse(BaseModel):
13 | """
14 | The response from the LLM.
15 |
16 | Attributes:
17 | response: The raw response from the LLM
18 | model: The model that was used to generate the response
19 | prompt_tokens: The number of tokens in the prompt
20 | completion_tokens: The number of tokens in the completion
21 | total_tokens: The total number of tokens in the response
22 | """
23 |
24 | response: str
25 | model: Optional[str] = None
26 | prompt_tokens: Optional[int] = None
27 | completion_tokens: Optional[int] = None
28 | total_tokens: Optional[int] = None
29 | role: str = "assistant"
30 | raw_response: Optional[dict] = None
31 |
32 |
33 | class BaseLLMParams(TypedDict):
34 | """
35 | The base LLM params that are common to all LLMs.
36 | """
37 |
38 | # Define any common/generic params here
39 | pass
40 |
41 |
42 | class LLMSettings:
43 | """
44 | The settings for the LLM. Defines the model and version to use.
45 |
46 | Args:
47 | provider: The provider of the model (openai, cohere, etc.)
48 | model: The model to use (gpt-4, gpt-3.5-turbo, etc.)
49 | version: The version of the model to use (optional)
50 | **_: Any additional params that are specific to the provider that will be ignored.
51 |
52 |
53 | Attributes:
54 | provider (str): The provider of the model (openai, cohere, etc.)
55 | model: The full model name to use.
56 | version: The version of the model to use (optional)
57 | """
58 |
59 | def __init__(
60 | self,
61 | provider: str,
62 | model: str,
63 | version: Optional[str] = None,
64 | **_,
65 | ):
66 | self.provider = provider
67 | self._model = model
68 | self.version = version
69 |
70 | @property
71 | def model(self, delimiter: Optional[str] = "-", with_version: bool = True) -> str:
72 | """
73 | Some model providers allow defining a base model as well as a sub-model.
74 | Often the base model is an alias to latest model served on that model.
75 | for example, when sending gpt-3.5-turbo to OpenAI, the actual model will be one of the
76 | publicly available snapshots or an internally exposed version as described on their website:
77 | as of 27/07/2023 - https://platform.openai.com/docs/models/continuous-model-upgrades
78 | | With the release of gpt-3.5-turbo, some of our models are now being continually updated.
79 | | We also offer static model versions that developers can continue using for at least
80 | | three months after an updated model has been introduced.
81 |
82 | Another use-case for sub models is using your own fine-tuned models.
83 | As described in the documentation:
84 | https://platform.openai.com/docs/guides/fine-tuning/customize-your-model-name
85 |
86 | You will likely build your fine-tuned model names by concatenating the base model name
87 | with the fine-tuned model name, separated by a hyphen.
88 | For example
89 | gpt-3.5-turbo-declarai-text-classification-2023-03
90 | or
91 | gpt-3.5-turbo:declarai:text-classification-2023-03
92 |
93 | In any case you can always pass the full model name in the model parameter and leave the
94 | sub_model parameter empty if you prefer.
95 | """
96 | if self.version and with_version:
97 | return f"{self._model}{delimiter}{self.version}"
98 | return self._model
99 |
100 |
101 | class BaseLLM:
102 | """
103 | The base LLM class that all LLMs should inherit from.
104 | """
105 |
106 | provider: str
107 | model: str
108 |
109 | @abstractmethod
110 | def predict(self, *args, **kwargs) -> LLMResponse:
111 | """
112 | The predict method that all LLMs should implement.
113 | Args:
114 | *args:
115 | **kwargs:
116 |
117 | Returns: llm response object
118 |
119 | """
120 | raise NotImplementedError()
121 |
122 |
123 | LLMParamsType = TypeVar("LLMParamsType", bound=BaseLLMParams)
124 | """Type variable for LLM params"""
125 | LLM = TypeVar("LLM", bound=BaseLLM)
126 | """Type variable for LLM"""
127 |
--------------------------------------------------------------------------------
/docs/js/custom.js:
--------------------------------------------------------------------------------
1 | function setupTermynal() {
2 | document.querySelectorAll(".use-termynal").forEach(node => {
3 | node.style.display = "block";
4 | new Termynal(node, {
5 | lineDelay: 500
6 | });
7 | });
8 | const progressLiteralStart = "---> 100%";
9 | const promptLiteralStart = "$ ";
10 | const customPromptLiteralStart = "# ";
11 | const termynalActivateClass = "termy";
12 | let termynals = [];
13 |
14 | function createTermynals() {
15 | document
16 | .querySelectorAll(`.${termynalActivateClass} .highlight`)
17 | .forEach(node => {
18 | const text = node.textContent;
19 | const lines = text.split("\n");
20 | const useLines = [];
21 | let buffer = [];
22 |
23 | function saveBuffer() {
24 | if (buffer.length) {
25 | let isBlankSpace = true;
26 | buffer.forEach(line => {
27 | if (line) {
28 | isBlankSpace = false;
29 | }
30 | });
31 |                     let dataValue = {};
32 | if (isBlankSpace) {
33 | dataValue["delay"] = 0;
34 | }
35 | if (buffer[buffer.length - 1] === "") {
36 |                         // A last single <br> won't have effect
37 | // so put an additional one
38 | buffer.push("");
39 | }
40 |                     const bufferValue = buffer.join("<br>");
41 | dataValue["value"] = bufferValue;
42 | useLines.push(dataValue);
43 | buffer = [];
44 | }
45 | }
46 |
47 | for (let line of lines) {
48 | if (line === progressLiteralStart) {
49 | saveBuffer();
50 | useLines.push({
51 | type: "progress"
52 | });
53 | } else if (line.startsWith(promptLiteralStart)) {
54 | saveBuffer();
55 | const value = line.replace(promptLiteralStart, "").trimEnd();
56 | useLines.push({
57 | type: "input",
58 | value: value
59 | });
60 | } else if (line.startsWith("// ")) {
61 | saveBuffer();
62 | const value = "💬 " + line.replace("// ", "").trimEnd();
63 | useLines.push({
64 | value: value,
65 | class: "termynal-comment",
66 | delay: 0
67 | });
68 | } else if (line.startsWith(customPromptLiteralStart)) {
69 | saveBuffer();
70 | const promptStart = line.indexOf(promptLiteralStart);
71 | if (promptStart === -1) {
72 | console.error("Custom prompt found but no end delimiter", line)
73 | }
74 | const prompt = line.slice(0, promptStart).replace(customPromptLiteralStart, "")
75 | let value = line.slice(promptStart + promptLiteralStart.length);
76 | useLines.push({
77 | type: "input",
78 | value: value,
79 | prompt: prompt
80 | });
81 | } else {
82 | buffer.push(line);
83 | }
84 | }
85 | saveBuffer();
86 | const div = document.createElement("div");
87 | node.replaceWith(div);
88 | const termynal = new Termynal(div, {
89 | lineData: useLines,
90 | noInit: true,
91 | lineDelay: 500
92 | });
93 | termynals.push(termynal);
94 | });
95 | }
96 |
97 | function loadVisibleTermynals() {
98 | termynals = termynals.filter(termynal => {
99 | if (termynal.container.getBoundingClientRect().top - innerHeight <= 0) {
100 | termynal.init();
101 | return false;
102 | }
103 | return true;
104 | });
105 | }
106 |
107 | window.addEventListener("scroll", loadVisibleTermynals);
108 | createTermynals();
109 | loadVisibleTermynals();
110 | }
111 |
112 | async function main() {
113 | setupTermynal();
114 | }
115 |
116 | main();
117 |
--------------------------------------------------------------------------------
/src/declarai/evals/README.md:
--------------------------------------------------------------------------------
1 | # Evals
2 | The evals library is a companion to declarai and helps us, and you, monitor prompts across models and over time.
3 | We plan to run a suite of evaluations for every release of the package to ensure that changes in the prompt
4 | infrastructure will not reduce the quality of results.
5 |
6 | ## Running the evaluations
7 | To run the evaluations, you will need to install the `declarai` package. You can do this by running
8 | ```bash
9 | pip install declarai
10 | ```
11 |
12 | Once you have installed the package, you can run the evaluations by running
13 | ```bash
14 | python -m evals.evaluator
15 | ```
16 |
17 | After the evaluations have finished running, you should be able to view the results in your terminal:
18 | ```bash
19 | Running Metadata-Significance scenarios...
20 | generate_a_poem_no_metadata... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00
21 | generate_a_poem_only_return_type... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00
22 | ┏━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
23 | ┃ Provider ┃ Model ┃ version ┃ Scenario ┃ runtime ┃ output ┃
24 | ┡━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
25 | │ openai │ gpt-3.5-turbo │ latest │ generate_a_poem_no_metadata │ 1.235s │ Using LLMs is fun! │
26 | │ openai │ gpt-3.5-turbo │ 0301 │ generate_a_poem_no_metadata │ 0.891s │ Using LLMs is fun! │
27 | │ │ │ │ │ │ It's like playing with words │
28 | │ │ │ │ │ │ Creating models that learn │
29 | │ │ │ │ │ │ And watching them fly like birds │
30 | │ openai │ gpt-3.5-turbo │ 0613 │ generate_a_poem_no_metadata │ 1.071s │ Using LLMs is fun! │
31 | │ openai │ gpt-4 │ latest │ generate_a_poem_no_metadata │ 3.494s │ {'poem': 'Using LLMs, a joyous run,\n In the world of AI, under the sun.\nWith every task, they │
32 | │ │ │ │ │ │ stun,\nIndeed, using LLMs is fun!'} │
33 | │ openai │ gpt-4 │ 0613 │ generate_a_poem_no_metadata │ 4.992s │ {'title': 'Using LLMs is fun!', 'poem': "With LLMs, the fun's just begun, \nCoding and learning, │
34 | │ │ │ │ │ │ second to none. \nComplex tasks become a simple run, \nOh, the joy when the work is done!"} │
35 | │ openai │ gpt-3.5-turbo │ latest │ generate_a_poem_only_return_type │ 2.1s │ Learning with LLMs, a delightful run, │
36 | │ │ │ │ │ │ Exploring new knowledge, it's never done. │
37 | │ │ │ │ │ │ With every challenge, we rise and we stun, │
38 | │ │ │ │ │ │ Using LLMs, the learning is always fun! │
39 | │... │... │... │... │... │ ... │
40 | └─────────────────┴─────────────────┴─────────────────┴──────────────────────────────────────────┴────────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────┘
41 | ```
42 |
--------------------------------------------------------------------------------
/src/declarai/operators/openai_operators/task_operator.py:
--------------------------------------------------------------------------------
1 | """
2 | Task implementation for openai operator.
3 | """
4 | import logging
5 |
6 | from declarai.operators.registry import register_operator
7 | from declarai.operators.message import Message, MessageRole
8 | from declarai.operators.operator import BaseOperator, CompiledTemplate
9 | from ..utils import can_be_jinja
10 | from declarai.operators.templates import (
11 | InstructFunctionTemplate,
12 | StructuredOutputInstructionPrompt,
13 | compile_output_prompt,
14 | )
15 |
16 | from .openai_llm import AzureOpenAILLM, OpenAILLM
17 |
18 | logger = logging.getLogger("OpenAITaskOperator")
19 |
20 | INPUTS_TEMPLATE = "Inputs:\n{inputs}\n"
21 | INPUT_LINE_TEMPLATE = "{param}: {{{param}}}"
22 | NEW_LINE_INPUT_LINE_TEMPLATE = "\n{param}: {{{param}}}"
23 |
24 |
25 | @register_operator(provider="openai", operator_type="task")
26 | class OpenAITaskOperator(BaseOperator):
27 | """
28 | Task implementation for openai operator. This is a child of the BaseOperator class. See the BaseOperator class for further documentation.
29 | Implements the compile method which compiles a parsed function into a message.
30 | Uses the OpenAILLM to generate a response based on the given template.
31 |
32 | Attributes:
33 | llm: OpenAILLM
34 | """
35 |
36 | llm: OpenAILLM
37 |
38 | def _compile_input_placeholder(self) -> str:
39 | """
40 | Creates a placeholder for the input of the function.
41 | The input format is based on the function input schema.
42 |
43 | !!! example
44 | for example a function signature of:
45 | ```py
46 | def foo(a: int, b: str, c: float = 1.0):
47 | ```
48 |
49 | will result in the following placeholder:
50 | ```md
51 | Inputs:
52 | a: {a}
53 | b: {b}
54 | c: {c}
55 | ```
56 | """
57 | inputs = ""
58 |
59 | if not self.parsed.signature_kwargs.keys():
60 | return inputs
61 |
62 | for i, param in enumerate(self.parsed.signature_kwargs.keys()):
63 | if i == 0:
64 | inputs += INPUT_LINE_TEMPLATE.format(param=param)
65 | continue
66 | inputs += NEW_LINE_INPUT_LINE_TEMPLATE.format(param=param)
67 |
68 | return INPUTS_TEMPLATE.format(inputs=inputs)
69 |
70 | def _compile_output_prompt(self, template) -> str:
71 | if not self.parsed.has_any_return_defs:
72 | logger.warning(
73 |             "Couldn't create output schema for function %s. "
74 |             "Falling back to unstructured output. "
75 | "Please add at least one of the following: return type, return doc, return name",
76 | self.parsed.name,
77 | )
78 | return ""
79 |
80 | signature_return = self.parsed.signature_return
81 | return_name, return_doc = self.parsed.docstring_return
82 | return compile_output_prompt(
83 | return_type=signature_return.str_schema,
84 | str_schema=return_name,
85 | return_docstring=return_doc,
86 | return_magic=self.parsed.magic.return_name,
87 | structured=self.parsed.has_structured_return_type,
88 | structured_template=template,
89 | )
90 |
91 | def compile_template(self) -> CompiledTemplate:
92 | """
93 | Unique compilation method for the OpenAITaskOperator class.
94 | Uses the InstructFunctionTemplate and StructuredOutputInstructionPrompt templates to create a message.
95 | And the _compile_input_placeholder method to create a placeholder for the input of the function.
96 | Returns:
97 | Dict[str, List[Message]]: A dictionary containing a list of messages.
98 |
99 | """
100 | instruction_template = InstructFunctionTemplate
101 | structured_template = StructuredOutputInstructionPrompt
102 | output_schema = self._compile_output_prompt(structured_template)
103 |
104 | messages = []
105 | if output_schema:
106 | messages.append(Message(message=output_schema, role=MessageRole.system))
107 |
108 | if not can_be_jinja(self.parsed.docstring_freeform):
109 | instruction_message = instruction_template.format(
110 | input_instructions=self.parsed.docstring_freeform,
111 | input_placeholder=self._compile_input_placeholder(),
112 | )
113 | else:
114 | instruction_message = self.parsed.docstring_freeform
115 |
116 | messages.append(Message(message=instruction_message, role=MessageRole.user))
117 | return messages
118 |
119 |
120 | @register_operator(provider="azure-openai", operator_type="task")
121 | class AzureOpenAITaskOperator(OpenAITaskOperator):
122 | """
123 | Task implementation for openai operator that uses Azure as the llm provider.
124 |
125 | Attributes:
126 | llm: AzureOpenAILLM
127 | """
128 |
129 | llm: AzureOpenAILLM
130 |
--------------------------------------------------------------------------------
/docs/beginners-guide/controlling-task-behavior.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide:
3 | - footer
4 | ---
5 |
6 | # Controlling task behavior :control_knobs:
7 |
8 | Task behavior can be controlled by any of the available interfaces in Python.
9 | Controlling these parameters is key to achieving the desired results from the model.
10 |
11 | ### Passing parameters to the task :label:
12 |
13 | In the following example, we'll create a task that suggests movies to watch based on a given input.
14 |
15 | ```python
16 | import declarai
17 |
18 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
19 |
20 |
21 | @gpt_35.task
22 | def movie_recommender(user_input: str): # (1)!
23 | """
24 | Recommend a movie to watch based on the user input
25 | :param user_input: The user's input
26 | """ # (2)!
27 | ```
28 |
29 | 1. Notice how providing a type hint for the `user_input` parameter allows declarai to understand the expected input
30 | type.
31 | 2. Adding the param to the docstring allows declarai to communicate the **meaning** of this parameter to the model.
32 |
33 | ```python
34 | print(movie_recommender(user_input="I want to watch a movie about space"))
35 | > 'Interstellar'
36 | ```
37 |
38 | ### Using return types to control the output :gear:
39 |
40 | This is a good start,
41 | but let's say we want to have a selection of movies instead of a single suggestion.
42 |
43 | ```python
44 | from typing import List
45 | import declarai
46 |
47 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
48 |
49 |
50 | @gpt_35.task
51 | def movie_recommender(user_input: str) -> List[str]: # (1)!
52 | """
53 | Recommend a selection of movies to watch based on the user input
54 | :param user_input: The user's input
55 | :return: A list of movie recommendations
56 | """ # (2)!
57 | ```
58 |
59 | 1. Adding a return type hint allows declarai to parse the output of the llm into the provided type,
60 | in our case a list of strings.
61 | 2. Explaining the return value aids the model in returning the expected output and avoiding hallucinations.
62 |
63 | ```python
64 | print(movie_recommender(user_input="I want to watch a movie about space"))
65 | > ['Interstellar', 'Gravity', 'The Martian', 'Apollo 13', '2001: A Space Odyssey', 'Moon', 'Sunshine', 'Contact',
66 | 'The Right Stuff', 'Hidden Figures']
67 | ```
68 |
69 | !!! info
70 |
71 | Notice how the text in our documentation has changed from singular to plural form.
72 | Maintaining consistency between the task's description and the return type is important for the model to understand the expected output.
73 | For more best-practices, see [here](../../best-practices).
74 |
75 | Awesome!
76 |
77 | Now we have a list of movies to choose from!
78 |
79 | But what if we want to go even further :thinking:?
80 | Let's say we want the model to also provide a short description of each movie.
81 |
82 | ```python
83 | from typing import Dict
84 | import declarai
85 |
86 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
87 |
88 |
89 | @gpt_35.task
90 | def movie_recommender(user_input: str) -> Dict[str, str]: # (1)!
91 | """
92 | Recommend a selection of movies to watch based on the user input
93 | For each movie provide a short description as well
94 | :param user_input: The user's input
95 | :return: A dictionary of movie names and descriptions
96 | """ # (2)!
97 | ```
98 |
99 | 1. We've updated the return value to allow for the creation of a dictionary of movie names and descriptions.
100 | 2. We re-enforce the description of the return value to ensure the model understands the expected output.
101 |
102 | ```python
103 | print(movie_recommender(user_input="I want to watch a movie about space"))
104 | > {
105 | 'Interstellar': "A team of explorers travel through a wormhole in space in an attempt to ensure humanity's survival.",
106 | 'Gravity': 'Two astronauts work together to survive after an accident leaves them stranded in space.',
107 | 'The Martian': 'An astronaut is left behind on Mars after his team assumes he is dead and must find a way to survive and signal for rescue.',
108 | 'Apollo 13': 'The true story of the Apollo 13 mission, where an explosion in space jeopardizes the lives of the crew and their safe return to Earth.',
109 | '2001: A Space Odyssey': "A journey through human evolution and the discovery of a mysterious black monolith that may hold the key to humanity's future."
110 | }
111 | ```
112 |
113 | !!! info
114 |
115 | A good practice, both for code readability and for well-performing models, is to use type hints and rich context in the docstrings.
116 | The better you describe the task and the `:param` and `:return` sections within the docstring, the better the results will be.
117 |
118 | !!! tip
119 |
120 | Try experimenting with various descriptions and see how far you can push the model's understanding!
121 | Who knows what you'll find :open_mouth:!
122 |
123 |
--------------------------------------------------------------------------------
/docs/providers/azure-openai.md:
--------------------------------------------------------------------------------
20 | | Setting               | Env Variable                                | Required? |
21 | |-----------------------|---------------------------------------------|:---------:|
22 | | azure_openai_key | `DECLARAI_AZURE_OPENAI_KEY` | ✅ |
23 | | azure_openai_api_base | `DECLARAI_AZURE_OPENAI_API_BASE` | ✅ |
24 | | deployment_name | `DECLARAI_AZURE_OPENAI_DEPLOYMENT_NAME` | ✅ |
25 | | api_version | `DECLARAI_AZURE_OPENAI_API_VERSION` | |
26 | | headers | | |
27 | | timeout | | |
28 | | request_timeout | | |
29 | | stream | | |
30 |
31 | ## Getting an API key, API base, and Deployment name
32 |
33 | To obtain the above settings, you will need to create an account on
34 | the [Azure OpenAI](https://azure.microsoft.com/en-us/services/cognitive-services/)
35 | website. Once you have created an account, you will need to create a resource.
36 |
37 | Please follow the instructions on
38 | the [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/quickstart?tabs=command-line&pivots=programming-language-python)
39 |
40 | ## Setting the API key
41 |
42 | You can set your API key at runtime like this:
43 |
44 | ```python
45 | import declarai
46 |
47 | my_azure_model = declarai.azure_openai(
48 | deployment_name="my-model",
49 |     azure_openai_key="<your-api-key>",
50 |     azure_openai_api_base="https://<your-resource-name>.openai.azure.com",
51 | )
52 | ```
53 |
54 | However, it is preferable to pass sensitive settings as an environment variable: `DECLARAI_AZURE_OPENAI_KEY`.
55 |
56 | To establish your Azure OpenAI API key as an environment variable, launch your terminal and execute the following
57 | command,
58 | substituting `<your-api-key>` with your actual key:
59 |
60 | ```shell
61 | export DECLARAI_AZURE_OPENAI_KEY=<your-api-key>
62 | ```
63 |
64 | This action will maintain the key for the duration of your terminal session. To ensure a longer retention, modify your
65 | terminal's settings or corresponding environment files.
66 |
67 | ## Control LLM Parameters
68 |
69 | OpenAI models have a number of parameters that can be tuned to control the output of the model. These parameters are
70 | passed to the declarai task/chat interface as a dictionary. The following parameters are supported:
71 |
72 | | Parameter | Type | Description | Default |
73 | |---------------------|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|
74 | | `temperature` | `float` | Controls the randomness of the model. Lower values make the model more deterministic and repetitive. Higher values make the model more random and creative. | `0` |
75 | | `max_tokens` | `int` | Controls the length of the output. | `3000` |
76 | | `top_p` | `float` | Controls the diversity of the model. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `1` |
77 | | `frequency_penalty` | `float` | Controls how often the model repeats itself. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `0` |
78 | | `presence_penalty` | `float` | Controls how often the model generates new topics. Lower values make the model more repetitive and conservative. Higher values make the model more random and creative. | `0` |
79 |
80 | Pass your custom parameters to the declarai task/chat interface as a dictionary:
81 |
82 | ```python
83 | import declarai
84 |
85 | azure_model = declarai.azure_openai(
86 | deployment_name="my-model",
87 |     azure_openai_key="<your-api-key>",
88 |     azure_openai_api_base="https://<your-resource-name>.openai.azure.com",
89 |     headers={"<header-name>": "<header-value>"},
90 | )
91 |
92 |
93 | @azure_model.task(llm_params={"temperature": 0.5, "max_tokens": 1000}) # (1)!
94 | def generate_song():
95 | """
96 | Generate a song about declarai
97 | """
98 |
99 | ```
100 |
101 | 1. Pass only the parameters you want to change. The rest will be set to their default values.
102 |
--------------------------------------------------------------------------------
/src/declarai/python_parser/parser.py:
--------------------------------------------------------------------------------
1 | """PythonParser
2 | An interface to extract different parts of the provided python code into a simple metadata object.
3 | """
4 |
5 | import inspect
6 | from functools import lru_cache as memoized
7 | from typing import Any, Dict, Optional
8 |
9 | from pydantic import parse_obj_as, parse_raw_as
10 | from pydantic.error_wrappers import ValidationError
11 |
12 | from declarai.python_parser.magic_parser import Magic, extract_magic_args
13 | from declarai.python_parser.type_annotation_to_schema import (
14 | type_annotation_to_str_schema,
15 | )
16 | from declarai.python_parser.types import (
17 | ArgName,
18 | ArgType,
19 | DocstringFreeform,
20 | DocstringParams,
21 | DocstringReturn,
22 | SignatureReturn,
23 | )
24 |
25 | from .docstring_parsers.reST import ReSTDocstringParser
26 |
27 |
28 | class OutputParsingError(Exception):
29 | pass
30 |
31 |
32 | class PythonParser:
33 | """
34 | A unified interface for accessing python parsed data.
35 | """
36 |
37 | is_func: bool
38 | is_class: bool
39 | decorated: Any
40 | name: str
41 | signature_return_type: Any
42 | docstring_freeform: DocstringFreeform
43 | docstring_params: DocstringParams
44 | docstring_return: DocstringReturn
45 |
46 | def __init__(self, decorated: Any):
47 | self.is_func = inspect.isfunction(decorated)
48 | self.is_class = inspect.isclass(decorated)
49 | self.decorated = decorated
50 |
51 | # Static attributes:
52 | self.name = self.decorated.__name__
53 |
54 | self._signature = inspect.signature(self.decorated)
55 | self.signature_return_type = self.signature_return.type_
56 |
57 | docstring = inspect.getdoc(self.decorated)
58 | self._parsed_docstring = ReSTDocstringParser(docstring or "")
59 | self.docstring_freeform = self._parsed_docstring.freeform
60 | self.docstring_params = self._parsed_docstring.params
61 | self.docstring_return = self._parsed_docstring.returns
62 |
63 | @property
64 | @memoized(maxsize=1)
65 | def signature_kwargs(self) -> Dict[ArgName, ArgType]:
66 | return {
67 | param.name: param.annotation
68 | for param in dict(self._signature.parameters).values()
69 | if param.name != "self"
70 | }
71 |
72 | @property
73 | @memoized(maxsize=1)
74 | def signature_return(self) -> Optional[SignatureReturn]:
75 | return_annotation = self._signature.return_annotation
76 | if return_annotation == inspect._empty:
77 | return SignatureReturn()
78 | string_schema = type_annotation_to_str_schema(self._signature.return_annotation)
79 | return SignatureReturn(
80 | name=str(self._signature.return_annotation),
81 | str_schema=string_schema,
82 | type_=self._signature.return_annotation,
83 | )
84 |
85 | @property
86 | @memoized(maxsize=1)
87 | def magic(self) -> Magic:
88 | func_str = inspect.getsource(self.decorated)
89 | if "magic(" not in func_str:
90 | return Magic()
91 | return extract_magic_args(func_str)
92 |
93 | @property
94 | @memoized(maxsize=1)
95 | def return_name(self) -> str:
96 | return self.magic.return_name or self.docstring_return[0] or "declarai_result"
97 |
98 | @property
99 | @memoized(maxsize=1)
100 | def has_any_return_defs(self) -> bool:
101 | """
102 | A return definition is any of the following:
103 | - return type annotation
104 | - return reference in docstring
105 | - return referenced in magic placeholder # TODO: Address magic reference as well.
106 | """
107 | return any(
108 | [
109 | self.docstring_return[0],
110 | self.docstring_return[1],
111 | self.signature_return,
112 | ]
113 | )
114 |
115 | @property
116 | @memoized(maxsize=1)
117 | def has_structured_return_type(self) -> bool:
118 | """
119 | Except for the following types, a dedicated output parsing
120 | behavior is required to return the expected return type of the task.
121 | """
122 | return any(
123 | [
124 | self.docstring_return[0],
125 | self.signature_return.name
126 | not in (
127 | None,
128 |                     "<class 'str'>",
129 |                     "<class 'int'>",
130 |                     "<class 'float'>",
131 |                     "<class 'bool'>",
132 | ),
133 | ]
134 | )
135 |
136 | def parse(self, raw_result: str):
137 | if self.has_structured_return_type:
138 | parsed_result = parse_raw_as(dict, raw_result)
139 | root_key = self.return_name or "declarai_result"
140 | parsed_result = parsed_result[root_key]
141 | else:
142 | parsed_result = raw_result
143 |
144 | if self.signature_return_type:
145 | try:
146 | return parse_obj_as(self.signature_return_type, parsed_result)
147 | except ValidationError:
148 | raise OutputParsingError(
149 | f"\nFailed parsing result into type:\n"
150 | f"{self.signature_return_type}\n"
151 | "----------------------------------\n"
152 | f"raw_result:\n"
153 | f"{raw_result}"
154 | )
155 | else:
156 | return parsed_result
157 |
--------------------------------------------------------------------------------
/docs/best-practices/index.md:
--------------------------------------------------------------------------------
1 | # Best practices
2 |
3 | Prompt engineering is no simple task and there are various things to consider when creating a prompt.
4 | In this page we provide our view and understanding of the best practices for prompt engineering.
5 | These will help you create reliably performing tasks and chatbots that won't surprise you when deploying in production.
6 |
7 | !!! warning
8 |
9 | While this guide should help you create reliable prompts for most cases, it is still possible that the model will not generate the desired output.
10 | For this reason we strongly recommend you test your tasks and bots on various inputs before deploying to production.
11 | You can achieve this by writing integration tests or using our provided `evals` library to discover which models and which
12 | versions perform best for your specific use case.
13 |
14 |
15 | ### Explicit is better than implicit
16 |
17 | When creating a prompt, it is important to be as explicit as possible.
18 | Declarai offers various interfaces for providing context and guidance to the model.
19 |
20 | Reviewing the movie recommender example from the beginner's guide, we can see a collection of techniques to provide context to the model:
21 | ```python
22 | from typing import Dict
23 | import declarai
24 |
25 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
26 | @gpt_35.task
27 | def movie_recommender(user_input: str) -> Dict[str, str]:
28 | """
29 | Recommend a selection of movies to watch based on the user input
30 | For each movie provide a short description as well
31 | :param user_input: The user's input
32 | :return: A dictionary of movie names and descriptions
33 | """
34 | ```
35 |
36 | **Using type annotations** on the input and output creates predictability in software and enforces a strict interface with the model.
37 | The types are read and enforced by Declarai at runtime, so a result of the wrong type will raise an error instead of
38 | being returned and causing unexpected behavior down the line.
39 |
40 | **Docstrings** are used to provide context to the model and to the user.
41 |
42 | - **Task description** - The first part of the docstring is the task itself; make sure to address the expected inputs and how to use them.
43 | You can implement various popular prompting techniques such as `few-shot`, which means providing example inputs and outputs for the model to learn from (see the sketch at the end of this page).
44 |
45 | - **Param descriptions** - Explaining the meaning of the input parameters helps the model better perform with the provided inputs.
46 | For example, when passing an argument called `input`, if you know that the expected value will be an email or a user message, it is best to explain this to the model.
47 |
48 | - **Return description** - While type annotations are a great base layer for declaring the expected output,
49 | explaining the exact structure and the logic behind it will help the model perform better.
50 | For example, given a return type of `Dict[str, str]`, explaining that this object will contain a mapping of movie names to their respective descriptions
51 | will help the model properly populate the resulting object.
52 |
53 | ### Language consistency and ambiguity
54 |
55 | When providing prompts to the model, it is best practice to use language that correlates with the expected input and output.
56 | For example, in the following, the prompt is written in singular form, while the resulting output is plural (i.e. a list):
57 | ```python
58 | from typing import List
59 | import declarai
60 |
61 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
62 |
63 | @gpt_35.task
64 | def movie_recommender(user_input: str) -> List[str]:
65 | """
66 | Recommend a movie to watch based on the user input
67 | :param user_input: The user's input
68 | :return: Recommended movie
69 | """
70 | ```
71 | This may easily confuse the model and cause it to produce unexpected results that will fail when the output is parsed.
72 | Instead, we could write the prompt as follows:
73 | ```python
74 | from typing import List
75 | import declarai
76 |
77 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
78 | @gpt_35.task
79 | def movie_recommender(user_input: str) -> List[str]:
80 | """
81 | Recommend a selection of movies to watch based on the user input
82 | :param user_input: The user's input
83 | :return: A list of recommended movies
84 | """
85 | ```
86 | This way it is clear to the model that we are expecting a list of movies and not a single movie.
87 |
88 |
89 | ### Falling back to string
90 |
91 | In some cases, you might be working on a task or chat that has a mixture of behaviors that may not be consistent.
92 | For example in this implementation of a calculator bot, the bot usually returns numbers, but for the scenario that an error occurs, it returns a string.
93 | ```python
94 | from typing import Union
95 | import declarai
96 |
97 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
98 |
99 | @gpt_35.experimental.chat
100 | class CalculatorBot:
101 | """
102 |     You are a calculator bot,
103 | given a request, you will return the result of the calculation
104 | If you have a problem with the provided input, you should return an error explaining the problem.
105 | For example, for the input: "1 + a" where 'a' is unknown to you, you should return: "Unknown symbol 'a'"
106 | """
107 | def send(self, message: str) -> Union[str, int]:
108 | ...
109 | ```
110 | When using the created bot it should look like this:
111 | ```python
112 | calc_bot = CalculatorBot()
113 | print(calc_bot.send(message="1 + 3"))
114 | #> 4
115 | print(calc_bot.send(message="34 * b"))
116 | #> Unknown symbol 'b'
117 | ```
118 | This way, instead of raising an error, the bot returns a string that explains the problem and allows the user to recover from the 'broken' state.
119 |
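120 | ### Few-shot examples
121 | 
122 | As mentioned above, `few-shot` prompting embeds example inputs and outputs in the task description so the model can learn the expected mapping. A minimal sketch (the task and its examples are illustrative):
123 | ```python
124 | import declarai
125 | 
126 | gpt_35 = declarai.openai(model="gpt-3.5-turbo")
127 | 
128 | @gpt_35.task
129 | def classify_sentiment(text: str) -> str:
130 |     """
131 |     Classify the sentiment of the given text as 'positive', 'negative' or 'neutral'
132 |     For example:
133 |     "I loved this movie" -> "positive"
134 |     "The plot made no sense" -> "negative"
135 |     :param text: The text to classify
136 |     :return: The sentiment label
137 |     """
138 | ```
139 | 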
--------------------------------------------------------------------------------