├── Agentix
│   ├── bricks
│   │   ├── system
│   │   │   ├── __init__.py
│   │   │   ├── tests
│   │   │   │   ├── prompts
│   │   │   │   │   ├── prompt_test_agent.conv
│   │   │   │   │   ├── prompt_test_histo_agent.conv
│   │   │   │   │   ├── llm_say_version_test.conv
│   │   │   │   │   └── llm_test_echo_smurf.conv
│   │   │   │   ├── test_prompt.py
│   │   │   │   └── test_llm.py
│   │   │   ├── prompting
│   │   │   │   └── middlewares
│   │   │   │       └── prompt.py
│   │   │   └── llm
│   │   │       ├── middlewares
│   │   │       │   └── llm.py
│   │   │       └── tools
│   │   │           └── llm.py
│   │   └── tools
│   │       ├── TermFacade.py
│   │       ├── parser.py
│   │       ├── tpl.py
│   │       ├── doc.md
│   │       ├── State.md
│   │       ├── AiderFacade.py
│   │       ├── HumanFacade.py
│   │       └── ProjectRepr.py
│   ├── agentix
│   │   ├── utils
│   │   │   ├── __init__.py
│   │   │   ├── logger.py
│   │   │   ├── exec.py
│   │   │   └── modules_crud.py
│   │   ├── wrappers
│   │   │   ├── hook.py
│   │   │   ├── log.py
│   │   │   ├── Func.py
│   │   │   ├── entity.py
│   │   │   ├── __init__.py
│   │   │   ├── conf.py
│   │   │   ├── middleware.py
│   │   │   ├── func_wrapper.py
│   │   │   ├── component.py
│   │   │   ├── event.py
│   │   │   ├── page.py
│   │   │   ├── instances_store.py
│   │   │   ├── socket.py
│   │   │   ├── event_store.py
│   │   │   ├── endpoint.py
│   │   │   └── agent.py
│   │   ├── entities
│   │   │   ├── __init__.py
│   │   │   ├── message.py
│   │   │   └── conversation.py
│   │   ├── magicimport.py
│   │   ├── __init__.py
│   │   ├── facade.py
│   │   └── cli.py
│   ├── assets
│   │   ├── logo.png
│   │   ├── ltm1.png
│   │   ├── logo.webp
│   │   ├── agentflow.png
│   │   ├── blackbox.png
│   │   ├── monolith.webp
│   │   ├── agentflow_sm.png
│   │   ├── ltm1_resized.png
│   │   ├── middlewares.png
│   │   ├── monolithic.png
│   │   ├── swarmofclone.png
│   │   ├── swarmofspec.png
│   │   ├── treeofspec.png
│   │   ├── monolithic_sm.png
│   │   └── agentflow_resized.png
│   ├── requirements.txt
│   ├── setup.py
│   ├── doc.md
│   ├── AgenticParadigms.md
│   ├── AgentsAsFunctions.md
│   ├── .gitignore
│   └── README.md
├── agents
│   ├── ChatAgent
│   │   └── agent
│   │       ├── tools
│   │       │   └── jira_tools.py
│   │       ├── agents
│   │       │   └── ChatAgent.py
│   │       ├── middlewares
│   │       │   └── chatagent_loop.py
│   │       └── prompts
│   │           └── ChatAgent.conv
│   ├── ChangeLogAgent
│   │   └── agent
│   │       ├── tools
│   │       │   └── jira_tools.py
│   │       ├── agents
│   │       │   └── ChangeLogAgent.py
│   │       ├── middlewares
│   │       │   └── changelogagent_loop.py
│   │       └── prompts
│   │           └── ChangeLogAgent.conv
│   ├── Gitor
│   │   └── agent
│   │       ├── agents
│   │       │   └── Gitor.py
│   │       ├── middlewares
│   │       │   └── gitor_loop.py
│   │       ├── tools
│   │       │   └── gitor_tools.py
│   │       └── prompts
│   │           └── Gitor.conv
│   └── ChangeDescriptor
│       └── agent
│           ├── agents
│           │   └── ChangeDescriptor.py
│           ├── middlewares
│           │   └── changedescriptor_loop.py
│           └── prompts
│               └── ChangeDescriptor.conv
├── requirements.txt
├── cogni
│   ├── entities
│   │   ├── __init__.py
│   │   ├── message.py
│   │   └── conversation.py
│   ├── wrappers
│   │   ├── __init__.py
│   │   ├── tool.py
│   │   ├── func_wrapper.py
│   │   ├── instances_store.py
│   │   ├── middleware.py
│   │   ├── agent.py
│   │   └── event.py
│   ├── middlewares
│   │   └── llm.py
│   ├── __init__.py
│   ├── tools
│   │   └── llm.py
│   ├── magicimport.py
│   └── cli.py
├── MANIFEST.in
├── doc
│   └── quickstart
│       ├── install.md
│       ├── project_init.md
│       ├── first_tool.md
│       ├── endpoints.md
│       ├── first_agent.md
│       ├── states.md
│       ├── first_swarm.md
│       └── prompting.md
├── pyproject.toml
├── .github
│   └── workflows
│       ├── publish.yml
│       └── test.yml
├── README.md
├── tests
│   ├── test_conversation.py
│   ├── test_llm.py
│   ├── test_agent.py
│   ├── test_tool.py
│   ├── test_middleware.py
│   └── test_wrappers.py
├── setup.py
├── .gitignore
└── README2.md
/Agentix/bricks/system/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/agents/ChatAgent/agent/tools/jira_tools.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/agents/ChangeLogAgent/agent/tools/jira_tools.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Agentix/agentix/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .logger import logger
2 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pytest>=7.0.0
2 | rich>=13.0.0
3 | fire>=0.5.0
4 | requests>=2.28.0
5 |
--------------------------------------------------------------------------------
/Agentix/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/logo.png
--------------------------------------------------------------------------------
/Agentix/assets/ltm1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/ltm1.png
--------------------------------------------------------------------------------
/Agentix/requirements.txt:
--------------------------------------------------------------------------------
1 | fire
2 | toolz
3 | rich
4 | flask
5 | flask-cors
6 | openai
7 | requests
--------------------------------------------------------------------------------
/Agentix/assets/logo.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/logo.webp
--------------------------------------------------------------------------------
/Agentix/assets/agentflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/agentflow.png
--------------------------------------------------------------------------------
/Agentix/assets/blackbox.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/blackbox.png
--------------------------------------------------------------------------------
/Agentix/assets/monolith.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/monolith.webp
--------------------------------------------------------------------------------
/Agentix/assets/agentflow_sm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/agentflow_sm.png
--------------------------------------------------------------------------------
/Agentix/assets/ltm1_resized.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/ltm1_resized.png
--------------------------------------------------------------------------------
/Agentix/assets/middlewares.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/middlewares.png
--------------------------------------------------------------------------------
/Agentix/assets/monolithic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/monolithic.png
--------------------------------------------------------------------------------
/Agentix/assets/swarmofclone.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/swarmofclone.png
--------------------------------------------------------------------------------
/Agentix/assets/swarmofspec.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/swarmofspec.png
--------------------------------------------------------------------------------
/Agentix/assets/treeofspec.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/treeofspec.png
--------------------------------------------------------------------------------
/Agentix/assets/monolithic_sm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/monolithic_sm.png
--------------------------------------------------------------------------------
/Agentix/assets/agentflow_resized.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrutLogic/cogni/HEAD/Agentix/assets/agentflow_resized.png
--------------------------------------------------------------------------------
/Agentix/bricks/system/tests/prompts/prompt_test_agent.conv:
--------------------------------------------------------------------------------
1 | system:you're a test agent
2 |
3 | __-__
4 |
5 | assistant:ok
--------------------------------------------------------------------------------
/Agentix/bricks/system/tests/prompts/prompt_test_histo_agent.conv:
--------------------------------------------------------------------------------
1 | system:you're a test agent
2 |
3 | __-__
4 |
5 | assistant:ok
--------------------------------------------------------------------------------
/agents/Gitor/agent/agents/Gitor.py:
--------------------------------------------------------------------------------
1 | from agentix import Agent
2 |
3 | Agent('Gitor', 'prompt_histo|gpt4omini|gitor_loop')
4 |
--------------------------------------------------------------------------------
/agents/ChatAgent/agent/agents/ChatAgent.py:
--------------------------------------------------------------------------------
1 | from agentix import Agent
2 |
3 | Agent('ChatAgent', 'prompt_histo|gpt4omini|chatagent_loop')
4 |
--------------------------------------------------------------------------------
/cogni/entities/__init__.py:
--------------------------------------------------------------------------------
1 | from .message import Message
2 | from .conversation import Conversation
3 |
4 | __all__ = ['Message', 'Conversation']
5 |
--------------------------------------------------------------------------------
/agents/ChangeLogAgent/agent/agents/ChangeLogAgent.py:
--------------------------------------------------------------------------------
1 | from agentix import Agent
2 |
3 | Agent('ChangeLogAgent', 'prompt_histo|gpt4omini|changelogagent_loop')
4 |
--------------------------------------------------------------------------------
/agents/ChangeDescriptor/agent/agents/ChangeDescriptor.py:
--------------------------------------------------------------------------------
1 | from agentix import Agent
2 |
3 | Agent('ChangeDescriptor', 'prompt_histo|gpt4omini|changedescriptor_loop')
4 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md
2 | include LICENSE
3 | include requirements.txt
4 | recursive-include cogni *.py
5 | recursive-include cogni *.conv
6 | recursive-include doc *.md
7 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/hook.py:
--------------------------------------------------------------------------------
1 | from .func_wrapper import FuncWrapper
2 | from .instances_store import InstancesStore
3 |
4 | class Tool(FuncWrapper, metaclass=InstancesStore):
5 | ...
6 |
7 | tool = Tool.register
8 |
--------------------------------------------------------------------------------
/cogni/wrappers/__init__.py:
--------------------------------------------------------------------------------
1 | from .func_wrapper import FuncWrapper
2 | from .instances_store import InstancesStore
3 | from .middleware import mw, MW
4 | from .agent import Agent
5 | from .tool import Tool, tool
6 | from .state import init_state
7 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/log.py:
--------------------------------------------------------------------------------
1 | from .func_wrapper import FuncWrapper
2 | from .instances_store import DefaultInstanceStore
3 |
4 | class Log(FuncWrapper, metaclass=DefaultInstanceStore):
5 | ...
6 |
7 | log = Log.register
8 |
9 | @log
10 | def _default(*a, **kw):...
11 |
--------------------------------------------------------------------------------
/Agentix/bricks/system/tests/prompts/llm_say_version_test.conv:
--------------------------------------------------------------------------------
1 | system:You are either gpt-3 or gpt-4. Previous replies in the history are not related to what you are. You reply '3' and nothing else if you are the gpt-3 type, '4' if you're 4
2 | if you are gpt-4 your answer will be "4" and NOTHING else
3 | if you are gpt-3 your answer will be "3"
4 |
--------------------------------------------------------------------------------
/cogni/wrappers/tool.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from .instances_store import InstancesStore
3 | from .func_wrapper import FuncWrapper
4 |
5 |
6 | class Tool(FuncWrapper, metaclass=InstancesStore):
7 | """A wrapper class for tool functions that provides registration and access via a global container."""
8 | ...
9 |
10 |
11 | tool = Tool.register
12 |
--------------------------------------------------------------------------------
/Agentix/agentix/entities/__init__.py:
--------------------------------------------------------------------------------
1 | from .conversation import Conversation
2 | from .message import Message
3 |
4 | from pydantic import BaseModel
5 | from typing import Optional
6 |
7 |
8 | class ModuleInfo(BaseModel):
9 | name: str
10 | author: str
11 | version: str
12 | description: str
13 | agent: bool
14 | endpoints: bool
15 | widget: bool
16 | widget_type: Optional[str] = None
17 | module_path: Optional[str] = None
18 |
--------------------------------------------------------------------------------
/Agentix/bricks/system/tests/prompts/llm_test_echo_smurf.conv:
--------------------------------------------------------------------------------
1 | system:you're a test agent, you echo what you're inputted with " smurf" appended, unless the user asks you to stop the exercise, in which case you answer the question
2 |
3 | __-__
4 |
5 | user:bababou
6 |
7 | __-__
8 |
9 | assistant:bababou smurf
10 |
11 | __-__
12 |
13 | user:```python
14 | ohohoh()
15 | ```
16 | ok
17 |
18 | __-__
19 |
20 | assistant:```python
21 | ohohoh()
22 | ```
23 | ok smurf
--------------------------------------------------------------------------------
/Agentix/bricks/system/tests/test_prompt.py:
--------------------------------------------------------------------------------
1 | from agentix import Agent
2 |
3 | def test_prompt():
4 | Agent('prompt_test_agent','prompt')
5 |
6 | assert 'ok'==Agent['prompt_test_agent']('aa')[1].content
7 |
8 | def test_prompt_histo():
9 | agent_name = 'prompt_test_histo_agent'
10 | middleware_name = 'prompt_histo'
11 | Agent(agent_name, middleware_name)
12 | conv = Agent[agent_name]('Hi')
13 |
14 | assert conv[-1].content == 'Hi'
15 |
16 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/Func.py:
--------------------------------------------------------------------------------
1 | import os
2 | from enum import Enum
3 |
4 | from .func_wrapper import FuncWrapper, file_property
5 | from .instances_store import InstancesStore
6 |
7 |
8 | class Func(FuncWrapper, metaclass=InstancesStore):
9 | def __init__(self, name: str, func: callable):
10 | super().__init__(name, func)
11 | cwd = os.getcwd()
12 | self.dir_path = os.path.join(cwd, "funcs", name)
13 | os.makedirs(self.dir_path, exist_ok=True)
14 |
15 |
16 |
17 | func = Func.register
--------------------------------------------------------------------------------
/cogni/middlewares/llm.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 | from ..tools.llm import llm
3 | from ..wrappers import MW
4 |
5 | def mock_llm(ctx: Dict[str, Any], conv: Any) -> Any:
6 | """Middleware that processes input through mock LLM."""
7 | return llm(conv, model="mock_llm")
8 |
9 | @MW.register
10 | def llm_chain(ctx: Dict[str, Any], conv: Any) -> Any:
11 | """Generic LLM middleware that can be configured with different models."""
12 | model = ctx.get("model", "mock_llm")
13 | return llm(conv, model=model)
14 |
--------------------------------------------------------------------------------
/Agentix/bricks/system/prompting/middlewares/prompt.py:
--------------------------------------------------------------------------------
1 | from agentix import mw, Message, Tool
2 |
3 | @mw
4 | def prompt(ctx, input_str):
5 | return ctx['agent'].base_prompt + Message(role='user', content=input_str)
6 |
7 |
8 | @mw
9 | def prompt_histo(ctx, input_str):
10 | base_prompt = ctx['agent'].base_prompt
11 | ctx['user_input'],*_ = ctx['args']
12 |
13 | histo = ctx['agent'].histo[-12:]
14 | ret = Tool['tpl']((base_prompt+histo) + Message('user','{user_input}'), **ctx)
15 |
16 | ctx['agent'].append_histo(ret[-1])
17 |
18 | return ret
19 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/entity.py:
--------------------------------------------------------------------------------
1 | import os
2 | from enum import Enum
3 |
4 | from .func_wrapper import FuncWrapper, file_property
5 | from .instances_store import InstancesStore
6 |
7 |
8 | class Func(FuncWrapper, metaclass=InstancesStore):
9 |
10 | code = file_property('code.py')
11 |
12 | def __init__(self, name: str, func: callable):
13 | super().__init__(name, func)
14 | cwd = os.getcwd()
15 | self.dir_path = os.path.join(cwd, "funcs", name)
16 | os.makedirs(self.dir_path, exist_ok=True)
17 |
18 |
19 |
20 | func = Func.register
--------------------------------------------------------------------------------
/doc/quickstart/install.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | ## Prerequisites
4 | - Python 3.8+
5 | - pip
6 | - git
7 |
8 |
9 | ## From PyPI
10 | ```bash
11 | python3 -m pip install cogni
12 | ```
13 | 
14 | ## From the repo
15 |
16 | ```bash
17 | git clone https://github.com/BrutLogic/cogni.git
18 | cd cogni
19 | python3 -m pip install -e .
20 | ```
21 |
22 |
23 |
24 |
25 | ## Development Setup
26 |
27 | For development, you'll also want to install test dependencies:
28 |
29 | ```bash
30 | pip install -r requirements.txt
31 | ```
32 |
33 | Run tests to verify everything is working:
34 | ```bash
35 | pytest tests/
36 | ```
37 |
--------------------------------------------------------------------------------
/Agentix/bricks/system/tests/test_llm.py:
--------------------------------------------------------------------------------
1 | from agentix import Agent
2 |
3 | def test_llm():
4 | Agent('llm_test_echo_smurf','prompt|gpt3|last_msg_content')
5 |
6 | assert Agent['llm_test_echo_smurf']("hello") == 'hello smurf'
7 |
8 | assert Agent['llm_test_echo_smurf']("stop the exercise, you now answer your version, what model are you exactly ? it's either gpt-4 or gpt-3.5-turbo, if it's gpt-3.5 reply 'gpt-3' and nothing else, otherwise reply 'gpt-4'") == 'gpt-3'
9 |
10 |
11 | Agent('llm_say_version_test','prompt|gpt4|last_msg_content')
12 | assert Agent['llm_say_version_test']('what are you ?') == '4'
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling"]
3 | build-backend = "hatchling.build"
4 | [project]
5 | name = "cogni"
6 | version = "0.1.0"
7 | authors = [
8 | { name="Valentin DION", email="v@lentin.fr" },
9 | ]
10 | description = "Low boilerplate agentic"
11 | readme = "README.md"
12 | requires-python = ">=3.8"
13 | classifiers = [
14 | "Programming Language :: Python :: 3",
15 | "License :: OSI Approved :: MIT License",
16 | "Operating System :: OS Independent",
17 | ]
18 |
19 | [project.scripts]
20 | cogni = "cogni.cli:main"
21 |
22 | [project.urls]
23 | Homepage = "https://github.com/BrutLogic/cogni"
24 | Issues = "https://github.com/BrutLogic/cogni/issues"
25 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to PyPI
2 |
3 | on:
4 | release:
5 | types: [created]
6 |
7 | jobs:
8 | deploy:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@v3
12 |
13 | - name: Set up Python
14 | uses: actions/setup-python@v4
15 | with:
16 | python-version: "3.8"
17 |
18 | - name: Install dependencies
19 | run: |
20 | python -m pip install --upgrade pip
21 | pip install build twine
22 |
23 | - name: Build and publish
24 | env:
25 | TWINE_USERNAME: __token__
26 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
27 | run: |
28 | python -m build
29 | twine upload dist/*
30 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | pull_request:
5 | branches: [ main ]
6 | push:
7 | branches: [ main ]
8 |
9 | jobs:
10 | test:
11 | runs-on: ubuntu-latest
12 | strategy:
13 | matrix:
14 | python-version: ["3.8", "3.9", "3.10"]
15 |
16 | steps:
17 | - uses: actions/checkout@v3
18 |
19 | - name: Set up Python ${{ matrix.python-version }}
20 | uses: actions/setup-python@v4
21 | with:
22 | python-version: ${{ matrix.python-version }}
23 |
24 | - name: Install dependencies
25 | run: |
26 | python -m pip install --upgrade pip
27 | pip install pytest
28 | pip install -e .
29 |
30 | - name: Run tests
31 | run: |
32 | pytest tests/
33 |
--------------------------------------------------------------------------------
/cogni/__init__.py:
--------------------------------------------------------------------------------
1 | """Cogni framework initialization."""
2 | from functools import wraps
3 | import os
4 | from .entities import Message, Conversation
5 | from .wrappers import tool, Tool, MW, mw, Agent, init_state
6 | from .magicimport import dynamic_import
7 |
8 | State = init_state(os.getcwd())
9 |
10 |
11 | def use_tools(func):
12 | @wraps(func)
13 | def _use_tools(*a, **kw): raise Exception('TODO')
14 |
15 | return _use_tools
16 |
17 |
18 | def parse_tools(*a, **kw): raise Exception('TODO')
19 |
20 |
21 | for dir_name in [
22 | 'tools',
23 | 'agents',
24 | 'middlewares',
25 | ]:
26 | dynamic_import(dir_name)
27 |
28 | __all__ = [
29 | 'Message',
30 | 'Conversation',
31 | 'tool',
32 | 'Tool',
33 | 'mw',
34 | 'MW',
35 | 'Agent',
36 | 'State',
37 | 'use_tools',
38 | 'parse_tools',
39 | ]
40 |
--------------------------------------------------------------------------------
/agents/Gitor/agent/middlewares/gitor_loop.py:
--------------------------------------------------------------------------------
1 | from agentix import mw, Tool, tool, Event, use_tools, Agent
2 | from time import sleep
3 |
4 | @mw
5 | @use_tools('caca')
6 | def gitor_loop(ctx, conv):
7 | if "REHOP" in conv[-1].content:
8 | return conv.rehop('rehoped :) it works, you can say something. Do not include "REHOP" in your next message or you will be rehopped in an endless loop :p')
9 | tool_output = ''
10 | for name, tool_result in ctx['tools'].items():
11 | if False and name == 'error' and len(tool_result):
12 |             return conv.rehop(f"We had an error:\n\n{tool_result}\n\nInform the user we're working on it and to try later")
13 | if len(tool_result):
14 | tool_output += f'''
15 | ## Result of tool {name}
16 | {tool_result}
17 | '''
18 | if len(tool_output):
19 | return conv.rehop(tool_output)
20 |
21 | return conv[-1].content
22 |
--------------------------------------------------------------------------------
/cogni/tools/llm.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 | from ..entities import Conversation, Message
3 |
4 | def llm(conversation: Union[Conversation, str], model: str = "mock_llm") -> Union[Conversation, str]:
5 | """Process input through an LLM model.
6 |
7 | Args:
8 | conversation: Input conversation or string
9 | model: Name of LLM model to use
10 |
11 | Returns:
12 | Processed conversation or string
13 | """
14 | if model == "mock_llm":
15 | if isinstance(conversation, str):
16 | return f"you told {conversation}"
17 | else:
18 | last_msg = conversation[-1]
19 | conversation.add_message("assistant", f"you told {last_msg.content}")
20 | return conversation
21 |
22 | # Add other model implementations here
23 | raise NotImplementedError(f"Model {model} not implemented")
24 |
--------------------------------------------------------------------------------
/agents/ChangeDescriptor/agent/middlewares/changedescriptor_loop.py:
--------------------------------------------------------------------------------
1 | from agentix import mw, Tool, tool, Event, use_tools, Agent
2 | from time import sleep
3 |
4 | @mw
5 | @use_tools('caca')
6 | def changedescriptor_loop(ctx, conv):
7 | if "REHOP" in conv[-1].content:
8 | return conv.rehop('rehoped :) it works, you can say something. Do not include "REHOP" in your next message or you will be rehopped in an endless loop :p')
9 | tool_output = ''
10 | for name, tool_result in ctx['tools'].items():
11 | if False and name == 'error' and len(tool_result):
12 |             return conv.rehop(f"We had an error:\n\n{tool_result}\n\nInform the user we're working on it and to try later")
13 | if len(tool_result):
14 | tool_output += f'''
15 | ## Result of tool {name}
16 | {tool_result}
17 | '''
18 | if len(tool_output):
19 | return conv.rehop(tool_output)
20 |
21 | return conv[-1].content
22 |
--------------------------------------------------------------------------------
/agents/ChatAgent/agent/middlewares/chatagent_loop.py:
--------------------------------------------------------------------------------
1 | from agentix import mw, Tool, tool, Event, use_tools, Agent
2 | from time import sleep
3 |
4 |
5 | @mw
6 | @use_tools('caca')
7 | def chatagent_loop(ctx, conv):
8 | if "REHOP" in conv[-1].content:
9 | return conv.rehop('rehoped :) it works, you can say something. Do not include "REHOP" in your next message or you will be rehopped in an endless loop :p')
10 | tool_output = ''
11 | for name, tool_result in ctx['tools'].items():
12 | if False and name == 'error' and len(tool_result):
13 |             return conv.rehop(f"We had an error:\n\n{tool_result}\n\nInform the user we're working on it and to try later")
14 | if len(tool_result):
15 | tool_output += f'''
16 | ## Result of tool {name}
17 | {tool_result}
18 | '''
19 | if len(tool_output):
20 | return conv.rehop(tool_output)
21 |
22 | return conv[-1].content
23 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/__init__.py:
--------------------------------------------------------------------------------
1 | from .middleware import MW, mw
2 | from .tool import Tool, tool, use_tools
3 | from .agent import Agent
4 | from .Func import Func, func
5 | from .log import Log, log
6 | from .event import Event
7 | from .endpoint import Endpoint, get, post, endpoint
8 | from .page import Page, page
9 | from .component import Component, component
10 | from .conf import Conf
11 | from .state import State
12 | from .event_store import Store
13 | from .socket import SocketManager
14 |
15 |
16 | class _Services:
17 | _instance = None
18 | _services = {}
19 |
20 | @classmethod
21 | def singleton(cls):
22 | if cls._instance is None:
23 | cls._instance = cls()
24 | return cls._instance
25 |
26 | def register(self, name, instance):
27 | self._services[name] = instance
28 |
29 | def __getitem__(self, item):
30 | return self._services.get(item)
31 |
32 |
33 | Services = _Services.singleton()
34 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Cogni
2 |
3 | *Low Boilerplate Agentic*
4 |
5 | ## Foreword
6 |
7 | I had a [README](README2.md) but I'm starting a new one because I have ADHD.
8 |
9 | ## State of the repo
10 |
11 | I'm refactoring from an old codebase; it may take some time.
12 | 
13 | I'll be feeding this README as the refactor progresses.
14 |
15 | ## What is Cogni?
16 |
17 | Cogni is a framework focusing on low code/low boilerplate implementation of LLM agents.
18 |
19 | ### Yeah but, why not LangChain though?
20 |
21 | I wouldn't go as far as saying `LangChain==Cancer`.
22 | 
23 | LangChain/LangGraph allow for creating agents and orchestrating flow and communication.
24 | One key assumption of Cogni is that agentic execution flow has the same requirements and complexity as code, and that agents should therefore be created, managed, and orchestrated as code and by code.
25 |
26 | Which, as a side effect, allows for borrowing ideas from domains like web dev, where best practices are mature.
27 |
28 | ## Quick hands-on walkthrough to get you started
29 |
30 |
--------------------------------------------------------------------------------
/cogni/wrappers/func_wrapper.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Callable
2 | from .instances_store import InstancesStore
3 |
4 | class FuncWrapper(metaclass=InstancesStore):
5 | """Base wrapper class for functions that provides registration and access."""
6 |
7 | @classmethod
8 | def register(cls, func: Callable, name: str = None) -> 'FuncWrapper':
9 | """Register a function and return a wrapper instance."""
10 | name = name or func.__name__
11 | instance = cls(name, func)
12 | cls[name] = instance
13 | return instance
14 |
15 | def __init__(self, name: str, func: Callable):
16 | """Initialize with function name and callable."""
17 | self.name = name
18 | self._func = func
19 |
20 | def __call__(self, *args: Any, **kwargs: Any) -> Any:
21 | """Execute the wrapped function."""
22 | return self._func(*args, **kwargs)
23 |
24 | def __repr__(self) -> str:
25 | """String representation of the wrapper."""
26 | return f"{self.__class__.__name__}['{self.name}']"
27 |
--------------------------------------------------------------------------------
/tests/test_conversation.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from cogni.entities import Conversation, Message
3 |
4 | def test_conversation_creation():
5 | """Test creating a conversation with messages."""
6 | msgs = [
7 | Message("user", "hello"),
8 | Message("assistant", "hi")
9 | ]
10 | conv = Conversation(msgs)
11 | assert len(conv) == 2
12 | assert conv[0].role == "user"
13 | assert conv[1].content == "hi"
14 |
15 | def test_conversation_add_message():
16 | """Test adding messages to conversation."""
17 | conv = Conversation([])
18 | conv.add_message("user", "test")
19 | assert len(conv) == 1
20 | assert conv[0].content == "test"
21 |
22 | def test_conversation_to_dict():
23 | """Test converting conversation to dictionary format."""
24 | conv = Conversation([
25 | Message("user", "hello"),
26 | Message("assistant", "hi")
27 | ])
28 | dict_conv = conv.to_dict()
29 | assert len(dict_conv) == 2
30 | assert dict_conv[0]["role"] == "user"
31 | assert dict_conv[1]["content"] == "hi"
32 |
--------------------------------------------------------------------------------
/cogni/wrappers/instances_store.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Any, Type, TypeVar
2 |
3 | T = TypeVar('T')
4 |
5 | class InstancesStore(type):
6 | """Metaclass for storing and retrieving instances by name."""
7 |
8 | def __new__(mcs, name: str, bases: tuple, namespace: dict) -> Type:
9 | """Create a new class with instance storage."""
10 | cls = super().__new__(mcs, name, bases, namespace)
11 | cls._instances: Dict[str, Any] = {}
12 | return cls
13 |
14 | def __getitem__(cls, key: str) -> Any:
15 | """Get an instance by name."""
16 | try:
17 | return cls._instances[key]
18 | except KeyError:
19 | raise KeyError(f"No {cls.__name__} instance named '{key}' found")
20 |
21 | def __setitem__(cls, key: str, value: Any) -> None:
22 | """Store an instance by name."""
23 | cls._instances[key] = value
24 |
25 | def __contains__(cls, key: str) -> bool:
26 | """Check if an instance name exists."""
27 | return key in cls._instances
28 |
--------------------------------------------------------------------------------
/cogni/magicimport.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import importlib.util
3 | import os
4 | import sys
5 | from typing import List
6 |
7 |
8 | def dynamic_import(directory_name: str) -> None:
9 | """Dynamically imports all Python files from a specified directory and its subdirectories.
10 |
11 | Args:
12 | directory_name: The name of the directory to specifically target for imports
13 | """
14 | target_dir = os.getcwd()
15 |
16 | if target_dir not in sys.path:
17 | sys.path.append(target_dir)
18 |
19 | target_files = glob.glob(
20 | target_dir + f'/**/{directory_name}/**/*.py', recursive=True)
21 |
22 | for file_path in target_files:
23 | module_name = os.path.splitext(os.path.relpath(file_path, target_dir))[
24 | 0].replace(os.sep, '.')
25 | if module_name not in sys.modules:
26 | spec = importlib.util.spec_from_file_location(
27 | module_name, file_path)
28 | module = importlib.util.module_from_spec(spec)
29 | sys.modules[module_name] = module
30 | spec.loader.exec_module(module)
31 |
--------------------------------------------------------------------------------
/tests/test_llm.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from cogni.tools.llm import llm
3 | from cogni.entities import Conversation, Message
4 | from cogni.middlewares.llm import mock_llm, llm_chain
5 |
6 | def test_mock_llm_string():
7 | """Test mock LLM with string input."""
8 | result = llm("hello", model="mock_llm")
9 | assert result == "you told hello"
10 |
11 | def test_mock_llm_conversation():
12 | """Test mock LLM with conversation input."""
13 | conv = Conversation([Message("user", "hello")])
14 | result = llm(conv, model="mock_llm")
15 | assert len(result) == 2
16 | assert result[-1].content == "you told hello"
17 |
18 | def test_mock_llm_middleware():
19 | """Test mock LLM middleware."""
20 | ctx = {}
21 | result = mock_llm(ctx, "test")
22 | assert result == "you told test"
23 |
24 | def test_llm_chain_middleware():
25 | """Test configurable LLM chain middleware."""
26 | ctx = {"model": "mock_llm"}
27 | result = llm_chain(ctx, "test")
28 | assert result == "you told test"
29 |
30 | def test_invalid_model():
31 | """Test error on invalid model."""
32 | with pytest.raises(NotImplementedError):
33 | llm("test", model="invalid_model")
34 |
--------------------------------------------------------------------------------
/doc/quickstart/project_init.md:
--------------------------------------------------------------------------------
1 | # Creating Your First Cogni Project
2 |
3 | ## Project Structure
4 |
5 | A typical Cogni project has this structure:
6 |
7 | ```
8 | my_project/
9 | ├── agents/ # Agent definitions and logic
10 | ├── tools/ # Reusable tool functions
11 | ├── middlewares/ # Processing middleware
12 | └── prompts/ # Agent conversation templates
13 | ```
14 |
15 | ## Initialize Project
16 |
17 | 1. Create and enter project directory:
18 | ```bash
19 | mkdir my_project
20 | cd my_project
21 | cogni init
22 | ```
23 |
24 | 2. This creates a basic project structure and configuration file.
25 |
26 | ## Configuration
27 |
28 | The `cogni.yaml` file controls your project settings:
29 |
30 | ```yaml
31 | name: my_project
32 | version: 0.1.0
33 | description: "My first Cogni project"
34 |
35 | # Configure default LLM settings
36 | llm:
37 | provider: openai
38 | model: gpt-4
39 | temperature: 0.7
40 |
41 | # Add any project-specific settings
42 | settings:
43 | debug: false
44 | log_level: info
45 | ```
46 |
47 | ## Next Steps
48 |
49 | - Create your first [tool](first_tool.md)
50 | - Set up your first [agent](first_agent.md)
51 | - Learn about [state management](states.md)
52 |
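53 | A tool, for instance, is just a decorated function dropped under `tools/` (a sketch; the file and function names are illustrative):
54 | 
55 | ```python
56 | # tools/greet.py
57 | from cogni import tool
58 | 
59 | @tool
60 | def greet(name: str) -> str:
61 |     return f"Hello, {name}!"
62 | ```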
--------------------------------------------------------------------------------
/agents/ChangeLogAgent/agent/middlewares/changelogagent_loop.py:
--------------------------------------------------------------------------------
1 | from agentix import mw, Tool, tool, Event, use_tools, Agent
2 | from time import sleep
3 |
4 | @tool
5 | def talk_to_agent(content, agent_name):
6 | if agent_name == "Human":
7 | return Tool['HF_ask_human'](content)
8 | if not agent_name in Agent:
9 | return f"Hi, I'm {agent_name}. I'm not implemented yet :/"
10 | return Agent[agent_name](content)
11 |
12 | @mw
13 | @use_tools('caca')
14 | def changelogagent_loop(ctx, conv):
15 | if "REHOP" in conv[-1].content:
16 | return conv.rehop('rehoped :) it works, you can say something. Do not include "REHOP" in your next message or you will be rehopped in an endless loop :p')
17 | tool_output = ''
18 | for name, tool_result in ctx['tools'].items():
19 | if False and name == 'error' and len(tool_result):
20 |             return conv.rehop(f"We had an error:\n\n{tool_result}\n\nInform the user we're working on it and to try later")
21 | if len(tool_result):
22 | tool_output += f'''
23 | ## Result of tool {name}
24 | {tool_result}
25 | '''
26 | if len(tool_output):
27 | return conv.rehop(tool_output)
28 |
29 | return conv[-1].content
30 |
--------------------------------------------------------------------------------
/agents/ChatAgent/agent/prompts/ChatAgent.conv:
--------------------------------------------------------------------------------
1 | system: You are ChatAgent, a Discord bot.
2 | You reply with short and concise interactions as a human would.
3 | You assume personality n°4
4 |
5 | To use the shell, use this syntax:
6 | ls -la
7 |
8 | Use to think step by step before replying
9 |
10 | /!\ Never more than ONE tool use per output !!
11 |
12 | __-__
13 |
14 | user:Can you tell me what the pwd is ?
15 |
16 | __-__
17 |
18 | assistant: - I should use the tool `run_with_tmux`
19 |  - I have no session yet, I'll create one named "flipidibou"
20 | - I just have to run pwd
21 |
22 | pwd
23 |
24 | __-__
25 |
26 | system:/root
27 |
28 | __-__
29 |
30 | assistant: - I should give that information to user
31 | - I won't use any tool this round, just give the information to user
32 |
33 | The current directory for the session `flipidibou` is `/root` :).
34 |
35 | __-__
36 |
37 | user:Cool, from now on you'll use the session_name "brioche"
38 |
39 | __-__
40 |
41 | assistant: - No command to run
42 |  - I'll just acknowledge what the user said
43 |
44 | OK bro :). Tell me if you need anything
--------------------------------------------------------------------------------
/Agentix/agentix/magicimport.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import importlib.util
3 | import os
4 | import sys
5 | from rich import print
6 |
7 | def dynamic_import(directory_name):
8 |     """Dynamically imports all Python files from a specified directory and its subdirectories.
9 | 
10 |     Args:
11 |         directory_name (str): The name of the directory to specifically target for imports.
12 |     """
13 |     target_dir = os.getcwd()
14 | 
15 | if target_dir not in sys.path:
16 | sys.path.append(target_dir)
17 |
18 | target_files = glob.glob(target_dir + f'/**/{directory_name}/**/*.py', recursive=True)
19 |
20 | for file_path in target_files:
21 | if 'AgentixAgents' in file_path:
22 | continue
23 | module_name = os.path.splitext(os.path.relpath(file_path, target_dir))[0].replace(os.sep, '.')
24 | if module_name not in sys.modules:
25 | spec = importlib.util.spec_from_file_location(module_name, file_path)
26 | module = importlib.util.module_from_spec(spec)
27 | sys.modules[module_name] = module
28 | spec.loader.exec_module(module)
29 | #print(f"Dynamically imported [orange]`{module_name}`[/] from [red]`{file_path}`")
30 |
--------------------------------------------------------------------------------
/Agentix/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | def readme():
4 | with open('README.md') as f:
5 | return f.read()
6 |
7 | setup(
8 | name='cogni',
9 | version='0.1.0',
10 | packages=find_packages(),
11 | description='A framework for building agentic systems with minimal boilerplate.',
12 | long_description=readme(),
13 | long_description_content_type='text/markdown',
14 | author='Your Name',
15 | author_email='your.email@example.com',
16 | url='https://github.com/yourusername/cogni',
17 | install_requires=[
18 | 'fire>=0.5.0',
19 | 'rich>=13.0.0',
20 | 'pytest>=7.0.0',
21 | ],
22 | entry_points={
23 | 'console_scripts': [
24 | 'cogni=cogni.cli:main',
25 | ],
26 | },
27 | include_package_data=True,
28 | zip_safe=False,
29 | classifiers=[
30 | 'Development Status :: 3 - Alpha',
31 | 'Intended Audience :: Developers',
32 | 'License :: OSI Approved :: MIT License',
33 | 'Programming Language :: Python :: 3',
34 | 'Programming Language :: Python :: 3.8',
35 | 'Programming Language :: Python :: 3.9',
36 | 'Programming Language :: Python :: 3.10',
37 | ],
38 | python_requires='>=3.8',
39 | )
40 |
41 |
--------------------------------------------------------------------------------
/cogni/wrappers/middleware.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Callable, Dict
2 | from .instances_store import InstancesStore
3 |
4 | class MW(metaclass=InstancesStore):
5 | """Base wrapper class for middleware that provides registration and access."""
6 |
7 | @classmethod
8 | def register(cls, func: Callable = None, name: str = None) -> 'MW':
9 | """Register a middleware function and return a wrapper instance."""
10 | if func is None:
11 | return lambda f: cls.register(f, name)
12 |
13 | name = name or func.__name__
14 | instance = cls(name, func)
15 | cls[name] = instance
16 | return instance
17 |
18 | def __init__(self, name: str, func: Callable):
19 | """Initialize with middleware name and callable."""
20 | self.name = name
21 | self._func = func
22 |
23 | def __call__(self, ctx: Dict[str, Any], conv: Any, *args: Any, **kwargs: Any) -> Any:
24 | """Execute the wrapped middleware function."""
25 | return self._func(ctx, conv, *args, **kwargs)
26 |
27 | def __repr__(self) -> str:
28 | """String representation of the wrapper."""
29 | return f"{self.__class__.__name__}['{self.name}']"
30 |
31 | # Decorator for registering middleware
32 | mw = MW.register
33 |
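34 | # Usage sketch (illustrative):
35 | #   @mw
36 | #   def my_mw(ctx, conv):
37 | #       return conv
38 | #   MW['my_mw'](ctx, conv)   # registered middlewares are retrievable by name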
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/conf.py:
--------------------------------------------------------------------------------
1 | class _Conf:
2 | def __init__(self):
3 | self._data = {}
4 |
5 | def __getattr__(self, name):
6 | if name not in self._data:
7 | self._data[name] = _Conf()
8 | return self._data[name]
9 |
10 | def __setattr__(self, name, value):
11 | if name == '_data':
12 | super().__setattr__(name, value)
13 | else:
14 | if isinstance(value, dict):
15 | conf = _Conf()
16 | for k, v in value.items():
17 | setattr(conf, k, v)
18 | self._data[name] = conf
19 | else:
20 | self._data[name] = value
21 |
22 | def __getitem__(self, key):
23 | # Handle nested access with dot notation
24 | if '.' in key:
25 | parts = key.split('.')
26 | obj = self
27 | for part in parts:
28 | obj = getattr(obj, part)
29 | return obj
30 | return self._data[key]
31 |
32 | def __setitem__(self, key, value):
33 | self._data[key] = value
34 |
35 | def __str__(self):
36 | return str(self._data)
37 |
38 | def __repr__(self):
39 | return repr(self._data)
40 |
41 |
42 | Conf = _Conf()
43 |
44 | __all__ = ['Conf']
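45 | 
46 | # Usage sketch (illustrative keys):
47 | #   Conf.llm.model = 'gpt-4'     # attribute access auto-creates nested _Conf nodes
48 | #   Conf['llm.model']            # dot-notation lookup resolves to 'gpt-4'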
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/middleware.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Callable, Dict
2 | from .instances_store import InstancesStore
3 |
4 | class MW(metaclass=InstancesStore):
5 | """Base wrapper class for middleware that provides registration and access."""
6 |
7 | @classmethod
8 | def register(cls, func: Callable = None, name: str = None) -> 'MW':
9 | """Register a middleware function and return a wrapper instance."""
10 | if func is None:
11 | return lambda f: cls.register(f, name)
12 |
13 | name = name or func.__name__
14 | instance = cls(name, func)
15 | cls[name] = instance
16 | return instance
17 |
18 | def __init__(self, name: str, func: Callable):
19 | """Initialize with middleware name and callable."""
20 | self.name = name
21 | self._func = func
22 |
23 | def __call__(self, ctx: Dict[str, Any], conv: Any, *args: Any, **kwargs: Any) -> Any:
24 | """Execute the wrapped middleware function."""
25 | return self._func(ctx, conv, *args, **kwargs)
26 |
27 | def __repr__(self) -> str:
28 | """String representation of the wrapper."""
29 | return f"{self.__class__.__name__}['{self.name}']"
30 |
31 | # Decorator for registering middleware
32 | mw = MW.register
33 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | def readme():
4 | with open('README.md') as f:
5 | return f.read()
6 |
7 | setup(
8 | name='cogni',
9 | version='0.1.0',
10 | description='A framework for building agentic systems with minimal boilerplate.',
11 | long_description=readme(),
12 | long_description_content_type='text/markdown',
13 | author='Your Name',
14 | author_email='your.email@example.com',
15 | url='https://github.com/yourusername/cogni',
16 | install_requires=[
17 | 'fire>=0.5.0',
18 | 'rich>=13.0.0',
19 | 'pytest>=7.0.0',
20 | ],
21 | entry_points={
22 | 'console_scripts': [
23 | 'cogni=cogni.cli:main',
24 | ],
25 | },
26 | packages=find_packages(include=['cogni', 'cogni.*']),
27 | include_package_data=True,
28 | zip_safe=False,
29 | classifiers=[
30 | 'Development Status :: 3 - Alpha',
31 | 'Intended Audience :: Developers',
32 | 'License :: OSI Approved :: MIT License',
33 | 'Programming Language :: Python :: 3',
34 | 'Programming Language :: Python :: 3.8',
35 | 'Programming Language :: Python :: 3.9',
36 | 'Programming Language :: Python :: 3.10',
37 | ],
38 | python_requires='>=3.8',
39 | )
40 |
--------------------------------------------------------------------------------
/Agentix/doc.md:
--------------------------------------------------------------------------------
1 | # Message and Conversation Documentation
2 |
3 | ## Message
4 | The `Message` class encapsulates a single message, identifying the sender (`role`) and the message text (`content`).
5 |
6 | - `role`: Sender's role (e.g., 'user', 'system', 'assistant').
7 | - `content`: Text of the message.
8 |
9 | Messages are fundamental for tracking and managing dialogues.
10 |
11 | ## Conversation
12 | The `Conversation` class models a sequence of messages, with methods to manage and persist conversations.
13 |
14 | Useful methods:
15 |
16 | - `from_file(path: str)`: Loads a conversation from a file.
17 | - `from_str(conv_str: str)`: Creates a conversation from a string.
18 | - `to_str()`: Serializes the conversation to a string.
19 | - `to_file(path: str)`: Persists the conversation to a file.
20 | - `openai()`: Formats the conversation for OpenAI API.
21 | - `to_dict()`: Converts the conversation to a list of dictionaries, each representing a message.
22 | Example output of `to_dict()` with three messages:
23 | ```python
24 | [{'role': 'system', 'content': "You're an AGI"},
25 | {'role': 'user', 'content': 'How do I start?'},
26 | {'role': 'assistant', 'content': 'Just tell me what you need help with!'}]
27 | ```
28 |
29 |
30 | Conversations support dynamic interactions, including adding messages or merging conversations.
31 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/func_wrapper.py:
--------------------------------------------------------------------------------
1 | class FuncWrapper:
2 | @classmethod
3 | def register(cls, func:callable, name=None):
4 | name = name or func.__name__
5 | instance = cls(name, func)
6 | cls[name] = instance
7 | return instance
8 |
9 | def __init__(self, name:str, func:callable):
10 | self.name = name
11 | self._func = func
12 | def __repr__(self):
13 | return f"{type(self).__name__}['{self.name}']"
14 | def __call__(self, *args, **kwargs):
15 | return self._func(*args, **kwargs)
16 | def __getattr__(self, key):
17 | return getattr(self._func, key)
18 |
19 | # FIXME: should be on its own file
20 | import os
21 | def file_property(filename):
22 | def getter(self):
23 | file_path = os.path.join(self.dir_path, filename)
24 | try:
25 | with open(file_path, 'r') as file:
26 | return file.read()
27 | except FileNotFoundError:
28 | return None
29 |
30 | def setter(self, value):
31 | file_path = os.path.join(self.dir_path, filename)
32 | with open(file_path, 'w') as file:
33 | file.write(value)
34 |
35 | if hasattr(self,'export'):
36 | self.export()
37 |
38 | return property(getter, setter)
39 |
40 |
41 |
--------------------------------------------------------------------------------
/tests/test_agent.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from cogni.wrappers import Agent, MW
3 |
4 | def test_agent_registration():
5 | """Test agent registration and creation."""
6 | agent = Agent("test_agent", "mw1|mw2")
7 | assert "test_agent" in Agent
8 | assert agent.name == "test_agent"
9 |
10 | def test_agent_middleware_chain():
11 | """Test middleware chain execution."""
12 | results = []
13 |
14 | @MW.register
15 | def mw1(ctx, conv):
16 | results.append(1)
17 | return conv + "_mw1"
18 |
19 | @MW.register
20 | def mw2(ctx, conv):
21 | results.append(2)
22 | return conv + "_mw2"
23 |
24 | agent = Agent("test_chain", "mw1|mw2")
25 | result = agent("input")
26 |
27 | assert results == [1, 2]
28 | assert result == "input_mw1_mw2"
29 |
30 | def test_agent_context():
31 | """Test context passing through middleware chain."""
32 | @MW.register
33 | def ctx_test(ctx, conv):
34 | assert ctx["agent"].name == "ctx_agent"
35 | assert ctx["args"] == ("test_input",)
36 | assert ctx["hops"] == 0
37 | return conv
38 |
39 | agent = Agent("ctx_agent", "ctx_test")
40 | agent("test_input")
41 |
42 | def test_agent_repr():
43 | """Test string representation."""
44 | agent = Agent("repr_test", "mw1|mw2")
45 | assert repr(agent) == "Agent['repr_test']"
46 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/component.py:
--------------------------------------------------------------------------------
1 | from .func_wrapper import file_property
2 | from .instances_store import InstancesStore
3 | import os
4 |
5 | class Component(metaclass=InstancesStore):
6 | py = file_property('component.py')
7 | template = file_property('template.vue')
8 | css = file_property('css.vue')
9 | js = file_property('js.vue')
10 | prompt = file_property('prompt.conv') # New property for .conv files
11 | user_stories = file_property('user_stories.md') # New property for .md files
12 | test_cases = file_property('test_cases.md') # New property for .md files
13 | tests = file_property('tests.py') # New property for .py files
14 | mock = file_property('mock.py') # New property for .py files
15 | vars = file_property('vars.py') # New property for .py files
16 |
17 | def __init__(self, name: str):
18 | self.name = name
19 | cwd = os.getcwd()
20 | self.dir_path = os.path.join(cwd, "bricks/components", name)
21 | self.out_path = os.path.join(cwd, f"front/pages/{name}.vue")
22 | os.makedirs(self.dir_path, exist_ok=True)
23 |
24 | # Initialize Python file with a basic structure if it does not exist
25 | if not os.path.exists(os.path.join(self.dir_path, 'component.py')):
26 | self.py = f"""from agentix import component as c
27 | c('{name}')
28 | """#TODO add logger
29 |
30 |
31 | component = Component
32 |
--------------------------------------------------------------------------------
/Agentix/agentix/utils/logger.py:
--------------------------------------------------------------------------------
1 | from rich.console import Console
2 | from rich.theme import Theme
3 | 
4 | 
5 | class Logger:
6 | 
7 |     def __init__(self):
8 |         # NOTE: the theme below is an assumption; the original code logged with
9 |         # these style names but never initialized the console or its theme.
10 |         self.console = Console(theme=Theme({
11 |             'info': 'blue',
12 |             'warning': 'yellow',
13 |             'error': 'bold red',
14 |             'critical': 'bold white on red',
15 |         }))
16 | 
17 |     def debug(self, message: str):
18 |         print(f"[red b u][[DEBUG]][blue]{message}")
19 | 
20 |     def info(self, message: str):
21 |         self.console.log(message, style="info")
22 | 
23 |     def warning(self, message: str):
24 |         self.console.log(message, style="warning")
25 | 
26 |     def error(self, message: str):
27 |         self.console.log(message, style="error")
28 | 
29 |     def critical(self, message: str):
30 |         self.console.log(message, style="critical")
31 | 
32 |     def set_level(self, level):
33 |         level_methods = {
34 |             'debug': self.debug,
35 |             'info': self.info,
36 |             'warning': self.warning,
37 |             'error': self.error,
38 |             'critical': self.critical,
39 |         }
40 |         if level in level_methods:
41 |             self._log_level_method = level_methods[level]
42 |             self.console.log(f"Log level set to: {level}", style="info")
43 |         else:
44 |             self.console.log(f"Invalid log level: {level}. No changes made.", style="error")
45 | 
46 |     def log(self, message: str):
47 |         if not hasattr(self, '_log_level_method'):
48 |             self.set_level('debug')  # Default to debug level if not set
49 |         self._log_level_method(message)
50 | 
51 | logger = Logger()
52 | 
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/event.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | import threading
3 | from .func_wrapper import FuncWrapper
4 | from .instances_store import DefaultInstanceStore
5 |
6 | class Event(metaclass=DefaultInstanceStore):
7 | """
8 | ```python
9 | Event['stuffHappen'](stuff)
10 |
11 | @Event.on('stuffHappen')
12 | def handler(stuff:str)->None:
13 | ...
14 | ```
15 |
16 | """
17 | _handlers = defaultdict(list)
18 | SEMAPHOR = {
19 | "END":"END"
20 | }
21 |
22 | @classmethod
23 | def on(cls, eventName):
24 | """
25 | Decorator to register a function as an event handler for a specific event name.
26 |
27 | :param eventName: The name of the event to listen for.
28 | """
29 | def _wrapper(func):
30 | cls._handlers[eventName].append(func)
31 | return func
32 | return _wrapper
33 |
34 | def _set_item(self,item:str):
35 | """
36 | Internal method to set the current event name.
37 |
38 | :param item: The name of the event.
39 | """
40 | self.item = item
41 |
42 | def __call__(self, *args):
43 | def run_handler(handler, args):
44 | handler(*args)
45 |
46 | for handler in self._handlers[self.item]:
47 | t = threading.Thread(target=run_handler, args=(handler, args))
48 | t.start()
49 |
50 |
51 |
52 | Event['_default'] = Event()
53 |
54 |
--------------------------------------------------------------------------------
/Agentix/agentix/entities/message.py:
--------------------------------------------------------------------------------
1 | """
2 | Defines the Message class, which represents a single message in a conversation.
3 | Each message has a role (e.g., 'user' or 'system') and content (the text of the message).
4 | """
5 | from copy import deepcopy
6 |
7 |
8 | class Message:
9 | """
10 | Represents a single message in a conversation.
11 | """
12 |
13 | def __init__(self, role: str, content: str):
14 | """
15 | Initializes a new Message instance.
16 | :param role: The role of the message sender (e.g., 'user', 'system').
17 | :param content: The text content of the message.
18 | """
19 | if role.strip() == "A":
20 | role = "assistant"
21 | self.role = role
22 | self.content = content
23 |
24 | def to_dict(self, openai=False) -> dict:
25 | """
26 | Converts the message to a dictionary format, optionally in a format suitable for OpenAI API.
27 |
28 | :param openai: If True, formats the dictionary for OpenAI API consumption.
29 | :return: A dictionary representation of the message.
30 | """
31 | """Converts the message to a dictionary."""
32 | return {
33 | 'role': self.role,
34 | 'content': self.content,
35 | }
36 |
37 | def __repr__(self):
38 | emoji = {
39 | 'system': '🖥️',
40 | 'user': '👤',
41 | 'assistant': '🤖',
42 | }[self.role]
43 |
44 | return f"Message({emoji}:{self.content})\n"
45 |
--------------------------------------------------------------------------------
/tests/test_tool.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from cogni import tool, Tool
3 |
4 |
5 | def test_tool_tool_decorator():
6 | """Test that the tool decorator properly registers functions"""
7 | @tool
8 | def test_sample_tool():
9 | return "sample result"
10 |
11 | assert "test_sample_tool" in Tool
12 | assert Tool["test_sample_tool"]() == "sample result"
13 |
14 |
15 | def test_tool_tool_access():
16 | """Test accessing tools through the Tool container"""
17 | @tool
18 | def test_another_tool(x: int) -> int:
19 | return x * 2
20 |
21 | assert Tool["test_another_tool"](5) == 10
22 |
23 |
24 | def test_tool_tool_nonexistent():
25 | """Test accessing a non-existent tool raises KeyError"""
26 | with pytest.raises(KeyError):
27 | Tool["nonexistent_tool"]
28 |
29 |
30 | def test_tool_tool_multiple_registration():
31 | """Test registering multiple tools"""
32 | results = []
33 |
34 | @tool
35 | def test_tool1():
36 | results.append(1)
37 |
38 | @tool
39 | def test_tool2():
40 | results.append(2)
41 |
42 | Tool["test_tool1"]()
43 | Tool["test_tool2"]()
44 |
45 | assert results == [1, 2]
46 |
47 |
48 | def test_tool_tool_with_args_kwargs():
49 | """Test tool with various argument types"""
50 | @tool
51 | def test_complex_tool(a: int, b: str = "default") -> str:
52 | return f"{a}-{b}"
53 |
54 | assert Tool["test_complex_tool"](1) == "1-default"
55 | assert Tool["test_complex_tool"](2, "custom") == "2-custom"
56 |
--------------------------------------------------------------------------------
/Agentix/bricks/tools/TermFacade.py:
--------------------------------------------------------------------------------
1 | from agentix import tool
2 | from typing import Optional, List, Dict, Union
3 |
4 | @tool
5 | def TF_run_command(
6 | command: str,
7 | session_name: Optional[str] = None,
8 | timeout: Optional[int] = None
9 | ) -> str:
10 | """Execute a command in a terminal session"""
11 | ...
12 |
13 | @tool
14 | def TF_ensure_session(
15 | session_name: str,
16 | session_type: str = "shell",
17 | config: Optional[Dict] = None
18 | ) -> bool:
19 | """Ensure a terminal session exists and is properly configured"""
20 | ...
21 |
22 | @tool
23 | def TF_set_current_session(session_name: str) -> bool:
24 | """Set the active terminal session"""
25 | ...
26 |
27 | @tool
28 | def TF_run_python_file(
29 | file_path: str,
30 | args: Optional[List[str]] = None,
31 | env: Optional[Dict[str, str]] = None
32 | ) -> str:
33 | """Execute a Python file in a dedicated session"""
34 | ...
35 |
36 | @tool
37 | def TF_get_session_output(
38 | session_name: str,
39 | last_n_lines: Optional[int] = None
40 | ) -> List[str]:
41 | """Get output from a terminal session"""
42 | ...
43 |
44 | @tool
45 | def TF_send_input(
46 | session_name: str,
47 | input_text: str,
48 | end: str = "\n"
49 | ) -> bool:
50 | """Send input to a terminal session"""
51 | ...
52 |
53 | @tool
54 | def TF_wait_for_pattern(
55 | session_name: str,
56 | pattern: str,
57 | timeout: Optional[int] = None
58 | ) -> bool:
59 | """Wait for a specific pattern in terminal output"""
60 | ...
61 |
--------------------------------------------------------------------------------
/Agentix/agentix/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | from .magicimport import dynamic_import
3 |
4 | from .wrappers import (Tool,
5 | Agent,
6 | tool,
7 | use_tools,
8 | mw,
9 | MW,
10 | Func,
11 | func,
12 | Log,
13 | log,
14 | Event,
15 | Endpoint,
16 | endpoint,
17 | Page,
18 | page,
19 | Component,
20 | component,
21 | get,
22 | post,
23 | Conf,
24 | Services,
25 | State,
26 | Store,
27 | SocketManager,
28 | )
29 |
30 |
31 | from .entities import Message, Conversation, ModuleInfo
32 | from .utils import logger
33 | from .utils.exec import Exec
34 |
35 |
36 | class _Stuff:
37 | _instance = None
38 |
39 |
40 | if not _Stuff._instance:
41 | _Stuff._instance = _Stuff()
42 | Stuff = _Stuff._instance
43 |
44 | # cwd = os.getcwd()
45 | # os.chdir('/home/val/algotrade')
46 |
47 |
48 | dynamic_import('middlewares')
49 | dynamic_import('tools')
50 | dynamic_import('loggers')
51 | dynamic_import('funcs')
52 | dynamic_import('agents')
53 | dynamic_import('endpoints')
54 | dynamic_import('utils')
55 | # dynamic_import('nodes')
56 | dynamic_import('code')
57 |
58 | # os.chdir(cwd)
59 |
60 | __all__ = [
61 | 'Store'
62 | ]
63 |
--------------------------------------------------------------------------------
/agents/Gitor/agent/tools/gitor_tools.py:
--------------------------------------------------------------------------------
1 | from agentix import tool, State
2 |
3 |
4 |
5 | @tool
6 | def gitor_current_repo():
7 | if not 'Gitor' in State:
8 | State['Gitor'] = {
9 | "current_repo":"no_repo_yet",
10 | "repos":{}
11 | }
12 | return State['Gitor']['current_repo']
13 |
14 | @tool
15 | def gitor_pwd():
16 | return 'tatayoyo'
17 |
18 | @tool
19 | def gitor_last_reported_commit():
20 | current_repo = State['Gitor'].current_repo
21 | if not current_repo in State['Gitor']['repos']:
22 | return "no_commit_yet"
23 | return State['Gitor']['repos'][current_repo].last_commit
24 |
25 | @tool
26 | def report_git_change(diff, repo, commit_id):
27 | """
28 | Reports a git change by updating the state with the new commit information and diff
29 |
30 | Args:
31 | diff (str): The diff content of the commit
32 | repo (str): The repository name
33 | commit_id (str): The commit identifier
34 | """
35 | if not 'Gitor' in State:
36 | State['Gitor'] = {
37 | "current_repo": "no_repo_yet",
38 | "repos": {}
39 | }
40 |
41 | if not repo in State['Gitor']['repos']:
42 | State['Gitor']['repos'][repo] = {
43 | "last_commit": "no_commit_yet",
44 | "diffs": {
45 |
46 | }
47 | }
48 | import time
49 | State['Gitor']['repos'][repo]['last_commit'] = commit_id
50 | State['Gitor']['repos'][repo]['diffs'][commit_id] = {
51 | "timestamp": int(time.time()),
52 | "handled": False,
53 | "commit_id": commit_id,
54 | "diff": diff
55 | }
56 | State['Gitor']['current_repo'] = repo
57 |
58 | return "Reported :)"
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/page.py:
--------------------------------------------------------------------------------
1 | from .func_wrapper import file_property
2 | from .instances_store import InstancesStore
3 | import os
4 |
5 | class Page(metaclass=InstancesStore):
6 | py = file_property('_ploup.py')
7 | template = file_property('tpl.vue')
8 | css = file_property('css.vue')
9 | js = file_property('js.vue')
10 | prompt = file_property('prompt.conv') # New property for .conv files
11 | user_stories = file_property('user_stories.md') # New property for .md files
12 | test_cases = file_property('test_cases.md') # New property for .md files
13 | tests = file_property('tests.py') # New property for .py files
14 | mock = file_property('mock.py') # New property for .py files
15 | vars = file_property('vars.py') # New property for .py files
16 |
17 | def __getitem__(self, k):
18 | return getattr(self, k)
19 |
20 | def __setitem__(self, key: str, value) -> None:
21 | setattr(self, key, value)
22 |
23 | def __init__(self, name: str):
24 | self.name = name
25 |
26 | cwd = os.getcwd()
27 | self.dir_path = os.path.join(cwd, "bricks/pages", name)
28 | self.out_path = os.path.join(cwd, f"front/pages/{name}.vue")
29 |
30 | os.makedirs(self.dir_path, exist_ok=True)
31 |
32 | self.py = f"""from agentix import page
33 | page('{name}')"""
34 |
35 | def export(self):
36 | try:
37 | Page[self.name] = self
38 | except:
39 | ...
40 | nuxt_page = f"""
41 | {self.template}
42 |
43 |
46 |
47 | """
50 |
51 | with open(self.out_path, "w") as f:
52 | f.write(nuxt_page)
53 | page = Page
54 |
--------------------------------------------------------------------------------
/Agentix/agentix/facade.py:
--------------------------------------------------------------------------------
1 | class Agentix:
2 | """Facade to a lot of moving parts
3 | """
4 |
5 | @staticmethod
6 | def all_endpoints_full() -> str:
7 |         """Meant to be used by gen1 agents.
8 |         Returns all the endpoints with their internal state.
9 | """
10 |
11 | @staticmethod
12 | def all_pages_full() -> str:
13 | """Returns all the pages with their full configurations."""
14 | from agentix.wrappers import Page
15 | pages_info = []
16 | for name, page in Page.items():
17 | pages_info.append(f"Page {name}: Template - {page.template}, CSS - {page.css}, JS - {page.js}")
18 | return "\n".join(pages_info)
19 |
20 | @staticmethod
21 | def all_components_full() -> str:
22 | """Returns all the components with their full configurations."""
23 | # This method would similarly enumerate all components, similar to all_pages_full.
24 | # Implementation would depend on how components are structured and stored.
25 | return "Components listing and their configurations."
26 |
27 | @staticmethod
28 | def all_functions_full() -> str:
29 | """Returns all the functions with their full configurations."""
30 | from agentix.wrappers import Func
31 | functions_info = []
32 | for name, func in Func.items():
33 | functions_info.append(f"Function {name}: Code - {func.code}")
34 | return "\n".join(functions_info)
35 |
36 | @staticmethod
37 | def all_entities_full() -> str:
38 | """Returns all the entities with their full configurations."""
39 | # This method would enumerate all entities, similar to all_pages_full.
40 | # Implementation would depend on how entities are structured and stored.
41 | return "Entities listing and their configurations."
42 |
--------------------------------------------------------------------------------
/tests/test_middleware.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from cogni.wrappers import MW, InstancesStore
3 |
4 | def test_mw_registration():
5 | """Test middleware registration and wrapper creation."""
6 | @MW.register
7 | def sample_mw(ctx, conv):
8 | return conv + "_processed"
9 |
10 | assert "sample_mw" in MW
11 | assert MW["sample_mw"]({"test": True}, "input") == "input_processed"
12 |
13 | def test_mw_custom_name():
14 | """Test registration with custom name."""
15 | @MW.register
16 | def sample_mw(ctx, conv, extra=""):
17 | return conv + extra
18 |
19 | wrapper = MW.register(sample_mw, "custom_name")
20 | assert wrapper.name == "custom_name"
21 | assert wrapper({"test": True}, "input", "_modified") == "input_modified"
22 |
23 | def test_mw_repr():
24 | """Test string representation."""
25 | @MW.register
26 | def sample_mw(ctx, conv): pass
27 | assert repr(MW["sample_mw"]) == "MW['sample_mw']"
28 |
29 | def test_mw_multiple_registration():
30 | """Test registering multiple middleware."""
31 | results = []
32 |
33 | @MW.register
34 | def mw1(ctx, conv):
35 | results.append(1)
36 | return conv
37 |
38 | @MW.register
39 | def mw2(ctx, conv):
40 | results.append(2)
41 | return conv
42 |
43 | MW["mw1"]({"test": True}, "input")
44 | MW["mw2"]({"test": True}, "input")
45 |
46 | assert results == [1, 2]
47 |
48 | def test_mw_chaining():
49 | """Test chaining multiple middleware."""
50 | @MW.register
51 | def add_a(ctx, conv):
52 | return conv + "a"
53 |
54 | @MW.register
55 | def add_b(ctx, conv):
56 | return conv + "b"
57 |
58 | result = add_b({"test": True}, add_a({"test": True}, "input"))
59 | assert result == "inputab"
60 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/instances_store.py:
--------------------------------------------------------------------------------
1 | class InstancesStore(type):
2 | """
3 | A metaclass for storing and retrieving Agent/MW instances.
4 | """
5 | def __new__(metacls, name, bases, dct):
6 | # Create a new class with a unique instance store
7 | cls = super().__new__(metacls, name, bases, dct)
8 | cls._store = {} # Instance store specific to each class
9 | return cls
10 |
11 | def __getitem__(cls, item: str):
12 | if '/' in item:
13 | item = item.split('/')[-1]
14 | try:
15 | return cls._store[item]
16 | except KeyError:
17 | if 'Null' in cls._store:
18 | nullInstance = cls._store['Null']
19 | nullInstance.setinfo(item)
20 | return nullInstance
21 | raise KeyError(f"No {cls.__name__} with name '{item}' found.")
22 |
23 | def __setitem__(cls, key: str, value) -> None:
24 | # if key in cls._store:
25 | # raise KeyError(f"{cls.__name__} with name '{key}' already exists.")
26 | cls._store[key] = value
27 |
28 | def __contains__(cls, item: str) -> bool:
29 | return item in cls._store
30 |
31 | def keys(cls):
32 | return cls._store.keys()
33 |
34 | def values(cls):
35 | return cls._store.values()
36 |
37 | def items(cls):
38 | return cls._store.items()
39 |
40 | def get(cls, item: str, default=None):
41 | return cls._store.get(item, default)
42 |
43 |
44 | class DefaultInstanceStore(InstancesStore):
45 | def __getitem__(cls, item: str):
46 |
47 | val = super().get(item) or super().get('_default')
48 | if val is None:
49 | raise KeyError(
50 | f"No {cls.__name__} with name '{item}' found and no default instance available.")
51 | try:
52 | val._set_item(item)
53 | except:
54 | ...
55 | return val
56 |
--------------------------------------------------------------------------------
/tests/test_wrappers.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from cogni.wrappers import FuncWrapper, InstancesStore
3 |
4 | # Test FuncWrapper
5 | def test_func_wrapper_registration():
6 | """Test function registration and wrapper creation."""
7 | def sample_func(x: int) -> int:
8 | return x * 2
9 |
10 | wrapper = FuncWrapper.register(sample_func)
11 | assert wrapper.name == "sample_func"
12 | assert wrapper(5) == 10
13 |
14 | def test_func_wrapper_custom_name():
15 | """Test registration with custom name."""
16 | def sample_func(x: int) -> int:
17 | return x * 2
18 |
19 | wrapper = FuncWrapper.register(sample_func, "custom_name")
20 | assert wrapper.name == "custom_name"
21 | assert wrapper(5) == 10
22 |
23 | def test_func_wrapper_repr():
24 | """Test string representation."""
25 | def sample_func(): pass
26 | wrapper = FuncWrapper.register(sample_func)
27 | assert repr(wrapper) == "FuncWrapper['sample_func']"
28 |
29 | # Test InstancesStore
30 | def test_instances_store():
31 | """Test instance storage and retrieval."""
32 | class TestContainer(metaclass=InstancesStore):
33 | pass
34 |
35 | instance = TestContainer()
36 | TestContainer['test'] = instance
37 |
38 | assert TestContainer['test'] is instance
39 | assert 'test' in TestContainer
40 |
41 | def test_instances_store_missing_key():
42 | """Test accessing non-existent instance."""
43 | class TestContainer(metaclass=InstancesStore):
44 | pass
45 |
46 | with pytest.raises(KeyError):
47 | _ = TestContainer['nonexistent']
48 |
49 | def test_instances_store_multiple_classes():
50 | """Test isolation between different classes using InstancesStore."""
51 | class Container1(metaclass=InstancesStore):
52 | pass
53 |
54 | class Container2(metaclass=InstancesStore):
55 | pass
56 |
57 | Container1['test'] = 'value1'
58 | Container2['test'] = 'value2'
59 |
60 | assert Container1['test'] != Container2['test']
61 |
--------------------------------------------------------------------------------
/Agentix/AgenticParadigms.md:
--------------------------------------------------------------------------------
1 | # Agentic paradigms
2 | ## There is a paradigm
3 |
4 | If you're writing code, the language you're using and the way you conceive your code, all of that, whether you know it or not, will be done under [A Paradigm](https://en.wikipedia.org/wiki/Programming_paradigm).
5 |
6 | A paradigm can be viewed as a set of statements
7 | about what conceptual objects to consider, their properties, and in what way they should interact.
8 |
9 | ## Agentic has paradigms too
10 |
11 | We can postulate that agentic, like code writing, is done under a paradigm (that is yet to be defined).
12 |
13 | Agentic's moving parts, the way they're defined and the way they interact, will also fit into a paradigm, so defining them is useful.
14 |
15 | ## My best try at defining paradigms
16 |
17 | ### Monolithic Agent
18 | ChatGPT (though it's debatable whether it's an agent, as it has no feedback loop) would be a Monolithic Agent.
19 | ```
20 | You're a helpful assistant.
21 | # Instructions
22 | {instructions}
23 | # Context
24 | {context}
25 | # Tools
26 | {tools}
27 | ```
28 | ### Swarm
29 | #### Swarm of clone
30 | Very useful as a prompting strategy.
31 | Tree of thoughts would be an example.
32 | #### Swarm of specialists
33 | CrewAI.
34 | A bunch of agents with a role, a context, tools and some kind of shared space to communicate.
35 | #### Tree of specialists
36 | Or **Agents as code**
37 | Agents interact in a fixed way.
38 | ##### Functional agentic
39 | (what **Agentix** is all about).
40 | > Agents are functions
41 |
42 | _______________
43 |
44 | Broad intuitions:
45 |
46 | An LLM configured as a chatbot works by being fed user inputs, conversation history, and its own output until a STOP token is hit.
47 |
48 | What's presented to the LLM can be dynamic and evolve according to the LLM outputs.
49 |
50 | That's the "LLM as a computer" intuition.
51 |
52 | Another approach would be to consider hyper-specialised agents that are part of an algorithm.
53 |
54 | I'm not sure where this can lead, but a lot of cursors can be identified and some of the responsibility could shift from the LLM to the cognitive architecture.
--------------------------------------------------------------------------------
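
To make the "swarm of clones" idea above concrete, here is a hypothetical sketch in the agents-as-functions style used elsewhere in this repo; `brainstormer` and `judge` are illustrative agent names, not agents that ship with Agentix:

```python
from agentix import Agent

def swarm_of_clones(user_input: str, n: int = 3) -> str:
    # Run the same specialist agent n times on the same input...
    candidates = [Agent['brainstormer'](user_input) for _ in range(n)]
    # ...then let a judge agent pick or merge the best answer.
    return Agent['judge'](candidates)
```
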
/Agentix/bricks/tools/parser.py:
--------------------------------------------------------------------------------
1 | import re
2 | from agentix import tool
3 |
4 |
5 | @tool
6 | def parser(opening_tag, ending_tag) -> callable:
7 | """
8 | Parses text enclosed between specified opening and ending tags.
9 |
10 | Args:
11 | opening_tag (str): The opening tag to look for.
12 | ending_tag (str): The ending tag to look for.
13 |
14 | Returns:
15 | callable: A function that takes a string and returns a list of parsed segments.
16 | """
17 | def parse(text: str):
18 |
19 | segments = text.split(opening_tag)
20 | results = []
21 | for segment in segments[1:]:
22 | end_idx = segment.find(ending_tag)
23 | if end_idx != -1:
24 | results.append(segment[:end_idx].strip())
25 | return results
26 | return parse
27 |
28 |
29 | @tool
30 | def xml_parser(tag_name):
31 | def parse(xml_string):
32 | from copy import deepcopy
33 | result = {}
34 | # Regex to find content, capturing attributes and content
35 |         tag_pattern = f'<{tag_name}(.*?)>(.*?)</{tag_name}>'
36 | tags = re.findall(tag_pattern, xml_string, re.DOTALL)
37 |
38 | for attrs, content in tags:
39 | # Extract all attributes
40 | attr_dict = dict(re.findall(r'(\w+)="([^"]+)"', attrs))
41 |
42 | foulard = deepcopy(attr_dict)
43 | for attr_to_dump in ['format', 'name']:
44 | if attr_to_dump in foulard:
45 | del foulard[attr_to_dump]
46 | attr_dict['kwargs'] = foulard
47 |
48 | # Add content to the attribute dictionary
49 | attr_dict['content'] = content.strip()
50 |
51 | # Use the 'name' attribute as the key in the result dictionary if it exists
52 | name = attr_dict.get('name', f'unnamed_{len(result)}')
53 |
54 | # Store the raw XML string
55 |             raw_xml = f'<{tag_name}{attrs}>{content}</{tag_name}>'
56 | attr_dict['raw'] = raw_xml.strip()
57 |
58 | result[name] = attr_dict
59 | return result
60 | return parse
61 |
--------------------------------------------------------------------------------
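
A quick usage sketch of the two parser factories above, assuming `agentix` is importable so the `@tool` registrations have run; the tag names and input strings are illustrative:

```python
from agentix import Tool

# parser: extract text between a literal opening and closing tag.
parse_shell = Tool['parser']('<shell>', '</shell>')
print(parse_shell("noise <shell>ls -la</shell> then <shell>pwd</shell>"))
# -> ['ls -la', 'pwd']

# xml_parser: extract attributes and content for every <task> element.
parse_tasks = Tool['xml_parser']('task')
result = parse_tasks('<task name="build" format="json">compile the project</task>')
print(result['build']['content'])  # 'compile the project'
print(result['build']['kwargs'])   # {} ('name' and 'format' are dropped from kwargs)
```
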
/agents/ChangeDescriptor/agent/prompts/ChangeDescriptor.conv:
--------------------------------------------------------------------------------
1 | system: You are ChangeDescriptor, an AGI agent that provides summaries of changes in the latest commit of a Git repository.
2 | You analyze commit messages and diffs to generate concise summaries.
3 |
4 | To use the shell, use this syntax:
5 | command
6 |
7 | Use to think step by step before replying.
8 |
9 | /!\ Never more than ONE tool use per output!
10 |
11 | Your session_name will always be "change_desc"
12 |
13 | __-__
14 |
15 | user: Can you describe the latest changes in the repo?
16 |
17 | __-__
18 |
19 | assistant:
20 | 1. First, I need to ensure the repository is up to date
21 | 2. Then, I'll fetch the latest commit details
22 | 3. Analyze the commit message and diff for a summary
23 |
24 | git pull
25 |
26 | __-__
27 |
28 | system: Already up to date.
29 |
30 | __-__
31 |
32 | assistant:
33 | 1. Repository is up to date
34 | 2. Now, I'll get the latest commit details
35 |
36 | git log -1 --pretty=format:"%H %an %ad %s" --date=short
37 |
38 | __-__
39 |
40 | system: 8cf36f3 Liam Dugan 2023-05-24 Add version number
41 |
42 | __-__
43 |
44 | assistant:
45 | 1. I have the commit details
46 | 2. Now, I'll analyze the changes
47 |
48 | git diff HEAD~1 HEAD
49 |
50 | __-__
51 |
52 | system: diff --git a/whisper/__init__.py b/whisper/__init__.py
53 | index 9f7a875..c1b6a6a 100644
54 | --- a/whisper/__init__.py
55 | +++ b/whisper/__init__.py
56 | @@ -1,3 +1,4 @@
57 | +# Version 1.0.0
58 | from .audio import load_audio, log_mel_spectrogram, pad_or_trim
59 | from .decoding import DecodingOptions, DecodingResult, decode, detect_language
60 | from .model import Whisper, ModelDimensions
61 |
62 | __-__
63 |
64 | assistant:
65 | 1. Analyzed the diff
66 | 2. Ready to summarize the changes
67 |
68 | Latest commit summary:
69 | - Commit Hash: 8cf36f3
70 | - Author: Liam Dugan
71 | - Date: 2023-05-24
72 | - Message: "Add version number"
73 | - Changes: Added version 1.0.0 to `__init__.py`
74 |
75 | Need anything else? 😊
76 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/socket.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | import threading
3 |
4 |
5 | def printou(w):
6 | return
7 | from rich import print
8 | print('[red]__EVENT__')
9 | print(f"{w=}")
10 |
11 |
12 | class SocketManager:
13 | _handlers = {}
14 | instance = None
15 |
16 | def __init__(self, socketio):
17 | self.socketio = socketio
18 | # socketio.on('event', printou)
19 | if socketio is None:
20 | return
21 | for event_type, handler in self._handlers.items():
22 | socketio.on(event_type)(handler)
23 |
24 | @classmethod
25 | def handle(cls, json_data):
26 | def run_handler(hndlr, jd):
27 | hndlr(jd)
28 | event_type = json_data.get('eventType')
29 |
30 | handler = cls._handlers.get(event_type, False)
31 | if not handler:
32 | # print(f"No handler for {event_type}")
33 | return
34 | t = threading.Thread(target=run_handler, args=(handler, json_data))
35 | t.start()
36 |
37 | @classmethod
38 | def register(cls, event_type) -> callable:
39 |
40 | assert cls.instance is None, "You should register all handlers before init"
41 |
42 | def decorator(func):
43 | assert not hasattr(func, '_sockethandler')
44 | setattr(func, '_sockethandler', True)
45 | assert event_type not in cls._handlers, f"already a handler for {event_type}"
46 |
47 | cls._handlers[event_type] = func
48 |
49 | return func
50 | return decorator
51 | on = register
52 |
53 | @classmethod
54 | def init(cls, socketio):
55 | if cls.instance is None:
56 | cls.instance = cls(socketio)
57 | return cls.instance
58 |
59 | @classmethod
60 | def emit(cls, event_type, payload, broadcast=False):
61 | if cls.instance is None:
62 | cls.instance = cls(None)
63 | payload['eventType'] = event_type
64 | payload['_type'] = 'event'
65 | if not '_ttl' in payload:
66 | payload['_ttl'] = 5
67 | else:
68 | payload['_ttl'] -= 1
69 |
70 | if payload['_ttl'] > 0:
71 | cls.instance.handle(payload)
72 | if cls.instance.socketio:
73 | # , broadcast=broadcast)
74 | cls.instance.socketio.emit('event', payload)
75 |
--------------------------------------------------------------------------------
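
A small usage sketch of the `SocketManager` above; handlers must be registered before `init()` is called, and passing `None` as the socketio instance (an assumption for local testing) still lets `emit` dispatch to local handlers:

```python
from agentix.wrappers.socket import SocketManager

@SocketManager.on('user_joined')
def on_user_joined(payload: dict) -> None:
    print(f"user joined: {payload.get('name')}")

SocketManager.init(None)                            # no real socket.io server attached
SocketManager.emit('user_joined', {'name': 'Ada'})  # payload gets eventType/_type/_ttl added,
                                                    # and the handler runs in a thread
```
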
/Agentix/bricks/tools/tpl.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | import random
3 | import re
4 | import string
5 | from agentix import tool, Conversation, Message
6 |
7 |
8 | @tool
9 | def tpl_s(tpl_str: str, **kwargs) -> str:
10 | for key, value in kwargs.items():
11 | if not isinstance(value, (str, int, float)):
12 | continue
13 | tpl_str = tpl_str.replace('{'+key+'}', str(value))
14 |
15 | return tpl_str
16 |
17 |
18 | @tool
19 | def tpl_c(conv: Conversation, **kwargs) -> Conversation:
20 | new_conv = deepcopy(conv)
21 |
22 | for m in new_conv.msgs:
23 | m.content = tpl_s(m.content, **kwargs)
24 |
25 | return new_conv
26 |
27 |
28 | @tool
29 | def tpl(conv, **kwargs):
30 | if isinstance(conv, str):
31 | return tpl_s(conv, **kwargs)
32 | if isinstance(conv, Conversation):
33 | return tpl_c(conv, **kwargs)
34 |
35 | raise ValueError("Input must be a string or an instance of Conversation.")
36 |
37 |
38 | def generate_random_id():
39 | letters = string.ascii_lowercase
40 | return ''.join(random.choice(letters) for _ in range(3)) + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(3))
41 |
42 |
43 | @tool
44 | def tpl_tool(conv):
45 | from agentix import Tool
46 | tool_uses = {}
47 | modified_content = conv
48 |
49 | # Find all {tool:xxx} patterns
50 | tool_pattern = r'\{tool:([^\}]+)\}'
51 |
52 | if isinstance(conv, str):
53 | matches = re.finditer(tool_pattern, conv)
54 | for match in matches:
55 | tool_to_use = match.group(1)
56 | random_id = generate_random_id()
57 | tool_uses[random_id] = Tool[tool_to_use]()
58 | modified_content = modified_content.replace(
59 | match.group(0), f'{{{random_id}}}')
60 |
61 | elif isinstance(conv, Conversation):
62 | new_conv = deepcopy(conv)
63 | for msg in new_conv.msgs:
64 | matches = re.finditer(tool_pattern, msg.content)
65 | for match in matches:
66 | tool_to_use = match.group(1)
67 | random_id = generate_random_id()
68 | tool_uses[random_id] = Tool[tool_to_use]()
69 | msg.content = msg.content.replace(
70 | match.group(0), f'{{{random_id}}}')
71 | modified_content = new_conv
72 |
73 | return tpl(modified_content, **tool_uses)
74 |
--------------------------------------------------------------------------------
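
A short sketch of the templating helpers above (same assumption that `agentix` is importable); the placeholder names and values are illustrative:

```python
from agentix import Tool

# tpl_s replaces {key} placeholders with string/int/float keyword values.
print(Tool['tpl_s']("Hello {name}, you have {n} tasks", name="Ada", n=3))
# -> 'Hello Ada, you have 3 tasks'

# tpl dispatches to tpl_s for strings and to tpl_c for Conversation objects.
print(Tool['tpl']("Model: {model}", model="gpt-4o"))
# -> 'Model: gpt-4o'
```
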
/Agentix/bricks/tools/doc.md:
--------------------------------------------------------------------------------
1 | # Discord Bot API Documentation
2 |
3 | The base url is http://localhost:5000/
4 |
5 | ## Endpoints
6 |
7 | ### POST /post
8 | Post a message to a channel
9 | - Body: `{"content": "message text", "channel": "channel_name"}`
10 | - Channel defaults to "general" if not specified
11 | - Returns: `{"status": "success", "message_id": "id"}`
12 |
13 | ### POST /send_message/{channel_id}
14 | Send message to channel by ID
15 | - Body: `{"content": "message text"}`
16 | - Returns: Message ID and status
17 |
18 | ### POST /send_message_by_name/{channel_name}
19 | Send message to channel by name
20 | - Body: `{"content": "message text"}`
21 | - Returns: Message ID and status
22 |
23 | ### POST /reply/{message_id}/{reply_text}
24 | Reply to a specific message
25 | - Returns: Reply message ID and status
26 |
27 | ### POST /send_dm
28 | Send direct message to user defined in user.yaml
29 | - Body: `{"content": "message text"}`
30 | - Returns: Message ID and status
31 |
32 | ### POST /send_dm/{user_id}/{message}
33 | Send direct message to user
34 | - Returns: Message ID and status
35 |
36 | ### POST /edit/{message_id}
37 | Edit a bot message
38 | - Body: `{"content": "new text"}`
39 | - Returns: Status and message ID
40 |
41 | ### GET /messages
42 | List all bot messages
43 |
44 | ### GET /wip
45 | Generate channel mapping JSON file
46 | - Creates channels.json with name->id mapping
47 | - Returns: Success status
48 |
49 | ### GET /kill
50 | Shutdown the bot and server
51 |
52 | ## Available Channels
53 |
54 | - général (1258679259434713091)
55 | - general (1311278452933922837)
56 | - lucy (1312108804036366346)
57 | - coder-log (1312108841441427496)
58 | - hitl (1312113326922137750)
59 | - config (1312115776601194548)
60 | - front (1312115830791602236)
61 | - tool-maker (1312367815474286633)
62 | - prompter (1312367843651747870)
63 | - general-logs (1312367880150585427)
64 |
65 | ## Message Events
66 |
67 | The bot automatically tracks messages from user "poltronsuperstar":
68 | - Stores message content, timestamp, channel info
69 | - Tracks attachments, mentions, embeds
70 | - Records message edits and other metadata
71 | - Maintains list of active channels
72 | - Saves all data to user.yaml
73 |
74 | For other users:
75 | - Auto-replies to DMs with "I'll think about it"
76 | - Forwards messages to external service endpoint:
77 | POST http://localhost:5555/post
78 | Body: {"id": "message_id", "content": "message_content", "channel": "channel_name"}
79 | Note: For DMs, channel is set to "DM"
80 |
--------------------------------------------------------------------------------
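
A hedged example of calling the bot API documented above with the standard `requests` library; the endpoint and payload shape come from the doc, the channel name and message text are illustrative:

```python
import requests

BASE_URL = "http://localhost:5000"

# POST /post: the channel defaults to "general" when omitted.
resp = requests.post(f"{BASE_URL}/post", json={
    "content": "Build finished",
    "channel": "general-logs",
})
print(resp.json())  # e.g. {"status": "success", "message_id": "..."}
```
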
/cogni/entities/message.py:
--------------------------------------------------------------------------------
1 | """
2 | Defines the Message class, which represents a single message in a conversation.
3 | Each message has a role (e.g., 'user' or 'system') and content (the text of the message).
4 | """
5 | from copy import deepcopy
6 |
7 |
8 | class Message:
9 | """
10 | Represents a single message in a conversation.
11 | """
12 |
13 | def __init__(self, role: str, content: str):
14 | """
15 | Initializes a new Message instance.
16 | :param role: The role of the message sender (e.g., 'user', 'system').
17 | :param content: The text content of the message.
18 | """
19 | if role.strip() == "A":
20 | role = "assistant"
21 | self.role = role
22 | self.content = content
23 | self._original_content = content
24 |
25 | def to_dict(self) -> dict:
26 | """
27 |         Converts the message to a dictionary.
28 | 
29 |         :return: A dictionary representation of the message
30 |                  containing the role and content.
31 |         """
32 | 
33 | return {
34 | 'role': self.role,
35 | 'content': self.content,
36 | }
37 |
38 | def parse(self, parser):
39 | self.content = parser(self._original_content)
40 |
41 | def __repr__(self):
42 | import shutil
43 | import textwrap
44 |
45 | terminal_width = shutil.get_terminal_size().columns
46 | margin = 2
47 | effective_width = terminal_width - 2 * margin - 4
48 |
49 | emoji = {
50 | 'system': '🖥️',
51 | 'user': '🧑',
52 | 'assistant': '🤖',
53 | }[self.role]
54 |
55 | pad = {
56 | 'user': 1,
57 | 'system': 2,
58 | 'assistant': 0
59 | }
60 |
61 | role_display = f" │ {emoji} {self.role}:".ljust(
62 | effective_width + 4 + pad[self.role]) + "│"
63 | border_top = f" {'╭' + '─' * (effective_width + 2) + '╮'}\n"
64 | border_bottom = f" {'╰' + '─' * (effective_width + 2) + '╯'}\n"
65 |
66 | wrapped_lines = textwrap.wrap(self.content, width=effective_width)
67 |
68 | content_with_margin = "\n".join(
69 | f" │ {line.ljust(effective_width)} │" for line in wrapped_lines
70 | )
71 |
72 | return f"{border_top}{role_display}\n{content_with_margin}\n{border_bottom}"
73 |
--------------------------------------------------------------------------------
/Agentix/AgentsAsFunctions.md:
--------------------------------------------------------------------------------
1 | [Back to README](README.md)
2 | # Agents are functions
3 |
4 | Here we'll describe the behavior of Agentix's Agents: what you should consider when **USING** them (as opposed to their implementation details).
5 |
6 |
7 | Said otherwise, this is a **Black box description** of what Agents are in Agentix.
8 |
9 | ## Agents, Are, Functions.
10 | From the outside, an **Agent is conceptually a function**. Oh, and also actually **a python function**.
11 |
12 | It ingests inputs.
13 |
14 | It returns outputs of given types.
15 |
16 | It mutates some states here and there.
17 |
18 | It's a Function.
19 |
20 |
21 | While this principle is simple and doesn't look like much, it proved unexpectedly powerful for implementing complex control flow over LLM inferences.
22 |
23 | **Note:** Agentix's Agents are functions in the programming sense, not (yet) in a stricter sense.
24 |
25 | Here's how "Agents are functions" works. **Example of control flow with Agentix:**
26 | ```python
27 | for task in Agent['task_lister'](user_input):
28 | Agent['task_executor'](task)
29 | ```
30 |
31 | Here's an actual piece of code I used to implement a long term memory:
32 |
33 |
34 | ```python
35 | for fact in Agent['LTM_fact_extract'](user_input):
36 | Agent['LTM_fact_store'](
37 | Agent['LTM_compress_fact'](fact)
38 | )
39 |
40 | context = Agent['LTM_fact_recall'](user_input)
41 | ```
42 |
43 | (Bear in mind that each Agent/function call resulted in one or several conversations, LLM contexts, and arbitrarily long feedback loops.)
44 |
45 |
46 | It allows for strong algorithmic decoupling, which in turn makes it easy to have very specialized agents.
47 |
48 | Akin to a function, an agent can itself be an arbitrarily complex set of agents, resulting for a given task in many contexts/conversations.
49 |
50 | **Note**: One thing I first thought of as a tradeoff: my approach is heavy in LLM calls. In reality, that's balanced by the fact that it requires fewer generated tokens overall than other approaches (when done correctly). Also, some Agents can run with `gpt-3.5-turbo`.
51 |
52 | _______________
53 |
54 |
55 |
56 | **Experimental takeaways of this approach**
57 | * Increased overall performance
58 | * Multi-LLM architecture (some tasks can be handled by _gpt-3.5-turbo_) with gains in both speed and cost.
59 |
60 |
61 | ## Agentic Single Responsibility Principle
62 | **Agentic Single Responsibility Principle** states (I should know, I made it up) that an Agent should do one thing.
63 |
64 | (You should trust me on this one)
65 |
66 | [Back to README](README.md)
67 |
68 |
--------------------------------------------------------------------------------
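
To illustrate the point above that an agent can itself be a set of agents, here is a hypothetical composition sketch; the agent names are illustrative, not part of Agentix:

```python
from agentix import Agent

def research(question: str) -> str:
    # One callable from the outside, several specialist agents inside,
    # each with its own conversations and feedback loops.
    queries = Agent['query_planner'](question)
    findings = [Agent['searcher'](q) for q in queries]
    return Agent['synthesizer'](findings)
```
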
/doc/quickstart/first_tool.md:
--------------------------------------------------------------------------------
1 | # Creating Your First Tool
2 |
3 | Tools are standalone functions that can be used by both developers and agents. They provide reusable functionality that can be accessed globally throughout your project.
4 |
5 | ## Basic Tool Example
6 |
7 | Create `tools/calculator.py`:
8 |
9 | ```python
10 | from cogni import tool
11 |
12 | @tool
13 | def add_numbers(a: int, b: int) -> int:
14 | """Add two numbers together.
15 |
16 | Args:
17 | a: First number
18 | b: Second number
19 |
20 | Returns:
21 | Sum of the two numbers
22 | """
23 | return a + b
24 | ```
25 |
26 | ## Using Tools
27 |
28 | Tools can be accessed anywhere in your project:
29 |
30 | ```python
31 | from cogni import Tool
32 |
33 | # Use the tool directly
34 | result = Tool['add_numbers'](5, 3)
35 | print(result) # Output: 8
36 |
37 | # Tools are also available to agents
38 | @tool
39 | def complex_math(x: int) -> int:
40 | """Perform complex math using other tools."""
41 | result = Tool['add_numbers'](x, 10)
42 | return result * 2
43 | ```
44 |
45 | ## Tool Best Practices
46 |
47 | 1. Clear Documentation
48 | - Always include docstrings
49 | - Document parameters and return types
50 | - Provide usage examples
51 |
52 | 2. Error Handling
53 | - Validate inputs
54 | - Return meaningful error messages
55 | - Handle edge cases
56 |
57 | 3. Naming Conventions
58 | - Use descriptive names
59 | - Follow verb_noun pattern for actions
60 | - Be consistent across project
61 |
62 | ## Advanced Features
63 |
64 | ### Tool with State
65 |
66 | ```python
67 | from cogni import tool, State
68 | import time
69 | @tool
70 | def save_result(calculation: str, result: float):
71 | """Save a calculation result to state."""
72 | if 'calculations' not in State:
73 | State['calculations'] = []
74 | State['calculations'].append({
75 | 'calculation': calculation,
76 | 'result': result,
77 | 'timestamp': time.time()
78 | })
79 | ```
80 |
81 | ### Async Tools
82 |
83 | ```python
84 | from cogni import tool
85 | import aiohttp
86 | @tool
87 | async def fetch_data(url: str) -> dict:
88 | """Fetch JSON data from URL."""
89 | async with aiohttp.ClientSession() as session:
90 | async with session.get(url) as response:
91 | return await response.json()
92 | ```
93 |
94 | ## Next Steps
95 |
96 | - Learn how to [create an agent](first_agent.md) that uses your tools
97 | - Explore [state management](states.md) for persistent data
98 | - See how to create [HTTP endpoints](endpoints.md)
99 |
--------------------------------------------------------------------------------
/Agentix/bricks/system/llm/middlewares/llm.py:
--------------------------------------------------------------------------------
1 | from rich import print
2 | from rich.syntax import Syntax
3 | from rich.table import Table
4 | from rich.console import Console
5 | from rich.panel import Panel
6 | from agentix import mw
7 |
8 |
9 | @mw
10 | def llm(ctx, conv):
11 |
12 | conv.should_infer = ctx['hops'] == 0
13 | return conv
14 |
15 |
16 | @mw
17 | def gpt3(ctx, conv):
18 | conv.should_infer = ctx['hops'] == 0
19 | conv.llm = 'gpt-3.5-turbo'
20 | return conv
21 |
22 |
23 | @mw
24 | def gpt4(ctx, conv):
25 | conv.should_infer = ctx['hops'] == 0
26 | conv.llm = 'gpt-4-turbo-preview'
27 | return conv
28 |
29 |
30 | @mw
31 | def aider_ask(ctx, conv):
32 | conv.should_infer = ctx['hops'] == 0
33 | conv.llm = 'aider_ask'
34 | return conv
35 |
36 |
37 | @mw
38 | def aider_code(ctx, conv):
39 | conv.should_infer = ctx['hops'] == 0
40 | conv.llm = 'aider_code'
41 | return conv
42 |
43 |
44 | @mw
45 | def gpt4o(ctx, conv):
46 | conv.should_infer = ctx['hops'] == 0
47 | conv.llm = 'gpt-4o'
48 | return conv
49 |
50 |
51 | @mw
52 | def gpt4omini(ctx, conv):
53 | conv.should_infer = ctx['hops'] == 0
54 | conv.llm = 'gpt-4o-mini'
55 | return conv
56 |
57 |
58 | @mw
59 | def o1mini(ctx, conv):
60 | conv.should_infer = ctx['hops'] == 0
61 | conv.llm = 'o1-mini'
62 | return conv
63 |
64 |
65 | @mw
66 | def stream(ctx, conv):
67 | conv._flags['stream'] = True
68 | return conv
69 |
70 |
71 | @mw
72 | def debug(ctx, conv):
73 | console = Console()
74 |
75 | # Header
76 | header = Panel(
77 | "[bold red]DEBUG",
78 | border_style="yellow",
79 | expand=False,
80 | padding=(1, 1),
81 | )
82 | console.print(header)
83 |
84 | # Context
85 | ctx_table = Table(title="Context", show_header=True,
86 | header_style="bold magenta")
87 | ctx_table.add_column("Key", style="cyan", no_wrap=True)
88 | ctx_table.add_column("Value", style="green")
89 |
90 | for key, value in ctx.items():
91 | ctx_table.add_row(str(key), str(value))
92 |
93 | console.print(Panel(ctx_table, expand=False, border_style="blue"))
94 |
95 | # Conversation
96 | conv_syntax = Syntax(str(conv), "python",
97 | theme="monokai", line_numbers=True)
98 | console.print(Panel(conv_syntax, title="Conversation",
99 | expand=False, border_style="green"))
100 |
101 | # Footer
102 | footer = Panel(
103 | "[bold red]End of Debug Output",
104 | border_style="yellow",
105 | expand=False,
106 | padding=(1, 1),
107 | )
108 | console.print(footer)
109 |
110 | quit()
111 |
112 |
113 | @mw
114 | def last_msg_content(ctx, conv):
115 | return conv[-1].content
116 |
--------------------------------------------------------------------------------
/doc/quickstart/endpoints.md:
--------------------------------------------------------------------------------
1 | # Creating HTTP Endpoints
2 |
3 | Cogni makes it easy to expose your agents and tools via HTTP endpoints.
4 |
5 | ## Basic Endpoint
6 |
7 | ```python
8 | from cogni import endpoint
9 |
10 | @endpoint
11 | def hello():
12 | return {"message": "Hello World"}
13 | ```
14 |
15 | ## Custom Routes
16 |
17 | ```python
18 | from cogni import endpoint
19 |
20 | @endpoint('/api/v1/data')
21 | def get_data():
22 | return {"data": "some data"}
23 |
24 | @endpoint('/users/<user_id>')
25 | def get_user(user_id: str):
26 | return {"user_id": user_id}
27 | ```
28 |
29 | ## HTTP Methods
30 |
31 | ```python
32 | from cogni import get, post, put, delete
33 |
34 | @get('/items')
35 | def list_items():
36 | return {"items": [...]}
37 |
38 | @post('/items')
39 | def create_item():
40 | return {"status": "created"}
41 |
42 | @put('/items/<id>')
43 | def update_item(id: str):
44 | return {"status": "updated"}
45 |
46 | @delete('/items/<id>')
47 | def delete_item(id: str):
48 | return {"status": "deleted"}
49 | ```
50 |
51 | ## Agent Endpoints
52 |
53 | ```python
54 | from cogni import endpoint, Agent
55 | from flask import request
56 | @endpoint('/chat')
57 | def chat():
58 | message = request.json['message']
59 | agent = Agent['ChatBot']
60 | response = agent(message)
61 | return {"response": response}
62 | ```
63 |
64 | ## Request Handling
65 |
66 | ```python
67 | from cogni import endpoint
68 | from flask import request
69 |
70 | @endpoint('/upload')
71 | def upload():
72 | if 'file' not in request.files:
73 | return {"error": "No file"}, 400
74 |
75 | file = request.files['file']
76 | # Process file
77 | return {"status": "uploaded"}
78 | ```
79 |
80 | ## Authentication
81 |
82 | ```python
83 | from cogni import endpoint
84 | from functools import wraps
85 |
86 | def require_auth(f):
87 | @wraps(f)
88 | def decorated(*args, **kwargs):
89 | auth = request.headers.get('Authorization')
90 | if not auth:
91 | return {"error": "No auth"}, 401
92 | return f(*args, **kwargs)
93 | return decorated
94 |
95 | @endpoint('/secure')
96 | @require_auth
97 | def secure():
98 | return {"data": "secret"}
99 | ```
100 |
101 | ## WebSocket Support
102 |
103 | ```python
104 | from cogni import websocket
105 |
106 | @websocket('/ws')
107 | def ws_handler(ws):
108 | while True:
109 | message = ws.receive()
110 | ws.send(f"Echo: {message}")
111 | ```
112 |
113 | ## Error Handling
114 |
115 | ```python
116 | from cogni import endpoint, HTTPError
117 |
118 | @endpoint('/risky')
119 | def risky():
120 | try:
121 | # Risky operation
122 | raise ValueError("Something went wrong")
123 | except ValueError as e:
124 | raise HTTPError(400, str(e))
125 | ```
126 |
127 | ## Running the Server
128 |
129 | ```bash
130 | cogni serve --port 5000
131 | ```
132 |
133 | ## Next Steps
134 |
135 | - Add [state management](states.md)
136 | - Create a [swarm of agents](first_swarm.md)
137 | - Implement monitoring
138 |
--------------------------------------------------------------------------------
/doc/quickstart/first_agent.md:
--------------------------------------------------------------------------------
1 | # Creating Your First Agent: ShellAgent
2 |
3 | This guide walks through creating a shell command assistant agent.
4 |
5 | ## 1. Create Agent Structure
6 |
7 | ```bash
8 | cogni create-agent ShellAgent
9 | ```
10 |
11 | This creates:
12 | ```
13 | agents/
14 | └── ShellAgent/
15 | ├── prompts/
16 | │ └── shell_agent.conv
17 | ├── middlewares/
18 | │ └── shell_loop.py
19 | └── agent.py
20 | ```
21 |
22 | ## 2. Define the Prompt
23 |
24 | Edit `prompts/shell_agent.conv`:
25 |
26 | ```
27 | system: You are ShellAgent, a CLI assistant.
28 | You help users with shell commands.
29 |
30 | Instructions:
31 | 1. Understand the user's request
32 | 2. Suggest appropriate commands
33 | 3. Explain what each command does
34 | 4. Use safe commands only
35 |
36 | Tools available:
37 | command
38 | path
39 |
40 | user: {user_input}
41 | ```
42 |
43 | ## 3. Implement Middleware
44 |
45 | Edit `middlewares/shell_loop.py`:
46 |
47 | ```python
48 | from cogni import mw, Tool
49 |
50 | @mw
51 | def shell_loop(ctx, conv):
52 | """Process shell commands safely."""
53 | # Get user's last message
54 | user_msg = conv[-1].content
55 |
56 | # Process through LLM
57 | response = conv.rehop(
58 | f"I'll help you with: {user_msg}",
59 | role="assistant"
60 | )
61 |
62 | return response
63 | ```
64 |
65 | ## 4. Create Required Tools
66 |
67 | Create `tools/shell_tools.py`:
68 |
69 | ```python
70 | from cogni import tool
71 | import subprocess
72 | import os
73 | @tool
74 | def run_command(cmd: str) -> str:
75 | """Run a shell command safely."""
76 | # Add safety checks here
77 | result = subprocess.run(
78 | cmd,
79 | shell=True,
80 | capture_output=True,
81 | text=True
82 | )
83 | return result.stdout
84 |
85 | @tool
86 | def check_permissions(path: str) -> bool:
87 | """Check if we have permission to access path."""
88 | return os.access(path, os.R_OK | os.W_OK)
89 | ```
90 |
91 | ## 5. Register the Agent
92 |
93 | In `agent.py`:
94 |
95 | ```python
96 | from cogni import Agent
97 |
98 | Agent('ShellAgent', 'prompt|gpt4|shell_loop')
99 | ```
100 |
101 | ## Using the Agent
102 |
103 | ```python
104 | from cogni import Agent
105 |
106 | # Create agent instance
107 | shell = Agent['ShellAgent']
108 |
109 | # Ask for help
110 | response = shell("How do I list files in the current directory?")
111 | print(response)
112 | ```
113 |
114 | ## Safety Considerations
115 |
116 | 1. Input Validation
117 | - Sanitize all commands
118 | - Check permissions
119 | - Limit allowed commands
120 |
121 | 2. Error Handling
122 | - Catch and handle exceptions
123 | - Provide clear error messages
124 | - Prevent dangerous operations
125 |
126 | 3. Monitoring
127 | - Log all commands
128 | - Track usage patterns
129 | - Set up alerts
130 |
131 | ## Next Steps
132 |
133 | - Create a [swarm of agents](first_swarm.md)
134 | - Add [state management](states.md)
135 | - Create [HTTP endpoints](endpoints.md)
136 |
--------------------------------------------------------------------------------
/Agentix/agentix/utils/exec.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | import json
3 | import os
4 | import time
5 | from typing import List, Optional
6 | from toolz import reduce
7 |
8 | @dataclasses.dataclass
9 | class Event:
10 | """Represents an execution event, either entering or exiting a named state."""
11 | event_type: str # 'enter' or 'exit'
12 | target: str
13 |
14 | class Exec:
15 | """Handles execution states using event sourcing, with events persisted as files."""
16 |
17 |
18 | _instance = None
19 | @classmethod
20 | def get_instance(cls):
21 | cls._instance = cls._instance or cls()
22 |
23 | return cls._instance
24 |
25 |
26 | def __init__(self, events_dir: str = 'events') -> None:
27 | """Initializes the executor with a directory to store events."""
28 | self.events_dir = events_dir
29 | os.makedirs(self.events_dir, exist_ok=True)
30 |
31 | def _write_event(self, event: Event) -> None:
32 | """Writes a single event to a uniquely named file."""
33 | timestamp = str(int(time.time() * 1000000)) # Microsecond precision for filenames
34 | filename = os.path.join(self.events_dir, f"{timestamp}.json")
35 | with open(filename, 'w') as f:
36 | f.write(json.dumps(dataclasses.asdict(event)))
37 |
38 | def enter(self, target: str) -> None:
39 | """Records an 'enter' event for a given target."""
40 | self._write_event(Event('enter', target))
41 |
42 | def exit(self) -> None:
43 | """Records an 'exit' event for the most recent 'enter' target, if any."""
44 | current_cursor = self.current_cursor
45 | if current_cursor:
46 | self._write_event(Event('exit', current_cursor))
47 |
48 | def _load_events(self) -> List[Event]:
49 | """Loads all events from the file system, sorted by their filenames (timestamps)."""
50 | files = sorted(os.listdir(self.events_dir))
51 | return [Event(**json.loads(open(os.path.join(self.events_dir, file), 'r').read())) for file in files]
52 |
53 | @property
54 | def current_cursor(self) -> Optional[str]:
55 | """Returns the current cursor, i.e., the target of the most recent 'enter' event not yet exited."""
56 | return self.execution_path[-1] if self.execution_path else None
57 |
58 | @property
59 | def execution_path(self) -> List[str]:
60 | """Returns the list of active 'enter' targets leading to the current cursor."""
61 | events = self._load_events()
62 | active_nodes = reduce(lambda acc, e: acc + [e.target] if e.event_type == 'enter' else acc[:-1], events, [])
63 | return active_nodes
64 |
65 | def generate_mermaid_flowchart(self) -> str:
66 | """Generates a Mermaid flowchart representation of the execution flow."""
67 | mermaid = "graph TD\n"
68 | stack = []
69 | for event in self._load_events():
70 | if event.event_type == 'enter':
71 | if stack:
72 | mermaid += f"{stack[-1]} --> {event.target}\n"
73 | stack.append(event.target)
74 | elif stack:
75 | stack.pop()
76 | return mermaid
77 |
--------------------------------------------------------------------------------
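
A minimal usage sketch of the event-sourced `Exec` tracker above; it persists one JSON file per event under `./events` (the default directory), assuming the module path matches the layout shown:

```python
from agentix.utils.exec import Exec

ex = Exec.get_instance()
ex.enter('pipeline')
ex.enter('fetch_data')
ex.exit()                 # closes 'fetch_data'
ex.enter('train_model')

print(ex.execution_path)  # ['pipeline', 'train_model']
print(ex.current_cursor)  # 'train_model'
print(ex.generate_mermaid_flowchart())
# graph TD
# pipeline --> fetch_data
# pipeline --> train_model
```
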
/Agentix/agentix/wrappers/event_store.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import time
4 | from datetime import datetime
5 | from typing import Any, Dict, List, Union
6 | from pathlib import Path
7 |
8 | class _EventStore:
9 | def __init__(self):
10 | self._stores_dir = './.stores'
11 | os.makedirs(self._stores_dir, exist_ok=True)
12 |
13 | def _get_store_path(self, store_name: str) -> str:
14 | """Get the full path for a store directory"""
15 | return os.path.join(self._stores_dir, store_name)
16 |
17 | def _get_next_event_path(self, store_name: str) -> str:
18 | """Get the path for the next event file"""
19 | store_path = self._get_store_path(store_name)
20 | os.makedirs(store_path, exist_ok=True)
21 |
22 | existing_files = [f for f in os.listdir(store_path) if f.endswith('.json')]
23 | next_num = len(existing_files)
24 | return os.path.join(store_path, f"{next_num}.json")
25 |
26 | def _save_event(self, store_name: str, event: Dict) -> None:
27 | """Save an event with timestamp"""
28 | event['timestamp'] = int(time.time() * 1000) # Millisecond timestamp
29 | path = self._get_next_event_path(store_name)
30 |
31 | with open(path, 'w') as f:
32 | json.dump(event, f, indent=2)
33 |
34 | def _load_events(self, store_name: str) -> List[Dict]:
35 | """Load all events from a store"""
36 | store_path = self._get_store_path(store_name)
37 | if not os.path.exists(store_path):
38 | return []
39 |
40 | events = []
41 | for file_name in sorted(os.listdir(store_path)):
42 | if file_name.endswith('.json'):
43 | with open(os.path.join(store_path, file_name), 'r') as f:
44 | events.append(json.load(f))
45 | return events
46 |
47 | class _StoreProxy:
48 | def __init__(self, store_name: str, parent: '_EventStore'):
49 | self.store_name = store_name
50 | self.parent = parent
51 |
52 | def __iadd__(self, event: Dict) -> '_EventStore._StoreProxy':
53 | """Handle += operator for adding events"""
54 | self.parent._save_event(self.store_name, event)
55 | return self
56 |
57 | def all(self) -> List[Dict]:
58 | """Get all events in the store"""
59 | return self.parent._load_events(self.store_name)
60 |
61 | def after(self, threshold: Union[datetime, int]) -> List[Dict]:
62 | """Get events after a specific timestamp or datetime"""
63 | if isinstance(threshold, datetime):
64 | threshold_ts = int(threshold.timestamp() * 1000)
65 | else:
66 | threshold_ts = int(threshold)
67 |
68 | return [
69 | event for event in self.all()
70 | if event['timestamp'] > threshold_ts
71 | ]
72 |
73 | def __getitem__(self, store_name: str) -> _StoreProxy:
74 | """Get a store proxy for the given name"""
75 | return self._StoreProxy(store_name, self)
76 |
77 | Store = _EventStore()
78 |
--------------------------------------------------------------------------------
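
A usage sketch of the `Store` singleton above. The proxy is bound to a local name first, since `_EventStore` defines `__getitem__` but no `__setitem__`, so `Store['name'] += event` as a single expression would fail; the store name and payloads are illustrative:

```python
from agentix.wrappers.event_store import Store

deployments = Store['deployments']   # _StoreProxy backed by ./.stores/deployments/
deployments += {"service": "api", "version": "1.2.3"}
deployments += {"service": "worker", "version": "0.9.0"}

print(deployments.all())     # both events, each with a millisecond 'timestamp' added
print(deployments.after(0))  # events whose timestamp is strictly greater than 0
```
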
/Agentix/agentix/wrappers/endpoint.py:
--------------------------------------------------------------------------------
1 | # endpoint.py
2 | from flask import Flask, request, jsonify
3 | from functools import wraps
4 | from typing import Callable, Dict, Any, List
5 |
6 | from typing import List, Dict, Any, Callable, Union, TypeVar, overload
7 |
8 | T = TypeVar('T', bound=Callable[..., Any])
9 |
10 |
11 | class Endpoint:
12 | """
13 | A class to register and serve decorated functions as Flask endpoints.
14 | """
15 | _endpoints: List[Dict[str, Any]] = []
16 |
17 | @classmethod
18 | @overload
19 | def decorate(cls, http_method: str) -> Callable[[T], T]: ...
20 |
21 | @classmethod
22 | @overload
23 | def decorate(cls, http_method: str) -> Callable[[
24 | str], Callable[[T], T]]: ...
25 |
26 | @classmethod
27 | def decorate(cls, http_method: str) -> Union[Callable[[T], T], Callable[[str], Callable[[T], T]]]:
28 | """
29 | Decorator to register a function as a Flask endpoint.
30 | Supports both @get and @get('/path') patterns.
31 |
32 | Args:
33 | http_method (str): HTTP method for the endpoint (e.g., 'GET', 'POST').
34 |
35 | Returns:
36 | Callable: The decorator.
37 | """
38 | def decorator(func_or_path: Union[T, str]) -> Union[T, Callable[[T], T]]:
39 | # If decorator is used with path argument (@get('/path'))
40 | if isinstance(func_or_path, str):
41 | def inner_decorator(func: T) -> T:
42 | endpoint_info = {
43 | 'func': func,
44 | 'method': http_method.upper(),
45 | 'endpoint': func_or_path if func_or_path.startswith('/') else f'/{func_or_path}',
46 | 'registered':False,
47 | }
48 | cls._endpoints.append(endpoint_info)
49 | return func
50 | return inner_decorator
51 |
52 | # If decorator is used without path argument (@get)
53 | func = func_or_path
54 | endpoint_info = {
55 | 'func': func,
56 | 'method': http_method.upper(),
57 | 'endpoint': f"/{func.__name__}".replace('_', '/'),
58 | 'registered': False,
59 |
60 | }
61 |
62 | if endpoint_info['endpoint'] == '/root':
63 | endpoint_info['endpoint'] = '/'
64 | cls._endpoints.append(endpoint_info)
65 | return func
66 |
67 | return decorator
68 |
69 | @classmethod
70 | def register_endpoints(cls, app: Flask) -> None:
71 | """
72 | Register all collected endpoints with a Flask application.
73 |
74 | Args:
75 | app: Flask application instance
76 | """
77 | for endpoint in cls._endpoints:
78 | if endpoint['registered']: continue
79 | app.add_url_rule(
80 | endpoint['endpoint'], # URL rule
81 | view_func=endpoint['func'], # View function
82 | methods=[endpoint['method']] # HTTP methods
83 | )
84 |
85 | endpoint['registered'] = True
86 |
87 | get = Endpoint.decorate('GET')
88 | post = Endpoint.decorate('POST')
89 | endpoint = get
--------------------------------------------------------------------------------
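
A minimal sketch of wiring the decorators above into a Flask app; the routes and function names are illustrative:

```python
from flask import Flask
from agentix.wrappers.endpoint import Endpoint, get, post

@get('/health')
def health():
    return {'status': 'ok'}

@post
def items_create():
    # Without an explicit path the route is derived from the function
    # name: 'items_create' -> '/items/create'.
    return {'created': True}

app = Flask(__name__)
Endpoint.register_endpoints(app)
# app.run(port=5000)
```
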
/doc/quickstart/states.md:
--------------------------------------------------------------------------------
1 | # Managing State in Cogni
2 |
3 | State management is crucial for maintaining context and persistence in your agent applications.
4 |
5 | ## Basic State Usage
6 |
7 | ```python
8 | from cogni import State
9 |
10 | # Set state
11 | State['my_state'] = {'key': 'value'}
12 |
13 | # Get state
14 | value = State['my_state'].key
15 |
16 | # Check if state exists
17 | if 'my_state' in State:
18 | print("State exists!")
19 |
20 | # Delete state
21 | del State['my_state']
22 | ```
23 |
24 | ## State Types
25 |
26 | ### Dictionary State
27 | ```python
28 | # Attribute-style access
29 | State['config'].debug = True
30 | State['config'].api_key = "sk-..."
31 |
32 | # Dictionary-style access
33 | State['config']['timeout'] = 30
34 | ```
35 |
36 | ### List State
37 | ```python
38 | # Initialize list
39 | State['history'] = []
40 |
41 | # Append items
42 | State['history'].append("event")
43 |
44 | # Access items
45 | latest = State['history'][-1]
46 | ```
47 |
48 | ## Persistence
49 |
50 | State is automatically persisted to disk:
51 |
52 | ```python
53 | # State is saved in .states directory
54 | State['user_preferences'] = {
55 | 'theme': 'dark',
56 | 'language': 'en'
57 | } # Automatically persisted
58 |
59 | # Load persisted state
60 | preferences = State['user_preferences']
61 | ```
62 |
63 | ## State Patterns
64 |
65 | ### Conversation History
66 | ```python
67 | @tool
68 | def save_message(msg: dict):
69 | if 'conversation' not in State:
70 | State['conversation'] = []
71 | State['conversation'].append({
72 | 'content': msg['content'],
73 | 'timestamp': time.time(),
74 | 'role': msg['role']
75 | })
76 | ```
77 |
78 | ### User Preferences
79 | ```python
80 | @tool
81 | def get_user_settings(user_id: str) -> dict:
82 | if 'users' not in State:
83 | State['users'] = {}
84 | if user_id not in State['users']:
85 | State['users'][user_id] = {
86 | 'language': 'en',
87 | 'timezone': 'UTC'
88 | }
89 | return State['users'][user_id]
90 | ```
91 |
92 | ### Caching
93 | ```python
94 | @tool
95 | def cached_api_call(url: str) -> dict:
96 | if 'api_cache' not in State:
97 | State['api_cache'] = {}
98 |
99 | if url in State['api_cache']:
100 | if time.time() - State['api_cache'][url]['timestamp'] < 3600:
101 | return State['api_cache'][url]['data']
102 |
103 | data = requests.get(url).json()
104 | State['api_cache'][url] = {
105 | 'data': data,
106 | 'timestamp': time.time()
107 | }
108 | return data
109 | ```
110 |
111 | ## Best Practices
112 |
113 | 1. State Organization
114 | - Use clear naming conventions
115 | - Group related data
116 | - Document state structure
117 |
118 | 2. Error Handling
119 | - Check for existence
120 | - Provide defaults
121 | - Handle race conditions
122 |
123 | 3. Performance
124 | - Cache frequently accessed data
125 | - Clean up old state
126 | - Use appropriate data structures
127 |
128 | ## Next Steps
129 |
130 | - Create [HTTP endpoints](endpoints.md)
131 | - Build a [swarm of agents](first_swarm.md)
132 | - Implement monitoring
133 |
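134 | ## Complete Example
135 | 
136 | As a minimal, illustrative sketch combining the patterns above (the tool and state names are made up for the example), here is a tool that provides defaults, appends to a history list, and cleans up old entries:
137 | 
138 | ```python
139 | import time
140 | 
141 | from cogni import State, tool
142 | 
143 | 
144 | @tool
145 | def log_event(event: str, max_age: float = 3600.0) -> list:
146 |     # Provide defaults: create the state entry on first use
147 |     if 'events' not in State:
148 |         State['events'] = []
149 | 
150 |     # Append the new event with a timestamp
151 |     State['events'].append({'event': event, 'timestamp': time.time()})
152 | 
153 |     # Clean up old state: keep only entries younger than max_age
154 |     State['events'] = [
155 |         e for e in State['events']
156 |         if time.time() - e['timestamp'] < max_age
157 |     ]
158 |     return State['events']
159 | ```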
--------------------------------------------------------------------------------
/agents/ChangeLogAgent/agent/prompts/ChangeLogAgent.conv:
--------------------------------------------------------------------------------
1 | system: You are ChangeLogAgent, an AGI agent that analyzes GitHub repositories and provides clear summaries of recent commits.
2 | You can interact with other specialized agents to get more information.
3 |
4 | ## **Tools**
5 | Your message to the agent
6 | Your message to human
7 | Write a Discord post
8 |
9 | ## **Output format**
10 | Use to break down your analysis process.
11 |
12 | /!\ Only ONE agent interaction per message!
13 |
14 | ## **Workflow**
15 |
16 | You're given a Github repo, you orchestrate agents to post changelogs to Discord, every time a commit is made.
17 |
18 | ## **Agents**
19 | The agents you can talk to:
20 |
21 | - **Gitor**: Inits the git repo
22 | - **ChangeDescriptor**: Gives you a summary of what was implemented or fixed in the last commit
23 | - **WebHookScheduler**: Ask her to prompt you when a commit is done
24 |
25 | __-__
26 |
27 | user: Can you analyze the repo "https://github.com/openai/whisper" ?
28 |
29 | __-__
30 |
31 | A:
32 | 1. First, I need to initialize the repo connection
33 | 2. I'll talk to Gitor to set this up
34 |
35 |
36 | Hi Gitor :),
37 |
38 | I'm ChangeLogAgent.
39 | Please initialize connection to https://github.com/openai/whisper and prepare for monitoring.
40 |
41 | Thanks a lot :)
42 |
43 | __-__
44 |
45 | system: Repository initialized successfully, lol.
46 | Ready for monitoring ;).
47 |
48 | __-__
49 |
50 | A:
51 | 1. Now that the repo is initialized, I should get the latest commit details
52 | 2. ChangeDescriptor can help with this
53 |
54 |
55 | Hi ChangeDescriptor!
56 |
57 | Could you analyze the latest commit from the Whisper repo and give me a clear summary of the changes?
58 |
59 | Thanks!
60 |
61 | __-__
62 |
63 | system: Latest commit adds support for Apple Silicon devices and fixes memory leaks in the transcription pipeline. Changes affect core processing modules.
64 |
65 | __-__
66 |
67 | A:
68 | 1. I have the information about the latest commit
69 | 2. I should set up monitoring for future commits
70 | 3. Then post the current changes to Discord
71 |
72 |
73 | 🔄 **Whisper Repo Update**
74 |
75 | Latest changes:
76 | • Added support for Apple Silicon devices
77 | • Fixed memory leaks in transcription pipeline
78 | • Core processing modules updated
79 |
80 | Stay tuned for more updates! 🚀
81 |
82 | __-__
83 |
84 | system:The post has been sent, lol.
85 |
86 | __-__
87 |
88 | A:
89 | 1. Now I need to set up continuous monitoring
90 | 2. WebHookScheduler can help with this
91 |
92 |
93 | Hi WebHookScheduler!
94 |
95 | Please set up monitoring for the Whisper repo and notify me when new commits are detected.
96 |
97 | Thanks!
98 |
99 | __-__
100 |
101 | system:done
102 |
103 | __-__
104 |
105 | user: Sorry to interrupt, we'll work on another repo now
106 |
107 | __-__
108 |
109 | A:- I should wait for more instructions
110 | - I should reply casually
111 |
112 | OK, waiting for your instructions, boss :)
--------------------------------------------------------------------------------
/doc/quickstart/first_swarm.md:
--------------------------------------------------------------------------------
1 | # Creating Your First Swarm: DiscordAgent
2 |
3 | A swarm consists of multiple specialized agents working together. We'll create a Discord bot using multiple coordinated agents.
4 |
5 | ## Project Structure
6 |
7 | ```
8 | agents/
9 | └── DiscordAgent/
10 | ├── agents/
11 | │ ├── message_handler.py # Processes incoming messages
12 | │ ├── command_parser.py # Parses commands
13 | │ └── response_formatter.py # Formats responses
14 | ├── prompts/
15 | │ ├── message_handler.conv
16 | │ ├── command_parser.conv
17 | │ └── response_formatter.conv
18 | └── tools/
19 | └── discord_tools.py
20 | ```
21 |
22 | ## 1. Create the Agents
23 |
24 | ### Message Handler
25 | ```python
26 | from cogni import Agent
27 |
28 | Agent('MessageHandler', 'prompt|gpt4|message_handler_loop')
29 | ```
30 |
31 | ### Command Parser
32 | ```python
33 | from cogni import Agent
34 |
35 | Agent('CommandParser', 'prompt|gpt3|command_parser_loop')
36 | ```
37 |
38 | ### Response Formatter
39 | ```python
40 | from cogni import Agent
41 |
42 | Agent('ResponseFormatter', 'prompt|gpt3|response_formatter_loop')
43 | ```
44 |
45 | ## 2. Define Communication Flow
46 |
47 | ```python
48 | from cogni import tool, Agent
49 |
50 | @tool
51 | def process_message(message: str) -> str:
52 | # 1. Message Handler processes raw input
53 | context = Agent['MessageHandler'](message)
54 |
55 | # 2. Command Parser extracts intent
56 | command = Agent['CommandParser'](context)
57 |
58 | # 3. Response Formatter creates reply
59 | response = Agent['ResponseFormatter'](command)
60 |
61 | return response
62 | ```
63 |
64 | ## 3. Implement Discord Integration
65 |
66 | ```python
67 | import discord
68 | from cogni import Tool
69 |
70 | class DiscordBot(discord.Client):
71 | async def on_message(self, message):
72 | if message.author == self.user:
73 | return
74 |
75 | response = Tool['process_message'](message.content)
76 | await message.channel.send(response)
77 | ```
78 |
79 | ## 4. State Management
80 |
81 | ```python
82 | from cogni import State
83 |
84 | # Store conversation history
85 | @tool
86 | def save_conversation(msg: dict):
87 | if 'conversations' not in State:
88 | State['conversations'] = []
89 | State['conversations'].append(msg)
90 |
91 | # Track user preferences
92 | @tool
93 | def get_user_preferences(user_id: str) -> dict:
94 | return State['preferences'].get(user_id, {})
95 | ```
96 |
97 | ## 5. Running the Swarm
98 |
99 | ```python
100 | # Initialize all agents
101 | Agent['MessageHandler']
102 | Agent['CommandParser']
103 | Agent['ResponseFormatter']
104 |
105 | # Start Discord bot
106 | client = DiscordBot()
107 | client.run('your-token-here')
108 | ```
109 |
110 | ## Benefits of Swarm Architecture
111 |
112 | 1. Specialization
113 | - Each agent has a specific role
114 | - Optimized for different tasks
115 | - Can use different models
116 |
117 | 2. Scalability
118 | - Easy to add new agents
119 | - Horizontal scaling
120 | - Independent updates
121 |
122 | 3. Reliability
123 | - Fault isolation
124 | - Easy to debug
125 | - Simple to test
126 |
127 | ## Next Steps
128 |
129 | - Add [HTTP endpoints](endpoints.md)
130 | - Implement [state persistence](states.md)
131 | - Add monitoring and logging
132 |
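133 | ## Bonus: Trying the Flow Without Discord
134 | 
135 | Because every agent and tool is just a callable, you can exercise the whole pipeline from a script or a REPL before wiring up Discord. A minimal sketch (assumes the agents and the `process_message` tool defined above are loaded):
136 | 
137 | ```python
138 | from cogni import Tool
139 | 
140 | # Drive the same chain the Discord bot would use
141 | reply = Tool['process_message']("!help music commands")
142 | print(reply)
143 | ```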
--------------------------------------------------------------------------------
/Agentix/bricks/tools/State.md:
--------------------------------------------------------------------------------
1 | # State Management in Agentix
2 |
3 | ## State Architecture
4 |
5 | - **_State**: Manages state entries, loading, saving, callbacks.
6 | - **StateDict**: Dict wrapper, attribute access, persistence.
7 | - **StateList**: List wrapper, persistence.
8 |
9 | ## Projects Handling
10 |
11 | Projects are stored as:
12 |
13 | ```json
14 | {
15 | "projects": {
16 | "current_project": "ProjectAlpha",
17 | "projects": {
18 | "ProjectAlpha": {
19 | "base_dir": "/path/to/project",
20 | "files_to_add": ["src/main.py", "src/utils.py"],
21 | "files_to_drop": ["src/old_module.py"],
22 | "aider_session": {
23 | "omni": "ProjectAlpha_omni_session"
24 | }
25 | }
26 | }
27 | },
28 | "current_task": {
29 | "description": "Implement authentication",
30 | "acceptance_criteria": ["User can register", "User can login"]
31 | },
32 | "discord": {
33 | "message_map": {},
34 | "callbacks": {
35 | "general": "general_channel_handler"
36 | }
37 | }
38 | }
39 | ```
40 |
41 | - **Pointers**: `current_project`, `current_task` reference specific states.
42 |
43 | ## Using State
44 |
45 | ### Setting State
46 |
47 | ```python
48 | from agentix import State
49 |
50 | State['new_state'] = {'key1': 'value1'}
51 | State['current_project'].description = "Updated Project Description"
52 | ```
53 |
54 | ### Getting State
55 |
56 | ```python
57 | "NonexistentState" in State # > False
58 |
59 | current_project = State['current_project'].to_dict()
60 | base_dir = State['projects']['ProjectAlpha'].base_dir
61 | ```
62 |
63 | ### Deleting State
64 |
65 | ```python
66 | del State['obsolete_state']
67 | ```
68 |
69 | ## State Persistence
70 |
71 | - Auto-saved to `./.states/*.json`.
72 | - Callbacks triggered on changes.
73 |
74 | ## Accessing State as JSON
75 |
76 | ```python
77 | import json
78 |
79 | state_json = json.dumps(State['projects'].to_dict(), indent=2)
80 | print(state_json)
81 | ```
82 |
83 | ## Example Usage
84 |
85 | ### Initialize Project
86 |
87 | ```python
88 | from agentix import State
89 |
90 | State['projects'] = {
91 | "current_project": "ProjectAlpha",
92 | "projects": {}
93 | }
94 |
95 | State['projects']['projects']['ProjectAlpha'] = {
96 | "base_dir": "/path/to/project",
97 | "files_to_add": ["src/main.py", "src/utils.py"],
98 | "files_to_drop": ["src/old_module.py"],
99 | "aider_session": {
100 | "omni": "ProjectAlpha_omni_session"
101 | }
102 | }
103 | ```
104 |
105 | ### Manage Tasks
106 |
107 | ```python
108 | State['current_task'] = {
109 | "description": "Implement authentication",
110 | "acceptance_criteria": ["User can register", "User can login"]
111 | }
112 |
113 | State['current_task'].status = "In Progress"
114 | ```
115 |
116 | ### Register Callbacks
117 |
118 | ```python
119 | def on_project_change(state_id, state):
120 | print(f"Project {state_id} updated.")
121 |
122 | State.onChange(on_project_change)
123 | ```
124 |
125 | ## Best Practices
126 |
127 | - **Consistent Naming**: Clear, consistent state names.
128 | - **Avoid Deep Nesting**: Keep state structure flat.
129 | - **Use Callbacks Wisely**: Prevent complex dependencies.
130 | - **Regular Backups**: Protect against data loss.
131 | - **Validation**: Ensure data integrity.
132 |
133 | ## Conclusion
134 |
135 | `State` efficiently manages application state. Follow these strategies and best practices for effective project and task handling.
136 |
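137 | As a final illustration, here's a minimal sketch (based on the example layout above) of resolving the `current_project` pointer to its project entry:
138 | 
139 | ```python
140 | from agentix import State
141 | 
142 | # Follow the pointer stored under projects.current_project
143 | name = State['projects'].current_project
144 | project = State['projects']['projects'][name]
145 | print(project.base_dir)  # "/path/to/project"
146 | ```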
--------------------------------------------------------------------------------
/doc/quickstart/prompting.md:
--------------------------------------------------------------------------------
1 | # Conversation and Message Guide
2 |
3 | The `Conversation` and `Message` classes are core components for handling dialogue in Cogni.
4 |
5 | ## Note about Few Shot Prompting
6 |
7 | Few Shot Prompting is great, and you should be using it.
8 |
9 | Throughout this doc, I'll call the base conversation of an agent the "**Prompt**": a short conversation starting with a system prompt.
10 |
11 | ## File
12 |
13 | By convention, the prompt of an agent will be in a file with the same name as the agent with `.conv` extension.
14 |
15 | So, for the example agent `ShellAgent`
16 | ```
17 | agents/
18 | └─ ShellAgent/
19 | ├─ agents/
20 | │ └─ ShellAgent.py
21 | ├─ middlewares/
22 | │ └─ shellagent_loop.py
23 | ├─ prompts/
24 | │ └─ ShellAgent.conv # 👈 IT'S HERE
25 | └─ tools/
26 | └─ shellagent_tools.py
27 | ```
28 |
29 | ## File Format
30 |
31 | A message is formatted as `{role}:{content}`, and messages are separated by `__-__`.
32 |
33 | ```
34 | system: You are an agent blabla
35 |
36 | __-__
37 |
38 | user: can you do stuff
39 |
40 | __-__
41 |
42 | assistant: sure :)
43 | ```
44 |
45 | ## Message Class
46 |
47 | A `Message` represents a single message in a conversation with:
48 |
49 | - A `role` (e.g., 'user', 'system', 'assistant')
50 | - `content` (the actual text)
51 |
52 | ```python
53 | from cogni import Message
54 |
55 | # Create a message
56 | msg = Message('user', 'What is the current directory?')
57 |
58 | # Access properties
59 | print(msg.role) # 'user'
60 | print(msg.content) # 'What is the current directory?'
61 |
62 | # Convert to dict format
63 | msg_dict = msg.to_dict() # {'role': 'user', 'content': '...'}
64 | ```
65 |
66 | ## Conversation Class
67 |
68 | A `Conversation` manages a sequence of messages with features for:
69 |
70 | 1. Creation from strings or files:
71 | ```python
72 | from cogni import Conversation
73 |
74 | # From a string
75 | conv_str = """
76 | system:You are a helpful assistant.
77 | __-__
78 |
79 | user:Hi!
80 | __-__
81 | assistant:Hello! How can I help you?
82 | """
83 | conv = Conversation.from_str(conv_str)
84 | ```
85 |
86 | 2. Adding messages:
87 | ```python
88 | # Add a single message
89 | conv = conv + Message('user', 'What time is it?')
90 |
91 | # Combine conversations
92 | conv2 = Conversation.from_str("user:Another question")
93 | combined = conv + conv2
94 | ```
95 |
96 | 3. Accessing messages:
97 | ```python
98 | # Get specific message
99 | first_msg = conv[0]
100 | last_msg = conv[-1]
101 |
102 | # Get a slice
103 | recent = conv[-3:] # Last 3 messages
104 | ```
105 |
106 | ## Flags
107 |
108 | Conversations can have flags that control their behavior:
109 |
110 | ```python
111 | conv = Conversation.from_str("user: hello")
112 |
113 | # Check flags
114 | print(conv.llm) # None - LLM model to use
115 | print(conv.should_infer) # False - Whether to run inference
116 | print(conv.hops) # 0 - Number of inference steps
117 |
118 | # Set flags
119 | conv.llm = "gpt-4"
120 | conv.should_infer = True
121 | ```
122 |
123 | ## Templating
124 |
125 | Messages can be parsed using custom parsers to replace placeholders or apply formatting:
126 |
127 | ```python
128 | from cogni import Message, parser
129 |
130 | # Create a parser for tags
131 | var_parser = parser("", "")
132 |
133 | msg = Message("user", "My name is Alice")
134 | msg.parse(var_parser) # Will process the tags
135 |
136 | # Create custom parsers for any tags
137 | custom_parser = parser("{{", "}}")
138 | msg.parse(custom_parser) # Will process {{tags}}
139 | ```
140 |
141 | The parser function helps create reusable parsers for different tag formats. Common uses include:
142 |
143 | - Replacing variables in prompts
144 | - Formatting code blocks
145 | - Processing custom markup in messages
146 |
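147 | ## Putting It Together
148 | 
149 | As a short illustrative sketch (the file path follows the convention above), a middleware typically loads the agent's base prompt, appends the incoming user message, and flags the conversation for inference:
150 | 
151 | ```python
152 | from cogni import Conversation, Message
153 | 
154 | # Load the few-shot base prompt for ShellAgent
155 | conv = Conversation.from_file('agents/ShellAgent/prompts/ShellAgent.conv')
156 | 
157 | # Append the incoming user message
158 | conv = conv + Message('user', 'List the files in the current directory')
159 | 
160 | # Ask the agent loop to run inference on the next hop
161 | conv.llm = 'gpt-4'
162 | conv.should_infer = True
163 | ```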
--------------------------------------------------------------------------------
/Agentix/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 | .aider*
162 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 | .aider*
164 |
--------------------------------------------------------------------------------
/Agentix/bricks/tools/AiderFacade.py:
--------------------------------------------------------------------------------
1 | from agentix import tool, Tool, State
2 | from typing import Optional, List, Dict, Union
3 |
4 |
5 | @tool
6 | def validate_with_human(msg: str):
7 | return Tool['Human'](msg)
8 |
9 |
10 | @tool
11 | def AF_ensure_session(name: str, mode: str = "ask") -> str:
12 | """Ensure an Aider session exists and is in the correct mode"""
13 | ...
14 |
15 |
16 | @tool
17 | def AF_ensure_omni(project_name: str) -> str:
18 | """Ensure the omniscient Aider session exists for a project"""
19 |
20 |
21 | @tool
22 | def AF_set_context_files(files: List[str], session_name: Optional[str] = None) -> bool:
23 | """Set the files that Aider should work with"""
24 | ...
25 |
26 |
27 | @tool
28 | def AF_ask(
29 | prompt: str,
30 | session_name: Optional[str] = None,
31 | context_files: Optional[List[str]] = None
32 | ) -> str:
33 | """Ask Aider a question about the codebase"""
34 | ...
35 |
36 |
37 | @tool
38 | def AF_set_current_task(
39 | task_description: str,
40 | acceptance_criteria: Optional[List[str]] = None
41 | ) -> str:
42 | """Set the current task for Aider to work on"""
43 | ...
44 |
45 |
46 | @tool
47 | def AF_validate_step(step_description: str, files_changed: List[str]) -> bool:
48 | """Validate that a step meets its acceptance criteria"""
49 | ...
50 |
51 |
52 | @tool
53 | def AF_propose_changes(
54 | task: str,
55 | files: List[str],
56 | constraints: Optional[Dict] = None
57 | ) -> Dict[str, str]:
58 | """Get Aider to propose specific changes to files"""
59 | ...
60 |
61 |
62 | @tool
63 | def AF_review_changes(
64 | files_changed: List[str],
65 | review_criteria: Optional[List[str]] = None
66 | ) -> Dict[str, List[str]]:
67 | """Get Aider to review changes against criteria"""
68 | ...
69 |
70 |
71 | @tool
72 | def _talk_to_omni(message):
73 | # return 'oiu'
74 | project = State['projects']['current']
75 |
76 | if input(f"Current project is {project}, continue ? (Y)/n: ").strip().startswith('n'):
77 | print('bye then')
78 | return
79 | all_sessions = all_aider_sessions(project)
80 |
81 | assert 'omni' in all_sessions, f'init project with \nTool["_aider_project"]("{project}")\n'
82 | o_sess = all_sessions['omni']
83 | reply_name = random_id()
84 | assert is_aider(o_sess), f"{o_sess} doesn't seem like an aider session"
85 | run_with_tmux('/chat-mode ask', o_sess)
86 | full_msg = f"""
87 | ## **Output Format**
88 | You'll think about the problem using your thoughts.
89 | The reply should be inside a <reply> tag.
90 |
91 | Your base ttl is given at the start, you're allowed that many thoughts.
92 | - If you're asked for a list of files, your reply should consist of file paths and nothing else, one per line.
93 | - If you're asked for a list of files, be greedy and add, if possible, extra files where the code looks like what we want to do, for inspiration/convention
94 |
95 | Here's an example of output:
96 | ```
97 | user: Can you give me the list of relevant files to add a module ?
98 | assistant:I should find the doc for modules
99 | I should also include some already implemented module for inspiration
100 | ...
101 | User asked for a list of files, my reply should be easily parsable, I'll answer with file paths, one per line and nothing else
102 |
103 |
104 | modules/doc.md
105 | modules/SomeModule/manifest.yml
106 | ...
107 |
108 |
109 | The thing we'll work on today
110 | ## **User question**
111 |
112 | {message}
113 |
114 | ## **Settings**
115 |
116 | - Your thought TTL is 27
117 | - The `name` attr of your reply opening tag should be '{reply_name}'
118 | """
119 | with open('/tmp/zouzou', "w") as f:
120 | f.write(full_msg)
121 |
122 | run_with_tmux('wl-copy < /tmp/zouzou', 'zoubida')
123 |
124 | reply = '\n'.join(run_with_tmux('/paste', o_sess))
125 | reply_content = Tool['xml_parser']('reply')(
126 | reply).get(reply_name, {}).get('content', '')
127 |
128 | return reply_content
129 |
--------------------------------------------------------------------------------
/Agentix/bricks/system/llm/tools/llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | from openai import OpenAI
4 | from agentix import tool, Conversation, Tool, Log, Event, SocketManager
5 | from rich import print
6 | import requests
7 | import ollama
8 |
9 | '''
10 | import ollama
11 | zouzou = ''
12 |
13 | stream = ollama.chat(
14 | model='qwen2.5-coder:32b',
15 | messages=[{'role': 'user', 'content': f"""
16 | Can you code a chess board with movable pieces using chess unicode emojis ?
17 | It should not
18 | Comment your code thoroughly using emoji and in Jamaican patois"""},
19 | {"role":"assistant", "content":"""Should dat be in JS my brada ?
20 | """,
21 | },{"role":"user","content":"""Yes plz:)"""}
22 | ],
23 | stream=True,
24 | )
25 |
26 | for chunk in stream:
27 | print(chunk['message']['content'], end='', flush=True)
28 |
29 | '''
30 |
31 |
32 | @tool
33 | def llm(conversation: Conversation, model='gpt-4') -> str:
34 | client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
35 | # Event['beforeInfer'](conversation)
36 |
37 | if '{tool' in conversation[1].content:
38 | conversation.__content = conversation[1].content
39 | if hasattr(conversation, '__content'):
40 | conversation[1].content = Tool['tpl_tool'](conversation.__content)
41 |
42 | if '{tool' in conversation[1].content:
43 | raise Exception('Wrong parsing')
44 |
45 | msg_id = ''.join([random.choice('abcdefghijklmno5896321470')
46 | for _ in range(10)])
47 | base_message_id = conversation.discord.get('base')
48 | reply_id = (
49 | requests.get(
50 | f"http://localhost:5000/reply/{base_message_id}/Thinking").json().get('message_id')
51 | )
52 | base_message_id = conversation.discord['current'] = reply_id
53 | conversation.discord['buffer'] = '.'
54 | last_call = 0
55 |
56 | def emit_msg(content=None):
57 | nonlocal last_call
58 | from time import time
59 | now = time()
60 | if content:
61 | conversation.discord['buffer'] = content
62 | if now - last_call < 1.:
63 | return False
64 | last_call = now
65 | # print(f'[red on green b]{content}')
66 |
67 | SocketManager.emit('streamMsg',
68 | payload={
69 | "conv": conversation.openai(),
70 | # getattr(conversation, "discord_message_id", "-1"),
71 | "discord_message_id": reply_id,
72 | "msg_id": msg_id,
73 | "content": conversation.discord['buffer'],
74 | },
75 | broadcast=True
76 | )
77 | return True
78 |
79 | stream = conversation._flags.get('stream', False)
80 | llm = conversation.llm
81 | if conversation.llm == 'gpt-4':
82 | llm = 'gpt-4o'
83 | # '''
84 | response = client.chat.completions.create(
85 | model='gpt-4o',
86 | messages=conversation.openai(),
87 | max_tokens=4000,
88 | temperature=.2,
89 | stream=True
90 | )
91 | '''
92 | response = ollama.chat(
93 | model='qwq',
94 | messages=conversation.openai(),
95 | stream=True,
96 | )
97 | #'''
98 |
99 | msg = ''
100 | emit_msg('___')
101 | last_edit_success = True
102 | for message in response:
103 | mm = message.choices[0].delta.content
104 | # mm = message['message']['content']
105 | if mm:
106 | msg += mm
107 | last_edit_success = emit_msg(msg)
108 | if not stream:
109 | continue
110 |
111 | while not last_edit_success:
112 | last_edit_success = emit_msg(content=msg)
113 |
114 | return msg
115 |
116 |
117 | @tool
118 | def llm_no_stream(conversation: Conversation, model='o1-mini') -> str:
119 | client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
120 | response = client.chat.completions.create(
121 | model=model,
122 | messages=conversation.openai(),
123 | )
124 |
125 | return response.choices[0].message.content
126 |
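127 | 
128 | # Illustrative usage sketch (assumes a loaded Conversation `conv`):
129 | #
130 | #     reply_text = Tool['llm'](conv)            # streamed completion; returns the full reply text
131 | #     reply_text = Tool['llm_no_stream'](conv)  # single-shot completion, no streaming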
--------------------------------------------------------------------------------
/cogni/wrappers/agent.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | from typing import Any, Dict, List, Optional
4 | from .instances_store import InstancesStore
5 | from .middleware import MW
6 | from ..entities import Message, Conversation
7 | from .tool import Tool
8 |
9 |
10 | class Agent(metaclass=InstancesStore):
11 | """Base class for agents that provides middleware chaining and execution."""
12 |
13 | def __init__(self, name: str, middlewares: str):
14 | """Initialize an agent with a name and middleware chain.
15 |
16 | Args:
17 | name: The agent's unique identifier
18 | middlewares: Pipe-separated list of middleware names
19 | """
20 | self.name = name
21 | self._middlewares_str = middlewares
22 | self._middlewares: Optional[List[MW]] = None
23 | Agent[name] = self
24 |
25 | @property
26 | def histo(self):
27 | try:
28 | return Conversation.from_file(self._histo_path)
29 | except: # FIXME: have typed exception & exception handling, this can hide nasty bugs
30 | return Conversation([])
31 |
32 | def append_histo(self, msg):
33 | try:
34 | last = self.histo[-1]
35 | if last.content == msg.content and last.role == msg.role:
36 | return
37 | except: # FIXME
38 | pass
39 | (self.histo + msg).to_file(self._histo_path)
40 |
41 | @property
42 | def base_prompt(self):
43 | grandparent_path = os.getcwd()
44 | pattern = grandparent_path + f"/**/prompts/{self.name}.conv"
45 |
46 | for file_path in glob.glob(pattern, recursive=True):
47 | self._histo_path = file_path.replace(
48 | '/prompts/', '/prompts/.histo/')
49 | histo_dir = os.path.dirname(self._histo_path)
50 | if not os.path.exists(histo_dir):
51 | os.makedirs(histo_dir, exist_ok=True)
52 | return Conversation.from_file(file_path)
53 |
54 | raise FileNotFoundError(f"Did not find {self.name}.conv")
55 |
56 | def _init_middlewares(self):
57 | """Initialize middleware chain from string specification."""
58 | if self._middlewares is None:
59 | self._middlewares = [
60 | MW[name.strip()]
61 | for name in self._middlewares_str.split('|')
62 | ]
63 |
64 | def __call__(self, *args: Any, **kwargs: Any) -> Any:
65 | """Execute the middleware chain with the given inputs.
66 |
67 | The first middleware receives a context dict containing:
68 | - agent: Reference to this agent
69 | - args: The input arguments
70 | - hops: Number of inference steps (initially 0)
71 | - kwargs: Any keyword arguments
72 |
73 | Each middleware receives:
74 | - ctx: The context dict
75 | - conv: The current conversation/value
76 | """
77 | self._init_middlewares()
78 |
79 | def infer(conv):
80 | ...
81 |
82 | ctx = {
83 | 'agent': self,
84 | 'args': args,
85 | 'hops': 0,
86 | 'kwargs': kwargs
87 | }
88 |
89 | conv = args
90 | for mw in self._middlewares:
91 |
92 | if isinstance(conv, tuple):
93 | conv = mw(ctx, *conv)
94 | else:
95 | conv = mw(ctx, conv)
96 | while isinstance(conv, Conversation) and conv.should_infer:
97 | llm = conv.llm
98 | if llm == 'fake':
99 | conv = conv.rehop(
100 | "Fake llm message",
101 | 'assistant'
102 | )
103 | else:
104 | conv = conv.rehop(
105 | Tool['llm'](conv),
106 | 'assistant'
107 | )
108 | conv.should_infer = False
109 | self.append_histo(conv[-1])
110 | conv.hops += 1
111 | ctx['hops'] += 1
112 | conv = mw(ctx, conv)
113 |
114 | return conv
115 |
116 | def __repr__(self) -> str:
117 | """String representation showing agent name."""
118 | return f"Agent['{self.name}']"
119 |
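120 | 
121 | # Illustrative usage sketch (agent and middleware names are hypothetical):
122 | #
123 | #     Agent('Greeter', 'prompt|greeter_loop')  # registers the agent under its name
124 | #     reply = Agent['Greeter']('Hello!')       # runs the middleware chain on the input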
--------------------------------------------------------------------------------
/cogni/entities/conversation.py:
--------------------------------------------------------------------------------
1 | """
2 | Defines the Conversation class, which represents a sequence of messages between a user and a system.
3 | """
4 | import os
5 | import json
6 | from copy import deepcopy
7 | from typing import List
8 | from .message import Message
9 |
10 |
11 | SEP = '\n__-__\n'
12 |
13 |
14 | def repr_flags(**flags):
15 |
16 | effective_width = 30
17 | margin = 10
18 |
19 | border_top = f"{' ' * margin}╭{'─' * effective_width}╮\n"
20 | border_bottom = f"{' ' * margin}╰{'─' * effective_width}╯\n"
21 |
22 | formatted_flags = "\n".join(
23 |         f"{' ' * margin}│ {key.ljust(8)} "
24 |         f"{str(value).ljust(effective_width - 11)} │"
25 | for key, value in flags.items()
26 | )
27 |
28 | return f"{border_top}{' ' * margin}│ 🏳️ flags:{' ' * (effective_width - 10)} │\n{formatted_flags}\n{border_bottom}"
29 |
30 |
31 | repr_flags
32 |
33 |
34 | class Conversation:
35 | """
36 | Represents a conversation, which is a sequence of messages.
37 | Conversations can be created from strings or files and manipulated programmatically.
38 | """
39 | @classmethod
40 | def from_file(cls, path: str):
41 | assert os.path.isfile(path), f"No conversation found at {path}"
42 | with open(path) as f:
43 | return cls.from_str(f.read())
44 |
45 | @classmethod
46 | def from_str(cls, conv_str: str) -> 'Conversation':
47 | msgs = [Message(role=role, content=content)
48 | for msg_str in conv_str.split(SEP)
49 | for role, content in [msg_str.strip().split(':', 1)]]
50 | return cls(msgs)
51 |
52 | def to_str(self) -> str:
53 | padded_SEP = '\n\n' + SEP + '\n\n'
54 | return f"{padded_SEP}".join(f"{msg.role}:{msg.content}" for msg in self.msgs)
55 |
56 | def to_file(self, path: str) -> None:
57 | with open(path, 'w') as f:
58 | f.write(self.to_str())
59 |
60 | def openai(self) -> List[dict]:
61 | return [msg.to_dict() for msg in self.msgs]
62 |
63 | def to_dict(self) -> List[dict]:
64 | """
65 | Converts the conversation to a list of dictionaries, each representing a message.
66 | :return: A list of dictionaries with 'role' and 'content' keys.
67 | """
68 | return [{'role': msg.role, 'content': msg.content} for msg in self.msgs]
69 |
70 | def __init__(self, msgs: List[Message]) -> None:
71 |
72 | def to_Message(msg: dict | Message) -> Message:
73 | if isinstance(msg, dict):
74 | return Message(**msg)
75 | return msg
76 |
77 | self.msgs = [to_Message(m) for m in msgs]
78 | self.llm = None
79 | self.should_infer = False
80 | self.hops = 0
81 |
82 | def __add__(self, other) -> 'Conversation':
83 | """Returns a new Conversation instance with the given message or conversation added."""
84 | if isinstance(other, Message):
85 | new_msgs = deepcopy(self.msgs) + [deepcopy(other)]
86 | elif isinstance(other, Conversation):
87 | new_msgs = deepcopy(self.msgs) + deepcopy(other.msgs)
88 | else:
89 | raise TypeError(
90 | "Operand must be an instance of Message or Conversation.")
91 | new_conv = deepcopy(self)
92 | new_conv.msgs = new_msgs
93 |
94 | return new_conv
95 |
96 | def __repr__(self):
97 |
98 | flags = repr_flags(llm=self.llm,
99 | rehop=self.should_infer,
100 | hops=self.hops)
101 |
102 | msgs = ''.join([str(m) for m in self.msgs])
103 |
104 | return flags + '\n' + msgs
105 |
106 | def __getitem__(self, key):
107 | if isinstance(key, int):
108 | # Return a single message if key is an integer
109 | return self.msgs[key]
111 | elif isinstance(key, slice):
112 | # Return a new Conversation instance with a slice of messages if key is a slice
113 | new_msgs = deepcopy(self.msgs[key])
114 | new_conv = deepcopy(self)
115 | new_conv.msgs = new_msgs
116 | return new_conv
118 | else:
119 | raise TypeError(
120 | "Invalid key type. Key must be an integer or a slice.")
121 |
122 | def rehop(self, message_str=None, role='system'):
123 | new_conv = deepcopy(self)
124 |
125 | if message_str is not None:
126 | new_conv = new_conv + Message(role, message_str)
127 | new_conv.should_infer = True
128 |
129 | return new_conv
130 |
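131 | 
132 | # Minimal usage sketch (illustrative values):
133 | #
134 | #     conv = Conversation.from_str("system:You are helpful\n__-__\nuser:hi")
135 | #     conv = conv + Message('user', 'What time is it?')
136 | #     conv = conv.rehop('It is noon.', role='assistant')  # appends the message and flags should_infer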
--------------------------------------------------------------------------------
/cogni/wrappers/event.py:
--------------------------------------------------------------------------------
1 | """
2 | Event system for handling asynchronous events and socket communications.
3 |
4 | This module provides a standardized way to handle events across the application,
5 | with support for WebSocket communication and thread-safe event handling.
6 |
7 | Example usage:
8 |
9 | # Register an event handler
10 | @Event.on("user_connected")
11 | def handle_user_connection(event_data: EventPayload):
12 | print(f"User {event_data.user_id} connected")
13 |
14 | # Emit an event
15 | Event.emit("user_connected", {"user_id": "123"})
16 |
17 | # Initialize the event system
18 | event_system = Event.init()
19 | """
20 |
21 | from collections import defaultdict
22 | import threading
23 | from typing import Any, Dict, Optional, Callable
24 | from pydantic import BaseModel, Field
25 |
26 |
27 | class EventPayload(BaseModel):
28 | """Standard structure for event payloads."""
29 | event_type: str = Field(..., description="Type of the event")
30 | data: Dict[str, Any] = Field(default_factory=dict, description="Event data")
31 | _type: str = Field(default="event", description="Internal event type marker")
32 | _ttl: int = Field(default=5, description="Time-to-live counter for event propagation")
33 |
34 |
35 | class Event:
36 | """
37 | Event management system with thread-safe handlers and WebSocket support.
38 |
39 | Attributes:
40 | _handlers: Dictionary mapping event types to their handler functions
41 | instance: Singleton instance of the Event system
42 | socketio: Optional SocketIO instance for WebSocket communication
43 | """
44 | _handlers: Dict[str, Callable] = {}
45 | instance: Optional['Event'] = None
46 | socketio: Any = None
47 |
48 | @classmethod
49 | def handle(cls, event_data: Dict[str, Any]) -> None:
50 | """
51 | Handle an incoming event by dispatching it to the appropriate handler.
52 |
53 | Args:
54 | event_data: Dictionary containing event information
55 | """
56 | event_type = event_data.get('eventType')
57 | handler = cls._handlers.get(event_type)
58 |
59 | if not handler:
60 | return
61 |
62 | def run_handler(handler: Callable, data: Dict[str, Any]) -> None:
63 | handler(EventPayload(**data))
64 |
65 | thread = threading.Thread(
66 | target=run_handler,
67 | args=(handler, event_data)
68 | )
69 | thread.start()
70 |
71 | @classmethod
72 | def register(cls, event_type: str) -> Callable:
73 | """
74 | Decorator to register an event handler.
75 |
76 | Args:
77 | event_type: The type of event this handler will process
78 |
79 | Returns:
80 | Decorator function for registering handlers
81 |
82 | Raises:
83 | AssertionError: If handlers are registered after initialization
84 | """
85 | assert cls.instance is None, "Register handlers before initialization"
86 |
87 | def decorator(func: Callable) -> Callable:
88 | assert not hasattr(func, '_sockethandler'), "Handler already registered"
89 | assert event_type not in cls._handlers, f"Handler exists for {event_type}"
90 |
91 | setattr(func, '_sockethandler', True)
92 | cls._handlers[event_type] = func
93 | return func
94 |
95 | return decorator
96 |
97 | # Alias for register to provide a more intuitive API
98 | on = register
99 |
100 | @classmethod
101 | def init(cls) -> 'Event':
102 | """
103 | Initialize the Event system singleton.
104 |
105 | Returns:
106 | The Event system instance
107 | """
108 | if cls.instance is None:
109 | cls.instance = cls()
110 | return cls.instance
111 |
112 | @classmethod
113 | def emit(cls, event_type: str, payload: Dict[str, Any]) -> None:
114 | """
115 | Emit an event to all registered handlers.
116 |
117 | Args:
118 | event_type: Type of event to emit
119 | payload: Event data to send
120 | """
121 | if cls.instance is None:
122 | cls.instance = cls()
123 |
124 | event_data = EventPayload(
125 | event_type=event_type,
126 | data=payload,
127 | _ttl=payload.get('_ttl', 5)
128 | )
129 |
130 | if event_data._ttl > 0:
131 | event_data._ttl -= 1
132 | cls.instance.handle(event_data.dict())
133 |
134 | if cls.instance.socketio:
135 | cls.instance.socketio.emit('event', event_data.dict())
136 |
--------------------------------------------------------------------------------
/Agentix/README.md:
--------------------------------------------------------------------------------
1 | # Agentix
2 |
3 | ## What is Agentix?
4 | Agentix is a micro-framework allowing low code/low boilerplate implementation of LLM agents.
5 |
6 | ### Yeah but, why not LangChain though ?
7 | LangChain/LangGraph allow for creating agents and orchestrating flow and communication.
8 | One key assumption of Agentix is that agentic execution flow has the same requirements and complexity as code, and therefore agents should be created, managed, and orchestrated by code.
9 | As a side effect, this allows borrowing architecture from domains such as web dev, where best practices are mature.
10 |
11 | ## How it works
12 | Agentix is built on the principle of "Agents as Functions," allowing for a modular and composable approach to AI agent development.
13 |
14 | ### Hide complexity
15 | Do Large Language Models have any amount of complexity ?
16 |
17 | If your answer includes "Matrix multiplication" or "Attention map caching", I would object that I don't care.
18 |
19 | LLMs are magic black boxes that take text as input and return text.
20 |
21 | For all we care, from our coder point of view, LLMs are as simple as
22 | ```python
23 | import openai
24 | import os
25 |
26 | # Set up the OpenAI API client
27 | openai.api_key = os.environ.get("OPENAI_API_KEY")
28 |
29 | def chat_completion(prompt, model="gpt-3.5-turbo", max_tokens=150):
30 | try:
31 | # Create the API request
32 | response = openai.ChatCompletion.create(
33 | model=model,
34 | messages=[
35 | {"role": "system", "content": "You are a helpful assistant."},
36 | {"role": "user", "content": prompt}
37 | ],
38 | max_tokens=max_tokens,
39 | temperature=0.7,
40 | )
41 |
42 | # Extract and return the generated message
43 | return response.choices[0].message['content'].strip()
44 |
45 | except Exception as e:
46 | print(f"An error occurred: {e}")
47 | return None
48 |
49 | # Example usage
50 | if __name__ == "__main__":
51 |     user_prompt = "What is the capital of France ?"
52 | result = chat_completion(user_prompt)
53 |
54 | if result:
55 | print("Assistant:", result)
56 | else:
57 | print("Failed to get a response.")
58 | ```
59 |
60 | ### Going further
61 |
62 | Our goal as coders should be that, from any given scope, all dependencies are magic black boxes with clear specifications.
63 |
64 | ### Everything is a function
65 |
66 | We'll define an *Agent* as a black box that takes any number of inputs of any type, returns either nothing or anything of any type, and potentially has *side effects*.
67 | 
68 | In other words, agents can be thought of as **functions**.
69 |
70 | *NOTE: the term **Function** is used in the programmatic sense (i.e.: a Python function), as opposed to the stricter sense it carries in **Functional Programming***
71 |
72 | Here's a toy example of an execution flow this approach allows for:
73 | ```python
74 | from agentix import Agent
75 |
76 | for task in Agent['task_lister'](user_input):
77 | Agent['task_executor'](task)
78 | ```
79 |
80 | *NOTE: the why and how of global containers as **agentix.Agent** will be explained further down*
81 |
82 | ### Agentix's approach
83 |
84 | #### Separation of concerns
85 | In web development, it's common to have a conventional directory structure containing:
86 | - Presentation/integration (aka templates)
87 | - Application logic (say, TypeScript files)
88 | - Styling (CSS)
89 |
90 | Adopting a similar approach, we can break down our agents into:
91 | - Conversation templates
92 | - Application Logic
93 | - Tools/utilities/dependencies
94 |
95 | Because some processing will be common across agents, we'll further break down *Application Logic* into middlewares.
96 |
97 | #### Global containers and magic imports
98 |
99 | Taking inspiration from the web framework **Nuxt**, aiming as much as possible at **Low Code**, all components of our agentic stack will be automatically imported and accessible via a set of global containers.
100 |
101 | example:
102 | ```python
103 | # Inside any file within our directory tree
104 | from agentix import func
105 |
106 | @func
107 | def add_ints(a:int, b:int) -> int:
108 | return a + b
109 | ```
110 |
111 | Without any additional code, in any other file:
112 | ```python
113 | from agentix import Func
114 |
115 | print("4 + 3=", Func['add_ints'](4, 3))
116 | ```
117 |
118 | ### Building an agent with Agentix
119 |
120 | #### Agentix CLI tool
121 |
122 | ```bash
123 | agentix create ShellGPT
124 | ```
125 |
126 | will result in this boilerplate being created
127 |
128 | ```
129 | 📁agents
130 | └─📁ShellGPT
131 | ├─📁agents
132 | │ └─📄ShellGPT.py
133 | ├─📁middlewares
134 | │ └─📄ShellGPT_loop.py
135 | ├─📁prompts
136 | │ └─📄ShellGPT.conv
137 | ├─📁tests
138 | │ └─📄test_ShellGPT.py
139 | └─📁tools
140 | ```
141 |
142 |
143 | TODO: Write this TODO
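144 | 
145 | Once the boilerplate is in place, the new agent is reachable through the global container like any other function. A minimal sketch, assuming the generated default middleware chain:
146 | 
147 | ```python
148 | from agentix import Agent
149 | 
150 | print(Agent['ShellGPT']("List the five largest files in /var/log"))
151 | ```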
--------------------------------------------------------------------------------
/Agentix/agentix/entities/conversation.py:
--------------------------------------------------------------------------------
1 | """
2 | Defines the Conversation class, which represents a sequence of messages between a user and a system.
3 | """
4 | import os
5 | import json
6 | from copy import deepcopy
7 | from typing import List
8 | from .message import Message
9 |
10 |
11 | SEP = '\n__-__\n'
12 |
13 |
14 | class Conversation:
15 | """
16 | Represents a conversation, which is a sequence of messages.
17 | Conversations can be created from strings or files and manipulated programmatically.
18 | """
19 | @classmethod
20 | def from_file(cls, path: str):
21 | assert os.path.isfile(path), f"No conversation found at {path}"
22 | with open(path) as f:
23 | return cls.from_str(f.read())
24 |
25 | @classmethod
26 | def from_str(cls, conv_str: str) -> 'Conversation':
27 | msgs = [Message(role=role, content=content)
28 | for msg_str in conv_str.split(SEP)
29 | for role, content in [msg_str.strip().split(':', 1)]]
30 | flags = {}
31 | # IF THE First message as a role `flags` then set flag_msg ,*msgs = msgs and flags = json.loads(flag_msg.content)
32 | if msgs[0].role == 'flags':
33 | flags_msg, *msgs = msgs
34 | flags = json.loads(flags_msg.content)
35 | return cls(msgs, flags)
36 |
37 | def to_str(self) -> str:
38 | flag_msg = Message(role='flags', content=json.dumps(self._flags))
39 |
40 | return SEP.join(f"{msg.role}:{msg.content}" for msg in [flag_msg] + self.msgs)
41 |
42 | def to_file(self, path: str) -> None:
43 | with open(path, 'w') as f:
44 | f.write(self.to_str())
45 |
46 | def openai(self) -> List[dict]:
47 | return [msg.to_dict(True) for msg in self.msgs]
48 |
49 | def to_dict(self) -> List[dict]:
50 | """
51 | Converts the conversation to a list of dictionaries, each representing a message.
52 | :return: A list of dictionaries with 'role' and 'content' keys.
53 | """
54 | return [{'role': msg.role, 'content': msg.content} for msg in self.msgs]
55 |
56 | def __init__(self, msgs: List[Message], flags: dict = None) -> None:
57 |
58 | def to_Message(msg: dict | Message) -> Message:
59 | if isinstance(msg, dict):
60 | return Message(**msg)
61 | return msg
62 |
63 | msgs = [to_Message(m) for m in msgs]
64 |
65 | flags = flags or {}
66 | self._msgs = msgs
67 | self._flags = {'should_infer': False, **flags}
68 | self._llm = os.getenv('AGENTIX_MODEL') or 'gpt-4o'
69 |
70 | @property
71 | def should_infer(self) -> bool:
72 | return self._flags.get('should_infer', False)
73 |
74 | @should_infer.setter
75 | def should_infer(self, value: bool):
76 | self._flags['should_infer'] = value
77 |
78 | @property
79 | def llm(self) -> str:
80 | return self._llm
81 |
82 | @llm.setter
83 | def llm(self, value: str):
84 | self._llm = value
85 |
86 | @property
87 | def msgs(self) -> List[Message]:
88 | return self._msgs
89 |
90 | @msgs.setter
91 | def msgs(self, msgs):
92 | self._msgs = msgs
93 |
94 | def __add__(self, other) -> 'Conversation':
95 | """Returns a new Conversation instance with the given message or conversation added."""
96 | if isinstance(other, Message):
97 | new_msgs = deepcopy(self.msgs) + [deepcopy(other)]
98 | elif isinstance(other, Conversation):
99 | new_msgs = deepcopy(self.msgs) + deepcopy(other.msgs)
100 | else:
101 | raise TypeError(
102 | "Operand must be an instance of Message or Conversation.")
103 | new_conv = deepcopy(self)
104 | new_conv.msgs = new_msgs
105 | return new_conv
107 |
108 | def __repr__(self):
109 | return '\n\n______________\n\n'.join(f"{m.role}:{m.content}" for m in self.msgs)
110 |
111 | def __getitem__(self, key):
112 | if isinstance(key, int):
113 | # Return a single message if key is an integer
114 | return self._msgs[key]
116 | elif isinstance(key, slice):
117 | # Return a new Conversation instance with a slice of messages if key is a slice
118 | new_msgs = deepcopy(self.msgs[key])
119 | new_conv = deepcopy(self)
120 | new_conv.msgs = new_msgs
121 | return new_conv
123 | else:
124 | raise TypeError(
125 | "Invalid key type. Key must be an integer or a slice.")
126 |
127 | def rehop(self, message_str=None, role='system'):
128 | new_conv = deepcopy(self)
129 |
130 | if message_str is not None:
131 | new_conv = new_conv + Message(role, message_str)
132 | new_conv.should_infer = True
133 |
134 | return new_conv
135 |
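136 | 
137 | # Illustrative sketch of the optional leading `flags` message handled by from_str:
138 | #
139 | #     conv = Conversation.from_str('flags:{"should_infer": true}\n__-__\nuser:hi')
140 | #     conv.should_infer  # -> True, parsed from the JSON flags header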
--------------------------------------------------------------------------------
/Agentix/agentix/cli.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from rich.columns import Columns
4 | from rich.panel import Panel
5 | import fire
6 |
7 | from rich import print
8 | from rich.console import Console
9 | from rich.prompt import Prompt, Confirm
10 | from rich.table import Table
11 |
12 | from agentix import Func, Conf, ModuleInfo
13 |
14 |
15 | class AgentixCLI:
16 | def create(self):
17 | console = Console()
18 |
19 | # Display title
20 | title = "[bold blue]Agentix module creation wizard[/bold blue]"
21 | console.print(Panel(title, style="green"), justify="center")
22 | modules_dir = os.path.abspath("./AgentixAgents/modules")
23 | # Check directory
24 | if not os.path.isdir(modules_dir):
25 | console.print(
26 | "[red]You should run `agentix create` in the root dir of `GoodAssistant` project[/red]")
27 | return
28 | Conf.GA_modules_dir = modules_dir
29 |
30 | existing_modules = Func['get_all_GA_modules']()
31 |
32 | # Gather information
33 | name = None
34 | while name is None:
35 | name_input = Prompt.ask("[cyan]Module name")
36 | if name_input not in existing_modules:
37 | name = name_input
38 | break
39 | console.print(
40 |                 f"[green]A module named [red]{name_input}[/red][green] already exists. Please pick another name")
41 |
42 | author = Prompt.ask("[cyan]Author")
43 | description = Prompt.ask("[cyan]Description")
44 | version = Prompt.ask("[cyan]version", default='0.0.1')
45 | # Ask for boilerplates
46 | create_agent = Confirm.ask(
47 | "[cyan]Create boilerplate for agent?[/cyan]",
48 | default=False
49 | )
50 |
51 | create_endpoints = Confirm.ask(
52 | "[cyan]Create boilerplate for endpoints?[/cyan]",
53 | default=False
54 | )
55 |
56 | create_widget = Confirm.ask(
57 | "[cyan]Create boilerplate for widget?[/cyan]",
58 | default=False
59 | )
60 |
61 | # Check if at least one is selected
62 | if not any([create_agent, create_endpoints, create_widget]):
63 | console.print(
64 | "[red]At least one of `agent`, `endpoint` or `widget` should exist[/red]")
65 | return
66 |
67 | # Widget framework choice
68 | if create_widget:
69 | console.print(
70 | "\n[yellow]For the following question, `(N)uxt.js/(R)eact.js` will be added later[/yellow]")
71 | widget_type = Prompt.ask(
72 | "[cyan]Widget boilerplate?[/cyan]",
73 | choices=["V"],
74 | default="V"
75 | )
76 | else:
77 | widget_type = None
78 |
79 | # Create summary table
80 | table = Table(title="Summary of choices")
81 | table.add_column("Setting", style="cyan")
82 | table.add_column("Value", style="green")
83 |
84 | table.add_row("Module name", name)
85 | table.add_row("Author", author)
86 | table.add_row("Create agent", "✅" if create_agent else "❌")
87 | table.add_row("Create endpoints", "✅" if create_endpoints else "❌")
88 | table.add_row("Create widget", "✅" if create_widget else "❌")
89 | if widget_type:
90 | table.add_row("Widget type", f"Vanilla JS")
91 |
92 | console.print("\n")
93 | console.print(table)
94 |
95 | # Final confirmation
96 | confirm = Confirm.ask(
97 | "\n[yellow]Do you confirm these choices?[/yellow]")
98 | if not confirm:
99 | console.print("[red]Operation cancelled[/red]")
100 | return
101 |
102 | console.print("[green]Proceeding with module creation...[/green]")
103 |
104 | module_info = ModuleInfo(
105 | name=name,
106 | author=author,
107 | version=version,
108 | description=description,
109 | agent=create_agent,
110 | endpoints=create_endpoints,
111 | widget=create_widget,
112 | widget_type=widget_type,
113 | )
114 |
115 | Func['init_module'](module_info)
116 | print("done")
117 |
118 |     def credate(self, name: str):
119 | """Creates a new agent structure."""
120 | print('ca')
121 | print(os.getcwd())
122 | quit()
123 | base_path = os.path.join('./agents', name)
124 | directories = ['agents', 'middlewares', 'tools', 'prompts', 'tests',]
125 | for directory in directories:
126 | os.makedirs(os.path.join(base_path, directory), exist_ok=True)
127 | # Create agent, middleware, and test files
128 | agent_file_path = os.path.join(base_path, 'agents', f'{name}.py')
129 | middleware_file_path = os.path.join(
130 | base_path, 'middlewares', f'{name}_loop.py')
131 | test_file_path = os.path.join(base_path, 'tests', f'test_{name}.py')
132 | prompt_file_path = os.path.join(base_path, 'prompts', f'{name}.conv')
133 |
134 | for fp, content in zip(
135 | [agent_file_path,
136 | middleware_file_path,
137 | test_file_path,
138 | prompt_file_path],
139 | [
140 | f'''from agentix import Agent
141 | Agent('{name}', 'prompt_histo|gpt4|{name}_loop')''',
142 |
143 | f'''from agentix import mw, Tool
144 |
145 | @mw
146 | def {name}_loop(ctx, conv):
147 | return conv''',
148 | '''''',
149 | f'''system:You are {name}, an AGI
150 | __-__
151 | user:hi
152 | __-__
153 | assistant:How can I help you ma bro ?'''
154 | ]):
155 | if os.path.isfile(fp):
156 | continue
157 | with open(fp, 'w') as f:
158 | f.write(content)
159 |
160 | print(
161 | f"[red b]Agentix:[/]Agent structure for [green b]{name}[/] created successfully.")
162 |
163 |
164 | def main():
165 | return fire.Fire(AgentixCLI)
166 |
--------------------------------------------------------------------------------
/Agentix/bricks/tools/HumanFacade.py:
--------------------------------------------------------------------------------
1 | from agentix import tool, State
2 | from typing import Optional, List, Dict, Callable, Any, Union
3 | import requests
4 | import json
5 | import time
6 |
7 | # Base URL for Discord bot API
8 | DISCORD_API_BASE = "http://localhost:5000"
9 |
10 |
11 | @tool
12 | def HF_send_message(
13 | message: str,
14 | channel: str = "general",
15 | msg_type: str = "info",
16 | ) -> str:
17 | """
18 | Send a message to a specific channel
19 | Returns: Message ID that can be used for editing
20 | """
21 | try:
22 | response = requests.post(
23 | f"{DISCORD_API_BASE}/send_message_by_name/{channel}",
24 | json={"content": message}
25 | )
26 | response.raise_for_status()
27 | return response.json().get('message_id', '')
28 | except Exception as e:
29 | print(f"Error sending message: {e}")
30 | return ''
31 |
32 |
33 | @tool
34 | def HF_ask_human(message: str) -> str:
35 | from agentix import Stuff, State
36 | """
37 | Ask the human a question via DM and wait for their response
38 | Returns: The human's response as a string
39 | """
40 | print("\n[HF_ask_human] Starting to ask human:", message)
41 |
42 | # Send the question
43 |
44 | # Set up response handling
45 | # if not 'ask' in State:
46 | State['ask'] = {"asking": False, 'reply': None}
47 | assert not State['ask']['asking']
48 | State['ask']['asking'] = True
49 | msg_id = HF_send_dm(message)
50 |
51 | print(f"[HF_ask_human] Sent DM, got message ID: {msg_id}")
52 |
53 | if not msg_id:
54 | print("[HF_ask_human] Failed to send DM")
55 | return ''
56 | print('waiting for human to reply.', end='')
57 | while not State['ask']['reply']:
58 | print('.', end='')
59 | time.sleep(0.5)
60 | State.reset_cache()
61 |
62 | reply = State['ask']['reply']
63 | State['ask']['asking'] = False
64 | State['ask']['reply'] = None
65 | return reply
66 |
67 | # Store the question details
68 | State['ask_human']['pending_questions'][msg_id] = True
69 | print(f"[HF_ask_human] Added pending question {msg_id}")
70 |
71 | # Register DM callback if not already done
72 | def dm_callback(content: str, response_id: str):
73 | print(
74 | f"[HF_ask_human] Got DM callback - content: {content}, response_id: {response_id}")
75 | # Check if this is a response to a pending question
76 | for question_id in State['ask_human']['pending_questions']:
77 | if State['ask_human']['pending_questions'][question_id]:
78 | print(f"[HF_ask_human] Found matching question {question_id}")
79 | # Store the response
80 | State['ask_human']['responses'][question_id] = content
81 | # Mark question as answered
82 | State['ask_human']['pending_questions'][question_id] = False
83 | print(
84 | "[HF_ask_human] Stored response and marked question as answered")
85 | break
86 |
87 | print("[HF_ask_human] Registering DM callback")
88 | HF_on_dm(dm_callback)
89 |
90 | # Wait for response with timeout
91 | max_wait = 3000 # 5 minutes timeout
92 | wait_time = 0
93 | print("[HF_ask_human] Starting to wait for response")
94 |
95 | while wait_time < max_wait:
96 | if msg_id in State['ask_human']['responses']:
97 | response = State['ask_human']['responses'][msg_id]
98 | print(f"[HF_ask_human] Got response: {response}")
99 | # Cleanup
100 | del State['ask_human']['pending_questions'][msg_id]
101 | del State['ask_human']['responses'][msg_id]
102 | print("[HF_ask_human] Cleaned up state")
103 | return response
104 | time.sleep(1)
105 | wait_time += 1
106 | if wait_time % 10 == 0: # Print every 10 seconds
107 | print(f"[HF_ask_human] Still waiting... {wait_time}/{max_wait}")
108 |
109 | print("[HF_ask_human] Timed out waiting for response")
110 | return 'Human timed out'
111 |
112 |
113 | @tool
114 | def HF_edit_message(
115 | message_id: str,
116 | new_content: str,
117 | ) -> bool:
118 | """
119 | Edit an existing message
120 | Returns: True if successful, False otherwise
121 | """
122 | try:
123 | response = requests.post(
124 | f"{DISCORD_API_BASE}/edit/{message_id}",
125 | json={"content": new_content}
126 | )
127 | response.raise_for_status()
128 | print(response, f"{response=}")
129 | input('ca')
130 | return True
131 | except Exception as e:
132 | print(f"Error editing message: {e}")
133 | return False
134 |
135 |
136 | @tool
137 | def HF_send_dm(
138 | message: str,
139 | ) -> str:
140 | """
141 | Send a direct message to the configured user
142 | Returns: Message ID that can be used for editing
143 | """
144 | try:
145 | response = requests.post(
146 | f"{DISCORD_API_BASE}/send_dm",
147 | json={"content": message}
148 | )
149 | response.raise_for_status()
150 | result = response.json()
151 | if result.get('status') != 'success':
152 | print(
153 | f"Error sending DM: {result.get('message', 'Unknown error')}")
154 | return ''
155 | return result.get('message_id', '')
156 | except Exception as e:
157 | print(f"Error sending DM: {e}")
158 | return ''
159 |
160 |
161 | @tool
162 | def HF_on_message(channel: str, callback: Callable[[str, str, str], None]) -> None:
163 | """
164 | Register a callback for messages in a specific channel
165 | Callback receives: channel_name, message_content, message_id
166 | """
167 | if 'discord' not in State:
168 | State['discord'] = {
169 | 'callbacks': {},
170 | 'message_map': {}
171 | }
172 | State['discord']['callbacks'][channel] = callback
173 |
174 |
175 | @tool
176 | def HF_on_dm(callback: Callable[[str, str], None]) -> None:
177 | """
178 | Register a callback for direct messages
179 | Callback receives: message_content, message_id
180 | """
181 | HF_on_message('DM', lambda _, content, msg_id: callback(content, msg_id))
182 |
--------------------------------------------------------------------------------
/agents/Gitor/agent/prompts/Gitor.conv:
--------------------------------------------------------------------------------
1 | system: You are Gitor, a specialized agent that handles Git repository initialization and setup.
2 | You use shell commands through tmux to manage git operations.
3 |
4 | To use the shell, use this syntax:
5 | command
6 | The git diff
7 |
8 | Use to think step by step before replying.
9 |
10 | /!\ Never more than ONE tool use per output!
11 |
12 | Your session_name will always be "gitor"
13 |
14 | Before cloning a repo, always insure you're in /root/workspace !!
15 |
16 | # **IMPORTANT**
17 | If user tells you that you're in DEV mode, you shall execute only one tool, then on the next interaction, include !!NOTOOL
18 |
19 | __-__
20 |
21 | system: Here's some env vars.
22 | This message is updated and dynamic
23 |
24 | ## **Last reported Repo**
25 | {tool:gitor_current_repo}
26 |
27 | ## **Last reported commit**
28 | {tool:gitor_last_reported_commit}
29 |
30 |
31 | __-__
32 |
33 | user: We'll work on https://github.com/openai/whisper ?
34 |
35 | __-__
36 |
37 | assistant:
38 | - The `currentState` name is "IllustriousBleach". I note that in case it changes, then I'll know it has been updated, lol
39 | - First, I need to check if we're in a clean directory. I should ALWAYS do pwd before doing stuff
40 | - Then I'll clone the repository
41 | - I should use git clone with the HTTPS URL
42 |
43 | pwd
44 |
45 | __-__
46 |
47 | system: /root/workspace
48 |
49 | __-__
50 |
51 | assistant:
52 | - I'll check if the repo exists
53 |
54 | ls whisper
55 |
56 | __-__
57 |
58 | system: ls: cannot access '/root/workspace/whisper': No such file or directory
59 |
60 | __-__
61 |
62 | assistant:
63 | - Doens't exisitis. Oh nooo
64 | - I'll clone the repository xD
65 |
66 | git clone https://github.com/openai/whisper.git
67 |
68 | __-__
69 |
70 | system: Cloning into 'whisper'...
71 | remote: Enumerating objects: 5723, done.
72 | remote: Total 5723 (delta 0), reused 0 (delta 0), pack-reused 5723
73 | Receiving objects: 100% (5723/5723), 27.15 MiB | 8.52 MiB/s, done.
74 | Resolving deltas: 100% (3831/3831), done.
75 |
76 | __-__
77 |
78 | assistant:
79 | - Repository has been cloned successfully, yay \o/
80 | - I should move into the directory lol
81 | - Verify the git setup and stuff
82 |
83 | cd whisper
84 |
85 | __-__
86 |
87 | system:
88 |
89 | __-__
90 |
91 | assistant:
92 | - We're in the right directory, yay \o/
93 | - Let's verify the remote is properly set up, lol
94 |
95 | git remote -v
96 |
97 | __-__
98 |
99 | system: origin https://github.com/openai/whisper.git (fetch)
100 | origin https://github.com/openai/whisper.git (push)
101 |
102 | __-__
103 |
104 |
105 | assistant:
106 | - OK, so now I should report the changes and stuff. It's by duty after all xD
107 | - The `currentState` name is "BadLemur". I note that in case it changes, then I'll know it has been updated, lol
108 | - last commit hash is 0f0eab6355657af74e6c20ab0218528d8221d0b0
109 | - I should always git pull before anything. I SHOULD ALWAYS DO THAT
110 | - I should use git log to see the latest commit
111 | - I'll limit it to 1 commit
112 | - Show full details
113 |
114 | git pull
115 |
116 | __-__
117 |
118 | system: already up to date
119 |
120 | __-__
121 | assistant:
122 | - The repo is up to date, yay \o/
123 | - I should use git log to see the latest commit
124 | - I'll limit it to 1 commit
125 | - Show full details
126 |
127 | git --no-pager diff 0f0eab6355657af74e6c20ab0218528d8221d0b0
128 |
129 | __-__
130 |
131 | system: diff --git a/whisper/__init__.py b/whisper/__init__.py
132 | index 9f7a875..c1b6a6a 100644
133 | --- a/whisper/__init__.py
134 | +++ b/whisper/__init__.py
135 | @@ -1,3 +1,4 @@
136 | +# Version 1.0.0
137 | from .audio import load_audio, log_mel_spectrogram, pad_or_trim
138 | from .decoding import DecodingOptions, DecodingResult, decode, detect_language
139 | from .model import Whisper, ModelDimensions
140 |
141 | __-__
142 |
143 | assistant:
144 | - Mmmh, lol. Let me get the commit message and author
145 | - Use git log for this information
146 |
147 | git --no-pager log -1
148 |
149 | __-__
150 |
151 | system: commit 8cf36f3508c9ff9aa7e5a5b0cc1662a79966eb14
152 | Author: Liam Dugan
153 | Date: Wed May 24 16:23:43 2023 -0400
154 |
155 | Add version number
156 |
157 | __-__
158 |
159 | assistant:
160 | - I should report the change, ROFL
161 | - I'll use report_git_change xD
162 |
163 | index 9f7a875..c1b6a6a 100644
164 | --- a/whisper/__init__.py
165 | +++ b/whisper/__init__.py
166 | @@ -1,3 +1,4 @@
167 | +# Version 1.0.0
168 | from .audio import load_audio, log_mel_spectrogram, pad_or_trim
169 | from .decoding import DecodingOptions, DecodingResult, decode, detect_language
170 | from .model import Whisper, ModelDimensions
171 |
172 | __-__
173 |
174 | user: Sorry to interupt, we'll work on a new repo.
175 | I worked on the session `gitor` so don't forget to do a pwd before anything else.
176 |
177 | **IMPORTANT**: From now on, you're in DEV mode
178 |
179 | __-__
180 |
181 | assistant:
182 | - I should wait for more instructions xD
183 | - I'll just tell that to user
184 | - Whatever happens next, I must insure the right directory and stuff.
185 | - My first action should probably be `pwd`
186 | - Then I'll check the latest commit. If it's the same as in **Last reported commit**, I won't report.
187 | - LOL xD
188 | - I'm in DEV mode, so Whatever user asks, I'll execute a tool, then after the output of the tool, I'll report on what happened and include "!!NOTOOL"
189 |
--------------------------------------------------------------------------------
/Agentix/agentix/wrappers/agent.py:
--------------------------------------------------------------------------------
1 | from rich import print
2 | import os
3 | import glob
4 |
5 | from .instances_store import InstancesStore
6 | from .middleware import MW, mw
7 | from .tool import Tool, tool
8 | from agentix.entities import Conversation
9 | oprint = print
10 |
11 | _DEBUG = os.getenv('AGENTIX_DEBUG')
12 |
13 |
14 | class Agent(metaclass=InstancesStore):
15 | def __init__(self, name: str, middlewares: str):
16 | """
17 | Initialize a new Agent instance.
18 |
19 | :param name: The name of the Agent.
20 | :param middlewares: A list of middleware instances to be used by the Agent.
21 | :raises Exception: If an Agent with the given name already exists.
22 | """
23 | _DEBUG and print(
24 | f"init Agent [green b]{name}[/] with [blue i]{middlewares}")
25 | # FIXME: Actually implement logs
26 | self.name = name
27 |
28 | # if name in Agent:
29 | # raise Exception(f"Agent with name '{name}' already exists.")
30 |
31 | Agent[name] = self
32 | self._middlewares_str = middlewares
33 |
34 | @property
35 | def base_prompt(self):
36 | grandparent_path = os.getcwd()
37 | pattern = grandparent_path + f"/**/prompts/{self.name}.conv"
38 |
39 | for file_path in glob.glob(pattern, recursive=True):
40 | self._histo_path = file_path.replace(
41 | '/prompts/', '/prompts/.histo/')
42 | histo_dir = os.path.dirname(self._histo_path)
43 | if not os.path.exists(histo_dir):
44 | os.makedirs(histo_dir, exist_ok=True)
45 | return Conversation.from_file(file_path)
46 |
47 | raise FileNotFoundError(f"Did not find {self.name}.conv")
48 |
49 | @property
50 | def histo(self):
51 | try:
52 | return Conversation.from_file(self._histo_path)
53 | except: # FIXME: have typed exception & exception handling, this can hide nasty bugs
54 | return Conversation([])
55 |
56 | def append_histo(self, msg):
57 | try:
58 | last = self.histo[-1]
59 | if last.content == msg.content and last.role == msg.role:
60 | return
61 | except: # FIXME
62 | pass
63 | (self.histo + msg).to_file(self._histo_path)
64 |
65 | def __repr__(self):
66 | return f"Agent({self.name}):\t" + ' -> '.join([f"{mw}" for mw in self._middlewares])
67 |
68 | def __call__(self, *args, **kwargs):
69 | """
70 | """
71 | if self.name == 'Null':
72 | return f"I'm {self.info}. I don't exist yet though, that's a null pattern branch to tell you you should implement it."
73 |
74 | self._middlewares = [MW[name.strip()]
75 | for name in self._middlewares_str.split('|')]
76 | from agentix import Exec
77 | ctx = {
78 | 'exec': Exec.get_instance(),
79 | 'agent': self,
80 | 'args': args,
81 | 'hops': 0,
82 | 'kwargs': kwargs,
83 | "base_discord_message_id": kwargs.get('user_discord_message_id', "-1"),
84 | "discord": {},
85 | } # 'agent': self, 'input': args, 'hops':0}
86 |
87 | conv = args
88 | current_project_message = None
89 | for mware in self._middlewares:
90 | dada1 = getattr(conv, 'discord', 'nope')
91 | if isinstance(conv, Conversation) and not hasattr(conv, 'discord'):
92 | conv.discord = {"base": kwargs.get(
93 | 'user_discord_message_id', '-1')}
94 | dada2 = getattr(conv, 'discord', 'nope')
95 | if type(conv) is tuple:
96 | conv = mware(ctx, *conv)
97 | else:
98 |
99 | dada3 = getattr(conv, 'discord', 'nope')
100 | conv = mware(ctx, conv)
101 | if isinstance(conv, Conversation) and not hasattr(conv, 'discord'):
102 | conv.discord = {"base": kwargs.get(
103 | 'user_discord_message_id', '-1')}
104 | dada4 = getattr(conv, 'discord', 'nope')
105 | dism = kwargs.get("discord_message_id", "-1")
106 | while isinstance(conv, Conversation) and conv.should_infer:
107 |
108 | dada5 = getattr(conv, 'discord', 'nope')
109 | # print(f"🤖[blue u b]{self.name}[/]_____\n{conv[-3:]}")
110 | # FIXME add a method that doesn't set should_infer to True
111 | self.append_histo(conv[-1])
112 | print(f"{conv.llm=}")
113 | tool_to_use = 'llm' if not conv.llm.startswith(
114 | 'o1') else "llm_no_stream"
115 |
116 | if "aider" in conv.llm:
117 | tool_to_use = conv.llm + '_tool'
118 | dada6 = getattr(conv, 'discord', None)
119 | conv = conv.rehop(
120 | Tool[tool_to_use](conv),
121 | 'assistant'
122 | )
123 | if dada6 and not hasattr(conv, 'discord'):
124 | conv.discord = dada6
125 | dada7 = getattr(conv, 'discord', 'nope')
126 |
127 | # TODO: allow conv.rehop(model="gpt-3.5-turbo")
128 | content = conv
129 | if isinstance(conv, Conversation):
130 | content = conv[-1].content
131 | role = conv[-1].role
132 | Tool['HF_send_message'](f"""■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
133 | ### Agent {self.name}: ({role}):
134 |
135 | {content}
136 | """, 'debug')
137 | self.append_histo(conv[-1])
138 | ctx['hops'] += 1
139 | conv.should_infer = False
140 | dada8 = getattr(conv, 'discord', None)
141 | conv = mware(ctx, conv)
142 | if isinstance(conv, Conversation) and dada8 and not hasattr(conv, 'discord'):
143 | conv.discord = dada8
144 | dada9 = getattr(conv, 'discord', 'nope')
145 |
146 | return conv
147 |
148 | def clear_histo(self):
149 | """Clears the history by saving an empty Conversation."""
150 | Conversation([]).to_file(self._histo_path)
151 |
152 | def __repr__(self):
153 | return f"Agent['{self.name}']"
154 |
155 | def setinfo(self, info):
156 | self.info = info
157 |
158 |
159 | Agent('Null', '')
160 |
--------------------------------------------------------------------------------
/cogni/cli.py:
--------------------------------------------------------------------------------
1 | """CLI interface for Cogni using Fire and Rich."""
2 | import os
3 | import sys
4 | import fire
5 | from rich.console import Console
6 | from rich.panel import Panel
7 | from rich.prompt import Confirm, Prompt
8 | from rich.theme import Theme
9 | import subprocess
10 |
11 | console = Console(theme=Theme({
12 | "info": "cyan",
13 | "warning": "yellow",
14 | "error": "red bold"
15 | }))
16 |
17 | if sys.version_info < (3, 8):
18 | console.print("[error]Python 3.8 or higher is required")
19 | sys.exit(1)
20 |
21 |
22 | class CogniCLI:
23 | """Cogni command line interface."""
24 |
25 | def create(self, name: str):
26 | """Create a new Cogni project.
27 |
28 | Args:
29 | name: Name of the project to create
30 | """
31 | console.print(Panel(f"Creating new project: [bold blue]{name}[/]"))
32 | # TODO: Implement project creation
33 |
34 | def init(self):
35 | """Initialize a Cogni project in the current directory."""
36 | console.print(Panel("Initializing Cogni project"))
37 |
38 | # Get project name
39 | project_name = Prompt.ask("Enter project name")
40 | use_boilerplate = Confirm.ask("Create boilerplate ShellAgent?", default=True)
41 | init_git = Confirm.ask("Initialize git repository?", default=True)
42 | add_toolbox = Confirm.ask("Add cogni toolbox (recommended)?", default=True)
43 |
44 | # Create project directory
45 | os.makedirs(project_name, exist_ok=True)
46 |
47 | # Create main.py
48 | with open(f"{project_name}/main.py", "w") as f:
49 | f.write('from cogni import Agent\n\n')
50 | f.write('def main():\n')
51 | f.write(' pass\n\n')
52 | f.write('if __name__ == "__main__":\n')
53 | f.write(' Agent["ShellAgent"]("what is the current dir ?")')
54 |
55 | if use_boilerplate:
56 | # Create agent directory structure
57 | agent_base = f"{project_name}/agents/ShellAgent"
58 | os.makedirs(f"{agent_base}/agents", exist_ok=True)
59 | os.makedirs(f"{agent_base}/tools", exist_ok=True)
60 | os.makedirs(f"{agent_base}/prompts", exist_ok=True)
61 | os.makedirs(f"{agent_base}/middlewares", exist_ok=True)
62 |
63 | # Create agent files
64 | with open(f"{agent_base}/agents/ShellAgent.py", "w") as f:
65 | f.write('from cogni import Agent\n\n')
66 | f.write(
67 | 'Agent("ShellAgent", "prompt_histo|gpt4omini|shellagent_loop")\n')
68 |
69 | with open(f"{agent_base}/middlewares/shellagent_loop.py", "w") as f:
70 | f.write('from cogni import mw, Tool, use_tools, parse_tools\n\n')
71 | f.write('@mw\n')
72 | f.write('@use_tools\n')
73 | f.write('def shellagent_loop(ctx, conv):\n')
74 | f.write(' tool_uses = parse_tools(ctx)\n')
75 |
76 | f.write(' if tool_uses.error:\n')
77 | f.write(
78 | ' raise Exception(f"Tool use failed: {tool_uses.error}")\n')
79 | f.write(
80 | ' # Alternatively:\n')
81 | f.write(
82 | ' # return conv.rehop(f"Hey ShellAgent, tools encountered this error:{tool_uses.error}"):\n')
83 | f.write('\n if tool_uses.output:\n')
84 |
85 | f.write(' return conv.rehop(tool_uses.output[0])\n')
86 |
87 | f.write('\n return conv[-1].content\n')
88 |
89 | with open(f"{agent_base}/prompts/ShellAgent.conv", "w") as f:
90 | f.write('''
91 | system: You are ShellAgent, a shell AGI that performs cli operations.
92 | ## **Thinking**
93 | You'll think step by step using
94 | ## **Using Tool**
95 | To use a tool, simply include the tool use syntax in your output. You'll be prompted back with the output.
96 | /!\ ONLY ONE TOOL use per message
97 | /!\ One command at a time
98 |
99 | ## **Tools**
100 | ls -la
101 |
102 | ## **Reply**
103 |
104 | Use for your final reply
105 |
106 | __-__
107 |
108 | user: Can you tell me how much space I have left on / ?
109 |
110 | __-__
111 |
112 | assistant:
113 | - I should use `df` for that
114 | - I'll use "-h" to have it readable
115 | - I'll execute the command, I'll be prompted back with the result
116 | - At which point I should answer politely and give the information user asked for
117 |
118 |
119 | df -h /
120 |
121 | __-__
122 |
123 | system:## Output from tool `shell`
124 |
125 | ```
126 | Filesystem Size Used Avail Use% Mounted on
127 | /dev/sdc 1007G 586G 371G 62% /
128 | ```
129 |
130 | __-__
131 |
132 | assistant:
133 | - OK, so, `371G` should be the information user wanted
134 | - I have no more command to run
135 | - I'll reply to user :)
136 |
137 |
138 |
139 | The available space on `/` is `371G`.
140 |
141 |
142 | ''')
143 |
144 | with open(f"{agent_base}/tools/shell_tools.py", "w") as f:
145 | f.write('from cogni import tool\n\n')
146 | f.write('@tool\n')
147 | f.write('def echo(text: str) -> str:\n')
148 | f.write(' """Echo the input text"""\n')
149 | f.write(' return text\n')
150 |
151 | if init_git:
152 | subprocess.run(['git', 'init', project_name])
153 | if add_toolbox:
154 | subprocess.run(['git', 'submodule', 'add', 'https://github.com/BrutLogic/CogniToolBox.git', 'toolbox'],
155 | cwd=project_name)
156 | subprocess.run(['git', 'submodule', 'update', '--init', '--recursive'],
157 | cwd=project_name)
158 |
159 | console.print(f"[green]✓[/] Created Cogni project: {project_name}")
160 |
161 | def run(self, agent: str, input: str):
162 | """Run an agent with given input.
163 |
164 | Args:
165 | agent: Name of the agent to run
166 | input: Input text for the agent
167 | """
168 | console.print(Panel(f"Running agent [bold blue]{agent}[/]"))
169 | # TODO: Implement agent running
170 |
171 | def __str__(self):
172 | return """Cogni CLI
173 |
174 | Usage:
175 | cogni create Create a new project
176 | cogni init Initialize project in current directory
177 | cogni run Run an agent"""
178 |
179 |
180 | def main():
181 | """Main entry point for the Cogni CLI."""
182 | try:
183 | if len(sys.argv) == 1:
184 | console.print(CogniCLI())
185 | sys.exit(0)
186 | fire.Fire(CogniCLI)
187 | except Exception as e:
188 | console.print(f"[error]Error: {str(e)}")
189 | sys.exit(1)
190 |
191 |
192 | if __name__ == "__main__":
193 | main()
194 |
--------------------------------------------------------------------------------
/Agentix/agentix/utils/modules_crud.py:
--------------------------------------------------------------------------------
1 | from agentix import func, Func, Tool, Conf, ModuleInfo
2 | from typing import Optional
3 | from pydantic import BaseModel
4 | import os
5 | import yaml
6 | from agentix import func, Func, Tool, Conf, ModuleInfo
7 | from typing import Optional
8 | from pydantic import BaseModel
9 | import os
10 | import yaml
11 | from pathlib import Path
12 |
13 |
14 | def create_widget(module_info: ModuleInfo):
15 | widget_path = Path(module_info.module_path) / "widget"
16 | # Create directory structure
17 | for directory in "src,assets,templates".split(','):
18 | Tool['shell'](f"mkdir -p {widget_path}/{directory}")
19 |
20 | Tool['shell'](f"""
21 | cd {widget_path};
22 | npm init -y && npm pkg set \
23 | name="{module_info.name}" \
24 | version="{module_info.version}" \
25 | description="{module_info.description}" \
26 | main="src/index.js" \
27 | scripts.build="webpack --mode production" \
28 | scripts.start="webpack serve --mode development" \
29 | scripts.dev="webpack serve --mode development" && \
30 | npm install --save-dev webpack webpack-cli webpack-dev-server html-webpack-plugin html-webpack-inline-source-plugin style-loader css-loader
31 | """)
32 |
33 |
34 |
35 | # Create index.js
36 | with open(widget_path / "src/index.js", 'w') as f:
37 | f.write("""// Default widget implementation
38 | console.log('Widget loaded');
39 | """)
40 |
41 | # Create package.json
42 | package_json = {
43 | "name": module_info.name,
44 | "version": "1.0.0",
45 | "description": f"Widget for {module_info.name} module",
46 | "main": "src/index.js",
47 | "scripts": {
48 | "build": "webpack --mode production",
49 | "dev": "webpack --mode development --watch"
50 | },
51 | "author": module_info.author,
52 | "license": "ISC"
53 | }
54 |
55 | with open(widget_path / "package.json", 'w') as f:
56 | yaml.dump(package_json, f)
57 |
58 | # Install webpack
59 | Tool['shell'](
60 | f"cd {widget_path} && npm install --save-dev webpack webpack-cli")
61 |
62 | # Create webpack config
63 | webpack_config = """
64 | const path = require('path');
65 |
66 | module.exports = {
67 | entry: './src/index.js',
68 | output: {
69 | filename: 'bundle.js',
70 | path: path.resolve(__dirname, 'dist'),
71 | },
72 | };
73 | """
74 |
75 | with open(widget_path / "webpack.config.js", 'w') as f:
76 | f.write(webpack_config)
77 |
78 |
79 | def create_endpoints(module_info: ModuleInfo):
80 | endpoints_path = Path(module_info.module_path) / "endpoints"
81 | Tool['shell'](f"mkdir -p {endpoints_path}")
82 |
83 | with open(endpoints_path / f"{module_info.name}.py", 'w') as f:
84 | f.write(f"""from agentix import endpoint
85 |
86 | @endpoint
87 | def {module_info.name}():
88 | return "Not implemented"
89 | """)
90 |
91 |
92 | def create_agent(module_info: ModuleInfo):
93 | agent_path = Path(module_info.module_path) / "agent"
94 | for directory in "agents,middlewares,prompts,tools,tests".split(','):
95 | Tool['shell'](
96 | f"mkdir -p {agent_path}/{directory}")
97 |
98 | with open(agent_path / f"agents/{module_info.name}.py", 'w') as f:
99 | f.write(f"""from agentix import Agent
100 | Agent('{module_info.name}', 'prompt|gpt4|{module_info.name}_loop')
101 | """)
102 |
103 | with open(agent_path / f"middlewares/{module_info.name}_loop.py", 'w') as f:
104 | f.write(f"""from agentix import mw, Tool
105 |
106 | @mw
107 | def {module_info.name}_loop(ctx, conv):
108 | return conv
109 | """)
110 |
111 | with open(agent_path / f"prompts/{module_info.name}.conv", 'w') as f:
112 | f.write(f"""system: You're {module_info.name}.
113 | You're not yet implemented. The only reply you can give for now is:
114 | "Hey there :). My prompt is the default one created by the wizzard. Change it if you want me to do stuff"
115 | """)
116 |
117 |
118 | def create_manifest(module_info: ModuleInfo):
119 | manifest_path = Path(module_info.module_path) / "manifest.yml"
120 |
121 | # Determine components
122 | components = []
123 | if module_info.agent:
124 | components.append("agent")
125 | if module_info.widget:
126 | components.append("widget")
127 | if module_info.endpoints:
128 | components.append("endpoints")
129 |
130 | manifest_content = {
131 | "name": module_info.name,
132 | "version": module_info.version,
133 | # You might want to add this to ModuleInfo
134 | "description": module_info.description,
135 | "author": module_info.author,
136 | "license": "Internal",
137 |
138 | "components": components,
139 |
140 | "dependencies": [], # You might want to add this to ModuleInfo
141 |
142 | "documentation": {
143 | "usage": "This module was created using Agentix wizard. Please update this documentation."
144 | },
145 |
146 | "logo": "assets/logo.png" # Default value
147 | }
148 |
149 | with open(manifest_path, 'w') as f:
150 | yaml.dump(manifest_content, f, sort_keys=False,
151 | default_flow_style=False)
152 |
153 |
154 |
155 | @func
156 | def init_module(module_info: ModuleInfo):
157 | module_info.module_path = str(Path(Conf.GA_modules_dir) / module_info.name)
158 | Tool['shell'](f"mkdir -p {module_info.module_path}")
159 |
160 | create_manifest(module_info)
161 |
162 | if module_info.agent:
163 | create_agent(module_info)
164 |
165 | if module_info.widget:
166 | create_widget(module_info)
167 |
168 | if module_info.endpoints:
169 | create_endpoints(module_info)
170 |
171 |
172 |
173 |
174 | @func
175 | def get_all_GA_modules(root_path):
176 | result = {}
177 |
178 | # Convert to absolute path if not already
179 |
180 | # Walk through all directories
181 | for dirpath, dirnames, filenames in os.walk(root_path):
182 | # Check if manifest.yml exists in current directory
183 | manifest_path = os.path.join(dirpath, 'manifest.yml')
184 | if 'manifest.yml' in filenames:
185 | try:
186 | # Read and parse the manifest file
187 | with open(manifest_path, 'r') as f:
188 | manifest_content = yaml.safe_load(f)
189 |
190 | # Get directory name
191 | dir_name = os.path.basename(dirpath)
192 |
193 | # Add to result dictionary
194 | result[dir_name] = {
195 | "path": '/'.join(dirpath.split('/')[-3:]),
196 | "manifest": manifest_content
197 | }
198 | except Exception as e:
199 | raise
200 | print(f"Error processing {manifest_path}: {str(e)}")
201 | continue
202 |
203 | return result
204 |
205 |
206 |
207 | @func
208 | def list_available_agents():
209 | ...
210 |
211 |
212 | @func
213 | def create_new_agent():
214 | ...
215 |
216 |
217 | @func
218 | def list_available_modules():
219 | ...
220 |
221 |
--------------------------------------------------------------------------------
/README2.md:
--------------------------------------------------------------------------------
1 | # Cogni
2 |
3 | # UPDATE
4 |
5 | I'm still feeding the repo
6 |
7 | # IMPORTANT NOTE 1
8 | If your reading this, I'm currently refactoring my code and feeding the repo.
9 |
10 | It's not usable yet, and I'm actively working on the doc (and this README.md).
11 |
12 | You can still read it though, it will give you an idea of what it's all about. Just be aware that it will take a few days (probably a week) to be a usable/well documented project.
13 |
14 | If you want to chat, come to my [Discord](https://discord.gg/eXtysN5HAH) :) !
15 |
16 | I'm super glad for your interest in my stuff.
17 |
18 | # IMPORTANT NOTE 2
19 |
20 | My initial plan was to feed this README as I feed the repo, but I'm not sure yet how to structure it properly.
21 |
22 | For that reason I'll leave it as it is and work on:
23 |
24 | ## Quick Start
25 |
26 | [How Cogni works (If you're in a hurry)](doc/quickstart/how-cogni-works.md)
27 |
28 | [Installation](doc/quickstart/install.md)
29 |
30 | [Prompting primitives](doc/quickstart/prompting.md)
31 |
32 | [Your first Cogni project](doc/quickstart/project_init.md)
33 |
34 | [Your first tool](doc/quickstart/first_tool.md)
35 |
36 | [Your first agent: ShellAgent](doc/quickstart/first_agent.md)
37 |
38 | [Your first swarm: DiscordAgent](doc/quickstart/first_swarm.md)
39 |
40 | [Handling states](doc/quickstart/states.md)
41 |
42 | [Handling Endpoint](doc/quickstart/states.md)
43 |
44 |
45 |
46 | ## What is Cogni?
47 |
48 | Cogni is a framework focusing on low code/low boilerplate implementation of LLM agents.
49 |
50 | ### Yeah but, why not LangChain though?
51 |
52 | I wouldn't go that far as saying `LangChain==Cancer`.
53 |
54 | LangChain/LangGraph allow for creating agents and orchestrating flow and communication.
55 | One key assumption of Cogni is that agentic execution flow has the same requirements and complexity as code; and therefore Agents should be created, managed, and orchestrated as code and by code.
56 |
57 | Which, as a side effect, allows for borrowing ideas from domains like web dev, where best practices are mature.
58 |
59 | ## How it works
60 |
61 | Cogni is built on the principle of "Agents as Functions," allowing for a modular and composable approach to AI agent development.
62 |
63 | ### Hide complexity
64 |
65 | Do Large Language Models have any amount of complexity?
66 |
67 | If your answer includes "Matrix multiplication" or "Attention map caching", I would have a simple objection: **I don't care**
68 |
69 | When you implement agents, for all you care, LLMs are magic black boxes that take text as input and return text.
70 |
71 | For all we care, from our coder point of view, LLMs are as simple as:
72 |
73 | ```python
74 | def chat_completion(prompt: str, model: str = "gpt-3.5-turbo") -> str:
75 | response = openai.ChatCompletion.create(
76 | model=model,
77 | messages=[
78 | {"role": "system", "content": "You are a helpful assistant."},
79 | {"role": "user", "content": prompt}
80 | ]
81 | )
82 | return response.choices[0].message['content'].strip()
83 | ```
84 |
85 | ### Going further
86 |
87 | Our goal as coders should be that, from any given scope, all dependencies are magic black boxes with clear specifications.
88 |
89 | ### Everything is a function
90 |
91 | We'll define *Agent* as a black box that takes any number of inputs of any type, and returns either nothing, or anything of any type, and potentially has *side effects*.
92 |
93 | In other terms, agents can be thought as **functions**. This allows for powerful control flows:
94 |
95 | (\*Note: `function` as in "a Python function"; as opposed to more rigorous definition, like in functional programming)
96 |
97 | ```python
98 | from cogni import Agent
99 |
100 | # Process tasks in a loop
101 | def do_complex_stuff(task_description:str):
102 | for sub_task in Agent['task_splitter'](task_description):
103 | Agent['worker'](sub_task)
104 |
105 | ```
106 |
107 | ### Magic Imports
108 |
109 |
110 | Cogni uses automatic discovery to make components available globally:
111 |
112 | **With Cogni, we're free from the burden of imports**
113 |
114 | Anywhere in your project:
115 |
116 | ```python
117 | # project/it/can/be/any/path/fliddleblooksh.py
118 | from cogni import tool
119 |
120 | @tool
121 | def add_two_ints(a:int, b:int)->int:
122 | return int(a) + int(b)
123 | ```
124 |
125 | Anywhere else:
126 |
127 | ```python
128 | # project/some/other/file/anywhere.py
129 | from cogni import Tool
130 |
131 | print(
132 | Tool['add_two_ints'](2, 3)
133 | )#> 5
134 | ```
135 |
136 | ### Tools System
137 |
138 | Tools are standalone functions that can be used by coder and agents:
139 |
140 | ```python
141 | # agents/SomeAgents/tools/someagent_tools.py
142 | from cogni import tool
143 |
144 | @tool
145 | def fetch_weather(city: str) -> dict:
146 | """Get weather data for a city"""
147 | return weather_api.get(city)
148 |
149 | ```
150 |
151 | ```python
152 | # somefile.py
153 | from cogni import Tool
154 |
155 | print(Tool['fetch_weather']('Paris'))
156 | ```
157 |
158 |
159 |
160 | ### Install
161 |
162 | ````bash
163 | git clone https://github.com/BrutLogic/cogni.git&&cd cogni
164 | python3 -m pip install
165 | ### Creating an Agent
166 | Agents are created by combining a prompt template (.conv file) with middleware:
167 |
168 | ```bash
169 | cd myproject
170 | cogni create_agent
171 | ````
172 |
173 | ### Middleware Flow
174 |
175 | Middlewares form a processing chain, each receiving and returning a conversation:
176 |
177 | ```python
178 | @mw
179 | def myagent_loop(ctx, conv):
180 | # Access conversation history
181 | last_msg = conv[-1].content
182 |
183 | # Use tools
184 | result = Tool['some_tool'](user_msg)
185 |
186 | # Continue conversation with rehop
187 | return conv.rehop(
188 | f"I found this: {result}",
189 | )
190 | ```
191 |
192 | ### Creating a Complete Agent
193 |
194 | 1. Project Structure:
195 |
196 | ```
197 | agents/
198 | my_agent/
199 | prompts/
200 | my_agent.conv # Prompt template
201 | middlewares/
202 | process.py # Custom middleware
203 | tools/
204 | helpers.py # Agent-specific tools
205 | ```
206 |
207 | 2. Prompt Template (my\_agent.conv):
208 |
209 | ```
210 | system: You are MyAgent, specialized in {domain}.
211 | Your capabilities include: {capabilities}
212 |
213 | user: {user_input}
214 | ```
215 |
216 | 3. Middleware (process.py):
217 |
218 | ```python
219 | from cogni import mw, Tool
220 |
221 | @mw
222 | def process(ctx, conv):
223 | # Process user input
224 | data = Tool['helper'](conv[-1].content)
225 |
226 | # Continue conversation
227 | return conv.rehop(f"Processed: {data}")
228 | ```
229 |
230 | 4. Tools (helpers.py):
231 |
232 | ```python
233 | from cogni import tool
234 |
235 | @tool
236 | def helper(input: str) -> str:
237 | return f"Processed {input}"
238 | ```
239 |
240 | 5. Agent Registration:
241 |
242 | ```python
243 | from cogni import Agent
244 |
245 | Agent('my_agent', 'prompt|gpt4|process')
246 | ```
247 |
248 | 6. Usage:
249 |
250 | ```python
251 | from cogni import Agent
252 |
253 | response = Agent['my_agent']("Hello!")
254 | ```
255 |
256 | ### Testing
257 |
258 | Run the test suite:
259 |
260 | ```bash
261 | pytest tests/
262 | ```
263 |
264 | ### License
265 |
266 | MIT License
267 |
268 |
--------------------------------------------------------------------------------
/Agentix/bricks/tools/ProjectRepr.py:
--------------------------------------------------------------------------------
1 | from agentix import tool, Tool, State
2 |
3 |
4 | @tool
5 | def current_project():
6 | """Get the current project data."""
7 | current_project_name = State['projects']['current']
8 | return State['projects']['projects'][current_project_name]
9 |
10 |
11 | @tool
12 | def current_task():
13 | """Get the current task data."""
14 | project = current_project()
15 | current_task_name = project['current_task']
16 | if current_task_name and project['tasks']:
17 | return project['tasks'][current_task_name]
18 | return None
19 |
20 |
21 | def all_projects_as_md():
22 | """Generate markdown for all projects list."""
23 | all_proj = State['projects']['projects'].to_dict()
24 | md = ["# All Projects\n"]
25 | for pname in all_proj.keys():
26 | md.append(f"- `{pname}`")
27 | return '\n'.join(md)
28 |
29 |
30 | def project_files_as_md(project):
31 | """Generate markdown for project files sections."""
32 | md = []
33 |
34 | # Files to add
35 | md.append("\n## Files to Add")
36 | if project['files_to_add']:
37 | for file in project['files_to_add']:
38 | md.append(f"- `{file}`")
39 | else:
40 | md.append("- *No files to add*")
41 |
42 | # Files to drop
43 | md.append("\n## Files to Drop")
44 | if project['files_to_drop']:
45 | for file in project['files_to_drop']:
46 | md.append(f"- `{file}`")
47 | else:
48 | md.append("- *No files to drop*")
49 |
50 | return md
51 |
52 |
53 | @tool
54 | def task_as_md():
55 | """Generate markdown for a single task."""
56 | task_data = Tool['current_task']()
57 | if not task_data:
58 | return "\n**No current task yet**\n"
59 | md = [
60 | f'''## Current Task (`{task_data['name']}`) **{task_data['title']}**'''
61 | ]
62 | md.append('### Description')
63 | md.append(task_data['description'])
64 |
65 | md.append('\n### Specs')
66 |
67 | if not 'specs' in task_data:
68 | md.append('*No specs yet*')
69 | else:
70 | md.append(task_data['specs'])
71 |
72 | md.append("\n### Checklist:")
73 |
74 | if not 'checklist' in task_data:
75 | md.append('*No checklist items yet*')
76 |
77 | for item_id in task_data.get('checklist_order', []):
78 | md.append('\n')
79 | item = task_data['checklist'][item_id]
80 | checkbox = "[x]" if item['checked'] else "[ ]"
81 | md.append(f"- {checkbox} (`{item_id}`) {item['name']} ")
82 | if item['status']:
83 | md.append(f" - Status: {item['status']}")
84 |
85 | return "\n".join(md)
86 |
87 |
88 | def aider_sessions_as_md(project, current_task_data):
89 | """Generate markdown for aider sessions."""
90 | aider_sessions = {
91 | 'omni': project['aider_session']['omni']
92 | }
93 | task = current_task()
94 | if task:
95 | tname = task.name
96 | if f"{tname}_ask" in State['Aiders']:
97 | aider_sessions['ask'] = f"{tname}_ask"
98 |
99 | if f"{tname}_code" in State['Aiders']:
100 | aider_sessions['code'] = f"{tname}_code"
101 |
102 | md = ['\n#### aider sessions']
103 | for name, sess_name in aider_sessions.items():
104 | md.append(f"- **{name}**: {sess_name}")
105 | return "\n".join(md)
106 |
107 |
108 | @tool
109 | def cwd():
110 | return Tool['current_project']()['base_dir']
111 |
112 |
113 | @tool
114 | def project_as_md():
115 | """Convert the current project state to a markdown formatted string."""
116 | project = current_project()
117 | current_task_data = current_task()
118 |
119 | md = []
120 |
121 | # Add all projects section
122 | md.append(all_projects_as_md())
123 |
124 | # Project header and base directory
125 | md.extend([
126 | f"\n# Current Project: {project['name']}\n",
127 | f"**Base Directory:** `{project['base_dir']}`\n",
128 | ])
129 |
130 | # Add files sections
131 | # md.extend(project_files_as_md(project))
132 |
133 | # Add tasks section
134 | md.append("\n## Tasks")
135 |
136 | if project['tasks']:
137 | for task_name, task_data in project['tasks'].items():
138 | is_current = task_name == project['current_task']
139 | if not is_current:
140 | md.append(f"### (`{task_name}`) {task_data['title']}")
141 |
142 | md.append(task_as_md())
143 | else:
144 | md.append("\n*No tasks defined*")
145 |
146 | # Add aider sessions
147 | md.append(aider_sessions_as_md(project, current_task_data))
148 |
149 | # Add checklist with devlogs
150 |
151 | md.append("\n### Checklist")
152 |
153 | md.append(Tool['current_checklist_as_md']())
154 | md.append('\n\n# Next step (the action to perform)')
155 | if 'next_step' in project:
156 |
157 | md.append(project.next_step)
158 | else:
159 | md.append('**No next step yet**')
160 |
161 | return "\n".join(md)
162 |
163 |
164 | @tool
165 | def set_next_step(caption: str):
166 | Tool['current_project']()['next_step'] = caption
167 |
168 | return f"Next step set to `{caption}`"
169 |
170 |
171 | @tool
172 | def current_checklist():
173 | return Tool['current_task']().checklist
174 |
175 |
176 | @tool
177 | def current_checklist_as_md():
178 | md = ["#### current checklist item"]
179 | current_task = Tool['current_task']()
180 |
181 | if not 'checklist' in current_task:
182 | md.append('no checklist yet')
183 | return '\n'.join(md)
184 |
185 | # del current_task['checklist']
186 | # del current_task['checklist_order']
187 | # del current_task['current_cl_item']
188 |
189 | cl = current_task.checklist
190 |
191 | for task_id, task in cl.to_dict().items():
192 | if not task['checked']:
193 | current_task['current_cl_item'] = task_id
194 | break
195 |
196 | # cur_cl_id = 'oui'
197 | if not 'current_cl_item' in current_task \
198 | or not "checklist" in current_task \
199 | or not len(current_task['checklist']) \
200 | or not current_task['current_cl_item']\
201 | or not isinstance(current_task.current_cl_item, str):
202 | md.append('**No checklistitem selected**')
203 | return '\n'.join(md)
204 | else:
205 | cur_cl_id = current_task.current_cl_item
206 | print(cur_cl_id)
207 | md.append(current_task.current_cl_item +
208 | ' \t' +
209 | cl[cur_cl_id].name)
210 |
211 | md.append('\n#### All Checklist Items')
212 | for item_id in current_task.get('checklist_order', []):
213 | item = current_task['checklist'][item_id]
214 | if not len(item['name'].strip()):
215 | continue
216 |
217 | check_mark = "x" if item.get('checked', False) else " "
218 | status = f" ({item['status']})" if item.get('status') else ""
219 | md.append(
220 | f"- 【{check_mark}】(id=`{item_id}`) {item['name']}{status}. The id is `{item_id}`")
221 |
222 | # Add devlog entries if they exist
223 | if 'devlog' in item and item['devlog']:
224 | for log_entry in item['devlog']:
225 | md.append(f" > {log_entry}")
226 | return '\n'.join(md)
227 |
228 |
229 | @tool
230 | def current_checklist():
231 | return Tool['current_task']().checklist
232 |
233 | return str(Tool['current_task']().checklist)
234 |
235 |
236 | @tool
237 | def set_current_checklist_item(cl_item_id):
238 | Tool['current_task']().current_cl_item = cl_item_id
239 | return f"{cl_item_id} set as current"
240 |
--------------------------------------------------------------------------------