├── .gitbook.yaml
├── .gitignore
├── .vscode
└── settings.json
├── LICENSE.txt
├── README.md
├── agents
├── README.md
├── demodata
│ ├── frontend_data
│ │ ├── companies.html
│ │ └── frontend_data_queries.py
│ └── sync.toml
├── jetpack
│ ├── chat
│ │ ├── artifacts.html
│ │ ├── chat_completions.py
│ │ ├── chat_messages.html
│ │ ├── chat_queries.py
│ │ └── chat_wit.py
│ ├── coder
│ │ ├── coder_completions.py
│ │ ├── coder_wit.py
│ │ ├── retriever_completions.py
│ │ └── retriever_wit.py
│ ├── design
│ │ ├── design.css
│ │ └── design.html
│ ├── frontend
│ │ ├── frontend_queries.py
│ │ ├── frontend_wit.py
│ │ └── index.html
│ ├── messages
│ │ ├── __init__.py
│ │ ├── messages_chat.py
│ │ └── messages_coder.py
│ ├── sync.toml
│ └── tailwind.config.js
├── lib
│ ├── completions
│ │ ├── __init__.py
│ │ ├── completion_helpers.py
│ │ ├── function_builder.py
│ │ └── prompt_builder.py
│ ├── envs
│ │ ├── env_api.py
│ │ └── env_wit.py
│ └── tools
│ │ ├── __init__.py
│ │ ├── data_parser.py
│ │ └── store_wrapper.py
└── tests
│ ├── jetpack
│ ├── __init__.py
│ ├── helpers_runtime.py
│ ├── test_coder.py
│ ├── test_coder_job.py
│ ├── test_retriever.py
│ └── test_retriever_completions.py
│ └── lib
│ └── tools
│ └── test_data_parser.py
├── aos
├── cli
│ ├── README.md
│ ├── __init__.py
│ ├── actor_push.py
│ ├── agents_file.py
│ ├── cli.py
│ ├── cli_agent.py
│ ├── cli_local.py
│ ├── cli_start.py
│ ├── cli_store.py
│ ├── root_actor_offline.py
│ ├── sync_file.py
│ └── sync_item.py
├── grit
│ ├── README.md
│ ├── __init__.py
│ ├── object_model.py
│ ├── object_model_v2.py
│ ├── object_serialization.py
│ ├── object_store.py
│ ├── references.py
│ ├── stores
│ │ ├── __init__.py
│ │ ├── file
│ │ │ ├── __init__.py
│ │ │ ├── file_object_store.py
│ │ │ └── file_references.py
│ │ ├── lmdb
│ │ │ ├── __init__.py
│ │ │ ├── lmdb_object_store.py
│ │ │ ├── lmdb_references.py
│ │ │ └── shared_env.py
│ │ └── memory
│ │ │ ├── __init__.py
│ │ │ ├── memory_object_store.py
│ │ │ └── memory_references.py
│ └── tree_helpers.py
├── runtime
│ ├── __init__py
│ ├── apex
│ │ ├── README.md
│ │ ├── apex_api_pb2.py
│ │ ├── apex_api_pb2.pyi
│ │ ├── apex_api_pb2_grpc.py
│ │ ├── apex_client.py
│ │ ├── apex_core_loop.py
│ │ ├── apex_server.py
│ │ ├── apex_workers_pb2.py
│ │ ├── apex_workers_pb2.pyi
│ │ ├── apex_workers_pb2_grpc.py
│ │ └── test.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── actor_executor.py
│ │ ├── core_loader.py
│ │ ├── discovery_executor.py
│ │ ├── external_storage_executor.py
│ │ ├── ipc.py
│ │ ├── presence_executor.py
│ │ ├── query_executor.py
│ │ ├── request_response_executor.py
│ │ ├── resolvers.py
│ │ ├── root_executor.py
│ │ └── runtime.py
│ ├── crypto
│ │ └── did_key.py
│ ├── store
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agent_object_store.py
│ │ ├── agent_references.py
│ │ ├── agent_store_pb2.py
│ │ ├── agent_store_pb2.pyi
│ │ ├── agent_store_pb2_grpc.py
│ │ ├── base_client.py
│ │ ├── grit_store_pb2.py
│ │ ├── grit_store_pb2.pyi
│ │ ├── grit_store_pb2_grpc.py
│ │ ├── lmdb_backend.py
│ │ ├── main.py
│ │ ├── main_refs.py
│ │ ├── main_two.py
│ │ ├── store_client.py
│ │ └── store_server.py
│ ├── web
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agents_client.py
│ │ └── web_server.py
│ └── worker
│ │ ├── README.md
│ │ ├── worker_api_pb2.py
│ │ ├── worker_api_pb2.pyi
│ │ ├── worker_api_pb2_grpc.py
│ │ ├── worker_client.py
│ │ ├── worker_core_loop.py
│ │ └── worker_server.py
└── wit
│ ├── __init__.py
│ ├── data_model.py
│ ├── data_model_utils.py
│ ├── default_wits.py
│ ├── discovery.py
│ ├── errors.py
│ ├── external_storage.py
│ ├── presence.py
│ ├── prototype.py
│ ├── query.py
│ ├── request_response.py
│ ├── wit_api.py
│ ├── wit_routers.py
│ ├── wit_state.py
│ └── wut.py
├── conftest.py
├── docs
├── README.md
├── SUMMARY.md
├── articles
│ └── manifesto.md
├── design
│ ├── design.md
│ └── wit-resolution.md
├── images
│ ├── agents-fig-1-jetpack-demo.png
│ ├── agents-fig-2-jetpack-demo.png
│ ├── design-fig-1-runtime-grit.png
│ ├── design-fig-2-actors.png
│ ├── design-fig-3-grit-model.png
│ ├── design-fig-4-wit-function.png
│ ├── design-fig-5-wit-function-genesis.png
│ ├── design.excalidraw
│ └── screenshot.png
└── thinking
│ ├── DID.md
│ ├── README.md
│ ├── bootstrapping.md
│ ├── coder_actor.md
│ ├── decorators_design.md
│ ├── distributed.md
│ ├── distributed_runtime.md
│ ├── error_handling.md
│ ├── gRPC.md
│ ├── indexing.md
│ ├── inspiration.md
│ ├── manifests.md
│ ├── object_problem.md
│ ├── presence.md
│ ├── pruning.md
│ ├── queries.md
│ ├── rails.md
│ ├── scaling_actors.md
│ ├── search_actor.md
│ ├── sync_vs_async.md
│ ├── todo.md
│ ├── update.md
│ ├── webserver.md
│ ├── wit_loading.md
│ └── workers.md
├── poetry.lock
├── protogen.sh
├── protos
├── README.md
└── aos
│ └── runtime
│ ├── apex
│ ├── apex_api.proto
│ └── apex_workers.proto
│ ├── store
│ ├── agent_store.proto
│ └── grit_store.proto
│ └── worker
│ └── worker_api.proto
├── pyproject.toml
└── tests
├── cli
├── helpers_sync.py
├── test_actor_push.py
├── test_sync_file_push.py
├── test_sync_item_push_paths.py
└── test_sync_item_push_values.py
├── grit
├── stores
│ ├── test_file_object_store.py
│ ├── test_file_references.py
│ ├── test_lmdb_object_store.py
│ └── test_lmdb_references.py
└── test_object_serialization.py
├── perf
├── __init__.py
├── perf.py
└── perf_grid.py
├── runtime
├── core
│ ├── helpers_runtime.py
│ ├── test_actor_executor.py
│ ├── test_core_loader.py
│ ├── test_resolvers_core.py
│ ├── test_runtime.py
│ ├── test_runtime_msgs_fanout.py
│ ├── test_runtime_msgs_pending.py
│ ├── test_runtime_msgs_request_response.py
│ ├── test_runtime_msgs_single.py
│ ├── test_runtime_wit_in_core.py
│ ├── test_runtime_wit_prototype.py
│ └── test_runtime_wit_sync.py
├── store
│ └── test_grit_store.py
└── worker
│ └── test_worker.py
├── web
├── helpers_web.py
├── test_web_server.py
├── test_web_server_grit.py
├── test_web_server_sse.py
├── test_web_server_wit.py
└── test_web_server_wit_query.py
└── wit
├── helpers_wit.py
├── test_data_model_blob.py
├── test_data_model_inbox.py
├── test_data_model_outbox.py
├── test_data_model_tree.py
├── test_router_message.py
├── test_router_query.py
├── test_wit_state.py
└── test_wit_step.py
/.gitbook.yaml:
--------------------------------------------------------------------------------
1 | root: ./docs/
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.grit/
6 | # C extensions
7 | *.so
8 |
9 | # Agent OS
10 | .aos/
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | pip-wheel-metadata/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | .python-version
88 |
89 | # pipenv
90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
93 | # install all needed dependencies.
94 | #Pipfile.lock
95 |
96 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
97 | __pypackages__/
98 |
99 | # Celery stuff
100 | celerybeat-schedule
101 | celerybeat.pid
102 |
103 | # SageMath parsed files
104 | *.sage.py
105 |
106 | # Environments
107 | .env
108 | .venv
109 | env/
110 | venv/
111 | ENV/
112 | env.bak/
113 | venv.bak/
114 |
115 | # Spyder project settings
116 | .spyderproject
117 | .spyproject
118 |
119 | # Rope project settings
120 | .ropeproject
121 |
122 | # mkdocs documentation
123 | /site
124 |
125 | # mypy
126 | .mypy_cache/
127 | .dmypy.json
128 | dmypy.json
129 |
130 | # Pyre type checker
131 | .pyre/
132 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.analysis.extraPaths": [
3 | ],
4 | "python.testing.pytestArgs": [
5 | "tests"
6 | ],
7 | "python.testing.unittestEnabled": false,
8 | "python.testing.pytestEnabled": true
9 | }
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The MIT License
2 |
3 | Copyright (c) 2023 Lukas Buehler
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/agents/demodata/frontend_data/companies.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
21 |
22 |
23 |
24 | Companies and Revenue
25 |
26 |
27 |
28 | Company |
29 | Contact |
30 | Country |
31 | Revenue |
32 |
33 |
34 | QuantumTech Solutions |
35 | Alice Hamilton |
36 | USA |
37 | $5,000,000 |
38 |
39 |
40 | Nova Industries |
41 | Jordan Smith |
42 | UK |
43 | $4,500,000 |
44 |
45 |
46 | Orion Enterprises |
47 | Carlos Diaz |
48 | Spain |
49 | $6,200,000 |
50 |
51 |
52 | Pixel Dynamics |
53 | Chloe Lee |
54 | South Korea |
55 | $3,800,000 |
56 |
57 |
58 | Delta Innovations |
59 | Liam O'Conner |
60 | Ireland |
61 | $4,100,000 |
62 |
63 |
64 | GlobeSoft Tech |
65 | Mohammed Rahman |
66 | India |
67 | $5,300,000 |
68 |
69 |
70 | Azure Dynamics |
71 | Emily Stone |
72 | Australia |
73 | $4,700,000 |
74 |
75 |
76 | SolarWave Systems |
77 | Andre Duval |
78 | France |
79 | $5,500,000 |
80 |
81 |
82 | Polaris Solutions |
83 | Beatrice Schmidt |
84 | Germany |
85 | $4,600,000 |
86 |
87 |
88 | Atlas Tech |
89 | Rafael Ortiz |
90 | Argentina |
91 | $3,900,000 |
92 |
93 |
94 | Neptune Networking |
95 | Sophia Ivanova |
96 | Russia |
97 | $5,200,000 |
98 |
99 |
100 | Zenith Dynamics |
101 | Lucas Tan |
102 | Singapore |
103 | $4,400,000 |
104 |
105 |
106 | Helios Tech |
107 | Grace Wang |
108 | China |
109 | $6,000,000 |
110 |
111 |
112 | Nimbus Networks |
113 | Mason Abdi |
114 | Kenya |
115 | $3,600,000 |
116 |
117 |
118 | Stratos Systems |
119 | Ella Silva |
120 | Brazil |
121 | $5,100,000 |
122 |
123 |
124 | Horizon Innovations |
125 | Noah Jensen |
126 | Denmark |
127 | $4,300,000 |
128 |
129 |
130 | Galaxy Gateways |
131 | Amelia Suzuki |
132 | Japan |
133 | $4,800,000 |
134 |
135 |
136 | MeteorTech |
137 | Luke Njoroge |
138 | South Africa |
139 | $4,200,000 |
140 |
141 |
142 | Cosmic Code |
143 | Isabella Rossi |
144 | Italy |
145 | $5,400,000 |
146 |
147 |
148 | LunarSoft |
149 | Benjamin Vargas |
150 | Mexico |
151 | $4,900,000 |
152 |
153 |
154 |
155 |
156 |
157 |
--------------------------------------------------------------------------------
/agents/demodata/frontend_data/frontend_data_queries.py:
--------------------------------------------------------------------------------
1 |
2 | from jinja2 import Environment, TemplateNotFound, select_autoescape
3 | from aos.grit import *
4 | from aos.wit import *
5 |
app = Wit()

@app.query("companies")
async def on_query_companies(core:Core, actor_id:ActorId):
    """Serve the rendered companies demo table as an HTML page."""
    page = await render_template(core, "/code/companies.html")
    return page
11 |
env = Environment(autoescape=select_autoescape())

async def render_template(core:Core, template_path, **kwargs) -> BlobObject:
    """Load a Jinja2 template from the actor core and render it into an HTML blob.

    Raises:
        TemplateNotFound: when no blob exists at *template_path*.
    """
    source_blob = await core.get_path(template_path)
    if source_blob is None:
        raise TemplateNotFound(f"Template not found: {template_path}")
    html = env.from_string(source_blob.get_as_str()).render(**kwargs)
    result = BlobObject.from_str(html)
    result.set_headers_empty()
    result.set_header('Content-Type', 'text/html')
    return result
--------------------------------------------------------------------------------
/agents/demodata/sync.toml:
--------------------------------------------------------------------------------
1 | [agent]
2 | name = "demodata"
3 |
4 | [all]
5 | external_paths=["./agents/demodata/frontend_data"]
6 |
7 | [[actors]]
8 | name="frontend_data"
9 | push="./agents/demodata/frontend_data:/code"
10 | wit="external:agents.demodata.frontend_data.frontend_data_queries:app"
11 | wit_query="external:agents.demodata.frontend_data.frontend_data_queries:app"
--------------------------------------------------------------------------------
/agents/jetpack/chat/artifacts.html:
--------------------------------------------------------------------------------
1 | {% if artifacts|length == 0 %}
2 | No artifacts yet
3 | {% else %}
4 |
9 | {% endif %}
--------------------------------------------------------------------------------
/agents/jetpack/chat/chat_messages.html:
--------------------------------------------------------------------------------
1 | {% for message in messages %}
2 |
3 |
4 | {% if message.from_name == 'user' %}
5 | You
6 | {% else %}
7 | Agent
8 | {% endif %}
9 |
10 |
11 | {{message.html|safe}}
12 |
13 |
14 | {% endfor %}
15 |
16 |
--------------------------------------------------------------------------------
/agents/jetpack/chat/chat_queries.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from jinja2 import Environment, TemplateNotFound, select_autoescape
3 | from aos.grit import *
4 | from aos.wit import *
5 | from agents.jetpack.messages import ChatMessage
6 | from agents.jetpack.chat.chat_wit import ChatState
7 |
8 | logger = logging.getLogger(__name__)
9 |
app = Wit()

@app.query("messages")
async def on_query_messages(core:Core, messagekey:str=None):
    """Render the chat message list, optionally narrowed to specific message keys."""
    message_filter = messagekey
    messages_tree = await core.gett("messages")
    messages = await ChatMessage.load_from_tree(messages_tree, message_filter)
    logger.info(f"messages: {len(messages)}, filter: {message_filter}")
    return await render_template(core, "/templates/chat_messages.html", messages=messages)
18 |
@app.query("artifacts")
async def on_query_artifacts(core:Core, state:ChatState, ctx:QueryContext):
    """List the artifacts (spec, plan, code) the chat's coder has produced so far."""
    url_path = f"../../{ctx.actor_id.hex()}/query"
    # pair each artifact entry with its availability flag, then keep the available ones
    candidates = [
        (state.code_spec is not None,
         {"title": "Specification", "url": f"{url_path}/artifact-spec", "emoji": "📋"}),
        (state.code_plan is not None and state.code_plan.plan is not None,
         {"title": "Plan", "url": f"{url_path}/artifact-plan", "emoji": "📃"}),
        (state.code_deploy is not None and state.code_deploy.code is not None,
         {"title": "Code", "url": f"{url_path}/artifact-code", "emoji": "▶️"}),
    ]
    artifacts = [entry for available, entry in candidates if available]
    return await render_template(core, "/templates/artifacts.html", artifacts=artifacts)
30 |
@app.query("artifact-spec")
async def on_query_artifacts_spec(core:Core, state:ChatState):
    """Return the raw code specification artifact from the chat state."""
    logger.info("on_query_artifacts_spec")
    spec = state.code_spec
    return spec
35 |
@app.query("artifact-plan")
async def on_query_artifacts_plan(core:Core, state:ChatState):
    """Return the coder's plan artifact from the chat state."""
    logger.info("on_query_artifacts_plan")
    plan = state.code_plan.plan
    return plan
40 |
@app.query("artifact-code")
async def on_query_artifacts_code(core:Core, state:ChatState):
    """Return the deployed code artifact from the chat state."""
    logger.info("on_query_artifacts_code")
    code = state.code_deploy.code
    return code
45 |
env = Environment(autoescape=select_autoescape())

async def render_template(core:Core, template_path, **kwargs) -> BlobObject:
    """Render the Jinja2 template stored at *template_path* in the core.

    Raises:
        TemplateNotFound: when no blob exists at the given core path.
    """
    blob = await core.get_path(template_path)
    if blob is None:
        raise TemplateNotFound(f"Template not found: {template_path}")
    template = env.from_string(blob.get_as_str())
    output = BlobObject.from_str(template.render(**kwargs))
    output.set_headers_empty()
    output.set_header('Content-Type', 'text/html')
    return output
--------------------------------------------------------------------------------
/agents/jetpack/frontend/frontend_queries.py:
--------------------------------------------------------------------------------
1 |
2 | from jinja2 import Environment, TemplateNotFound, select_autoescape
3 | from aos.grit import *
4 | from aos.wit import *
5 | from agents.jetpack.frontend.frontend_wit import FrontendState
6 |
app = Wit()

@app.query("web")
async def on_query_web(ctx:QueryContext, state:FrontendState):
    """Render the main frontend page for the currently selected chat.

    The chat is selected with a 'chat' query argument (string or
    single-element list); it defaults to 'main'.
    """
    current_chat = 'main'
    if 'chat' in ctx.query_args_json:
        requested = ctx.query_args_json['chat']
        current_chat = requested[0] if isinstance(requested, list) else requested
    template_kwargs = {
        'agent_id': ctx.agent_id.hex(),
        'frontend_id': ctx.actor_id.hex(),
        'chat_actors': {k:v.hex() for k,v in state.chat_actors.items()},
        'chat_titles': state.chat_titles,
        'current_chat': current_chat,
        'current_chat_id': state.chat_actors[current_chat].hex(),
        'current_chat_title': state.chat_titles[current_chat],
    }
    return await render_template(ctx.core, "/templates/index.html", **template_kwargs)
27 |
env = Environment(autoescape=select_autoescape())

async def render_template(core:Core, template_path, **kwargs) -> BlobObject:
    """Fetch a Jinja2 template blob from the core, render it, and wrap it as HTML.

    Raises:
        TemplateNotFound: when the core has no blob at *template_path*.
    """
    stored = await core.get_path(template_path)
    if stored is None:
        raise TemplateNotFound(f"Template not found: {template_path}")
    rendered_text = env.from_string(stored.get_as_str()).render(**kwargs)
    rendered = BlobObject.from_str(rendered_text)
    rendered.set_headers_empty()
    rendered.set_header('Content-Type', 'text/html')
    return rendered
--------------------------------------------------------------------------------
/agents/jetpack/frontend/frontend_wit.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import re
3 | from aos.grit import *
4 | from aos.wit import *
5 | from agents.jetpack.messages import *
6 | from agents.jetpack.chat.chat_wit import create_chat_actor
7 |
8 | logger = logging.getLogger(__name__)
9 |
class FrontendState(WitState):
    """Persistent state of the frontend actor.

    NOTE(review): these are class-level mutable defaults; WitState
    presumably snapshots/persists them per instance — confirm.
    """
    # chat slug -> ActorId of the chat actor handling that conversation
    chat_actors:dict[str, ActorId] = {}
    # chat slug -> human-readable chat title
    chat_titles:dict[str, str] = {}
13 |
app = Wit()

@app.genesis_message
async def on_genesis(msg:InboxMessage, ctx:MessageContext, state:FrontendState) -> None:
    """Bootstrap the frontend: create the first chat on genesis."""
    logger.info("received genesis")
    await create_chat("Main", ctx, state)
21 |
@app.message('create-chat')
async def on_create_chat(chat:dict, ctx:MessageContext, state:FrontendState) -> None:
    """Handle a 'create-chat' message: spin up a chat actor and reply with its slug."""
    logger.info("received create chat")
    new_slug = await create_chat(chat['name'], ctx, state)
    ctx.outbox.add_reply_msg(ctx.message, new_slug, mt="new-chat")
28 |
async def create_chat(title:str, ctx:MessageContext, state:FrontendState):
    """Create a chat actor for *title* and register it in the frontend state.

    Returns the slug under which the new chat was registered.
    """
    slug = slugify(title)
    actor_id = await create_chat_actor(ctx, name=slug)
    state.chat_actors[slug] = actor_id
    state.chat_titles[slug] = title
    return slug
36 |
def slugify(text:str):
    """Convert *text* to a URL/name-safe slug.

    Lowercases the input and collapses every run of non-alphanumeric
    characters (including '_') into a single hyphen. Hyphens are also
    stripped from both ends so trailing punctuation (e.g. "New Chat!")
    no longer produces a dangling separator in the slug.
    """
    text = text.lower()
    return re.sub(r'[\W_]+', '-', text).strip('-')
--------------------------------------------------------------------------------
/agents/jetpack/messages/__init__.py:
--------------------------------------------------------------------------------
1 | from .messages_chat import *
2 | from .messages_coder import *
--------------------------------------------------------------------------------
/agents/jetpack/messages/messages_chat.py:
--------------------------------------------------------------------------------
1 | import mistune
2 | from uuid import UUID, uuid1
3 | from datetime import datetime
4 | from pydantic import BaseModel
5 | from aos.wit import *
6 |
7 | #==============================================================
8 | # Chat Messages
9 | #==============================================================
class ChatMessage(BaseModel):
    """A single chat message as persisted in the actor's 'messages' tree."""
    id: UUID
    content: str
    timestamp: datetime
    from_name: str
    from_id: str|None = None

    @property
    def html(self):
        """Markdown-rendered HTML of the message content."""
        return mistune.html(self.content)

    @classmethod
    def from_user(cls, content:str):
        """Build a message authored by the human user, stamped now."""
        return cls(id=uuid1(), timestamp=datetime.now(), from_name='user', content=content)

    @classmethod
    def from_actor(cls, content:str, actor_id:ActorId|None = None):
        """Build a message authored by an agent actor (optionally tagged with its id)."""
        sender = actor_id.hex() if actor_id else None
        return cls(
            id=uuid1(), timestamp=datetime.now(), from_name='assistant', from_id=sender, content=content)

    @classmethod
    async def load_from_tree(cls, tree:TreeObject, message_filter:list[str]=None) -> list['ChatMessage']:
        """Load the messages stored in *tree*, filtered and sorted chronologically."""
        keys = filter_and_sort_message_keys(tree.keys(), message_filter)
        loaded = []
        for key in keys:
            blob = await tree.getb(key)
            loaded.append(cls(**blob.get_as_json()))
        return loaded
40 |
41 | def filter_and_sort_message_keys(message_keys:list[str], message_filter:list[str]|None = None) -> list[str]:
42 | #cleanup the filters
43 | if(message_filter is None):
44 | message_filter = []
45 | if(isinstance(message_filter, str)):
46 | message_filter = [message_filter]
47 | message_filter = [k for k in message_filter if k != 'null' and k != 'undefined']
48 | #filter the actual message keys
49 | if(len(message_filter) > 0):
50 | message_keys = [k for k in message_keys if k in message_filter]
51 | #convert keys to UUIDs (for sorting)
52 | message_keys = [UUID(k) for k in message_keys]
53 | message_keys = sorted(message_keys, key= lambda x: x.time)
54 | #convert back to string
55 | return [str(k) for k in message_keys]
56 |
57 |
58 |
--------------------------------------------------------------------------------
/agents/jetpack/messages/messages_coder.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 |
3 | #==============================================================
4 | # Code Generation Messages
5 | #==============================================================
class CodeRequest(BaseModel):
    """Initial request to generate code for a described task."""
    task_description: str
    input_examples: list[str]|None = None
9 |
class CodeSpec(BaseModel):
    """Full specification of the code to generate.

    input_spec/output_spec are JSON-schema-style object dicts describing
    the generated function's arguments and return value.
    """
    task_description: str
    input_examples: list[str]|None = None
    data_examples: dict[str,str]|None = None
    input_spec: dict|None = None
    output_spec: dict|None = None

    @staticmethod
    def empty_inputoutput_spec() -> dict:
        """Return an empty JSON-schema object spec."""
        return {"properties": {}, "type": "object" }
20 |
class CodePlanned(BaseModel):
    """Emitted once the coder has produced an implementation plan."""
    plan: str
23 |
class CodeDeployed(BaseModel):
    """Emitted once generated code has been deployed and is runnable."""
    code: str
26 |
class CodeExecution(BaseModel):
    """Request to run the deployed code."""
    #provide one or the other; if both are provided, the arguments will be used
    input_arguments: dict|None = None
    input_description: str|None = None
31 |
class CodeExecuted(BaseModel):
    """Result of a successful code execution."""
    input_arguments: dict
    output: dict
35 |
class CodeFailed(BaseModel):
    """Reported when executing the generated code raised errors."""
    errors: str
38 |
--------------------------------------------------------------------------------
/agents/jetpack/sync.toml:
--------------------------------------------------------------------------------
1 | [agent]
2 | name = "jetpack"
3 |
4 | [all]
5 | external_paths=["./agents/jetpack/frontend", "./agents/jetpack/chat", "./agents/jetpack/coder"]
6 |
7 | [[actors]]
8 | name="frontend"
9 | push=["./agents/jetpack/frontend/index.html:/templates/index.html"]
10 | wit_genesis="external:agents.jetpack.frontend.frontend_wit:app"
11 | wit="external:agents.jetpack.frontend.frontend_wit:app"
12 | wit_query="external:agents.jetpack.frontend.frontend_queries:app"
13 |
14 | [[actors]]
15 | name="chat"
16 | is_prototype=true
17 | push=["./agents/jetpack/chat/chat_messages.html:/templates/chat_messages.html", "./agents/jetpack/chat/artifacts.html:/templates/artifacts.html"]
18 | wit_genesis="external:agents.jetpack.chat.chat_wit:app"
19 | wit="external:agents.jetpack.chat.chat_wit:app"
20 | wit_query="external:agents.jetpack.chat.chat_queries:app"
21 |
22 | [[actors]]
23 | name="coder"
24 | is_prototype=true
25 | wit_genesis="external:agents.jetpack.coder.coder_wit:app"
26 | wit="external:agents.jetpack.coder.coder_wit:app"
27 |
28 | [[actors]]
29 | name="retriever"
30 | is_prototype=true
31 | wit_genesis="external:agents.jetpack.coder.retriever_wit:app"
32 | wit="external:agents.jetpack.coder.retriever_wit:app"
--------------------------------------------------------------------------------
/agents/jetpack/tailwind.config.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/agents/jetpack/tailwind.config.js
--------------------------------------------------------------------------------
/agents/lib/completions/__init__.py:
--------------------------------------------------------------------------------
1 | from .prompt_builder import PromptBuilder
2 | from .function_builder import FunctionBuilder
3 | from .completion_helpers import *
4 |
def config():
    """Load the OpenAI API key from the environment (optionally via a .env file)."""
    import os
    import openai
    from dotenv import load_dotenv
    load_dotenv()
    openai.api_key = os.environ.get("OPENAI_API_KEY")

config()
--------------------------------------------------------------------------------
/agents/lib/completions/completion_helpers.py:
--------------------------------------------------------------------------------
1 | import json
2 | import re
3 | from .function_builder import FunctionBuilder
4 | from .prompt_builder import PromptBuilder
5 |
def build_chat_kwargs(
    prompts:PromptBuilder,
    functions:FunctionBuilder|None=None,
    function_call:str|None=None,
) -> dict:
    """Assemble keyword arguments for an OpenAI chat-completion call.

    Only the pieces that were actually provided end up in the returned dict.
    """
    pieces = {
        'messages': prompts.build() if prompts is not None else None,
        'functions': functions.build() if functions is not None else None,
        'function_call': {"name": function_call} if function_call is not None else None,
    }
    return {key: value for key, value in pieces.items() if value is not None}
19 |
20 | def parse_completions(response:dict) -> list[str|tuple[str, dict]]:
21 | completions = []
22 | for choice in response['choices']:
23 | if(choice['finish_reason'] == 'stop' or choice['finish_reason'] == 'function_call'):
24 | function_call = choice['message'].get('function_call')
25 | if function_call is not None:
26 | function_name = function_call['name']
27 | arguments_str = function_call['arguments']
28 | arguments:dict = json.loads(arguments_str)
29 | completions.append((function_name, arguments))
30 | else:
31 | completions.append(choice['message'].get('content'))
32 | else:
33 | raise Exception(f"Unexpected finish_reason: {choice['finish_reason']}")
34 | return completions
35 |
def parse_completion(response:dict) -> str|tuple[str, dict]:
    """Parse a response that is expected to contain exactly one completion.

    Raises:
        Exception: if the response holds zero or multiple completions.
    """
    completions = parse_completions(response)
    if len(completions) != 1:
        raise Exception(f"Expected only one completion, but got {len(completions)}")
    return completions[0]
42 |
def parse_message_completion(response:dict) -> str:
    """Parse a single completion and require it to be a plain message string."""
    completion = parse_completion(response)
    if not isinstance(completion, str):
        raise Exception(f"Expected a message completion, not a function call, got: {completion}")
    return completion
49 |
def parse_function_completion(response:dict) -> tuple[str, dict]:
    """Parse a single completion and require it to be a function call tuple."""
    completion = parse_completion(response)
    if not isinstance(completion, tuple):
        raise Exception(f"Expected a function call completion, not a message, got: {completion}")
    return completion
56 |
def parse_code_completion(response:dict) -> str:
    """Parse a single message completion and strip any markdown code fences."""
    completions = parse_completions(response)
    if len(completions) != 1:
        raise Exception(f"Expected only one completion, but got {len(completions)}")
    if not isinstance(completions[0], str):
        raise Exception(f"Expected a message completion, not a function call, got: {completions[0]}")
    return strip_code(completions[0])
66 |
def strip_code(code:str) -> str:
    """Extract source code from a markdown code block, if present.

    If *code* contains a fenced block, the fenced content is returned
    (preferring a ```python fence when one exists); otherwise *code*
    is returned unchanged.

    Raises:
        Exception: if a fence marker is present but no complete
            block can be matched.
    """
    if "```" not in code:
        return code
    # BUG FIX: the original tested `if "```python" or "```Python" in code:`,
    # which is always true (a non-empty string literal is truthy), so
    # non-python fences always hit the python-only regex and raised.
    if "```python" in code or "```Python" in code:
        regex = r"```[pP]ython\n([\s\S]*?)```"
    else:
        regex = r"```([\s\S]*?)```"
    match = re.search(regex, code)
    if match:
        return match.group(1)
    raise Exception("Could not find code in markdown code block")
--------------------------------------------------------------------------------
/agents/lib/completions/function_builder.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | import json
3 | from typing import Type
4 | from pydantic import BaseModel
5 |
class FunctionBuilder:
    """Collects OpenAI function-calling schemas and emits them as a list."""

    def __init__(self):
        self.functions = []

    def build(self) -> list[dict]:
        """Return the accumulated function definitions."""
        return self.functions

    def _clean_message(self, message:str, clean_whitespace:bool=True) -> str:
        """Normalize a description: dedent when requested, '' for None."""
        if message is None:
            return ""
        return inspect.cleandoc(message) if clean_whitespace else message

    def register_function(self,
        name:str,
        description:str,
        parameters:dict|Type[BaseModel],
    ):
        """Add a function definition.

        *parameters* may be a JSON-schema dict or a pydantic model class
        (which is converted to its JSON schema); None means no parameters.
        """
        if parameters is None:
            parameters = json.loads('{"type": "object", "properties": {}}')
        if inspect.isclass(parameters) and issubclass(parameters, BaseModel):
            parameters = parameters.model_json_schema()
        entry = {
            "name": name,
            "description": self._clean_message(description),
            "parameters": parameters,
        }
        self.functions.append(entry)
        return self

    def append_to_last_description(self, description:str, newline:bool=True, clean_whitespace:bool=True):
        """Append extra text to the most recently registered function's description."""
        if not self.functions:
            raise ValueError("Must have at least one function to append description to")
        last = self.functions[-1]
        if newline:
            last['description'] += "\n"
        last['description'] += self._clean_message(description, clean_whitespace)
        return self
45 |
--------------------------------------------------------------------------------
/agents/lib/completions/prompt_builder.py:
--------------------------------------------------------------------------------
1 |
2 | import json
3 | import inspect
4 | from pydantic import BaseModel
5 |
class PromptBuilder:
    """Builds a chat-completion message list (optional system message plus
    conversation messages) with a fluent interface."""

    def __init__(self):
        self.system_message = ""
        self.messages = []

    def build(self) -> list[dict]:
        """Return the messages in API order: the system message (if any) first."""
        messages = []
        if self.system_message:
            messages.append({
                'role': 'system',
                'content': self.system_message
            })
        messages += self.messages
        return messages

    def _clean_message(self, message:str, clean_whitespace:bool=True) -> str:
        # None becomes ""; otherwise optionally strip common leading whitespace.
        if message is None:
            return ""
        if clean_whitespace:
            return inspect.cleandoc(message)
        else:
            return message

    def append_system(self, message:str):
        """Append to the system message (newline-terminated)."""
        self.system_message += self._clean_message(message) + "\n"
        return self

    def append_msg(self, message:str, role:str='user', clean_whitespace:bool=True):
        """Append a new conversation message with the given role."""
        self.messages.append({
            'role': role,
            'content': self._clean_message(message, clean_whitespace)
        })
        return self

    def append_to_prev(self, message:str, newline:bool=True, clean_whitespace:bool=True):
        """Append to the previous message. If there is no previous message, raise an error."""
        if len(self.messages) == 0:
            raise ValueError("Must have at least one message to append new message to")
        if newline:
            self.messages[-1]['content'] += "\n"
        self.messages[-1]['content'] += self._clean_message(message, clean_whitespace)
        return self

    def append_to_prev_code(self, code:str, newline:bool=True, codetype:str="python"):
        """Append code to the previous message, wrapped in a markdown fence
        unless the code already contains one."""
        if len(self.messages) == 0:
            raise ValueError("Must have at least one message to append code to")
        if "```" not in code:
            code = f"```{codetype}\n"+self._clean_message(code)+"\n```"
        if newline:
            self.messages[-1]['content'] += "\n"
        self.messages[-1]['content'] += code
        return self

    def append_to_prev_json(self, data:dict|BaseModel, newline:bool=True):
        """Serialize `data` to JSON and append it to the previous message as a
        fenced ```json block.

        Raises:
            ValueError: if `data` is neither a dict nor a pydantic model.
        """
        if isinstance(data, BaseModel):
            data_str = data.model_dump_json()
        elif isinstance(data, dict):
            data_str = json.dumps(data)
        else:
            raise ValueError("Must be dict or BaseModel")
        # Bug fix: data_str is already serialized JSON; the original wrapped it
        # in another json.dumps(), producing a double-encoded, quote-escaped string.
        return self.append_to_prev_code(data_str, newline=newline, codetype="json")
69 |
70 |
--------------------------------------------------------------------------------
/agents/lib/envs/env_api.py:
--------------------------------------------------------------------------------
1 |
2 | # This should become an env wrapper that makes it easier to work with parent envs in a synchronous way.
3 |
class Env:
    """Placeholder for a synchronous wrapper around parent envs; not yet implemented."""
    def __init__(self) -> None:
        pass
--------------------------------------------------------------------------------
/agents/lib/tools/__init__.py:
--------------------------------------------------------------------------------
1 | from .store_wrapper import StoreWrapper
2 | from .data_parser import DataParser
--------------------------------------------------------------------------------
/agents/lib/tools/data_parser.py:
--------------------------------------------------------------------------------
1 | import openai
2 | from agents.lib.completions import *
3 |
class DataParser:
    """Converts an arbitrary input string (HTML, JSON, prose, code, ...) into a
    dict conforming to a caller-supplied JSON schema, via an LLM function call."""

    async def parse(self, input:str, output_schema:dict|str, query:str) -> dict:
        """Parse `input` into the shape of `output_schema`, guided by the
        natural-language `query`.

        Raises:
            Exception: if the model calls a function other than 'structured'.
        """
        pb = PromptBuilder()
        pb.append_system(
            """You are a very smart assistant. Your task is to parse the given input string into the provided schema.""")
        pb.append_msg(
            """The input string can be anything. For example, it can be parsed HTML from a website, JSON, prose text, or even code.
Whatever the format is, make sure to take into account the desired output schema and the natural language query.""")
        pb.append_msg(
            """Here is the input string:""").append_to_prev_code(input, codetype="")
        pb.append_msg(
            "Here is natural language query: "+query)

        # Force the model to answer through a single function call whose
        # parameters are the desired output schema.
        fb = FunctionBuilder()
        fb.register_function(
            "structured",
            "Based on the context so far and the input string, the structured function takes this data as input.",
            output_schema)

        resp = await openai.ChatCompletion.acreate(
            model="gpt-4-0613",
            temperature=0,
            **build_chat_kwargs(pb, fb, function_call="structured"),
        )

        fn_name, arguments = parse_function_completion(resp)
        if fn_name != "structured":
            raise Exception(f"Expected function name structured, but got {fn_name}")
        return arguments
--------------------------------------------------------------------------------
/agents/lib/tools/store_wrapper.py:
--------------------------------------------------------------------------------
1 | import filetype
2 | from aos.grit import *
3 | from aos.wit import *
4 |
class StoreWrapper:
    """Flat, hex-string-id based facade over the grit object store, easy to
    understand and to explain to an LLM."""

    def __init__(self, store:ObjectStore):
        self.messages = []
        self.store = store

    async def load_bytes(self, id:str) -> bytes | None:
        """Load raw blob bytes by hex id; None if the blob does not exist."""
        found = await self.store.load(to_object_id(id))
        return found.data if found else None

    async def load_str(self, id:str) -> str | None:
        """Load a blob and decode it as a string; None if missing."""
        found = await self.store.load(to_object_id(id))
        if found is None:
            return None
        return BlobObject(found).get_as_str()

    async def load_json(self, id:str) -> dict | None:
        """Load a blob and parse it as JSON; None if missing."""
        found = await self.store.load(to_object_id(id))
        if found is None:
            return None
        return BlobObject(found).get_as_json()

    async def store_bytes(self, data:bytes, content_type:str|None=None) -> str:
        """Persist raw bytes, sniffing the Content-Type when not provided.
        Returns the hex id of the stored blob."""
        obj = BlobObject.from_bytes(data)
        if content_type is None:
            content_type = filetype.guess_mime(data)
        if content_type is not None:
            obj.set_header("Content-Type", content_type)
        stored_id = await obj.persist(self.store)
        return stored_id.hex()

    async def store_str(self, data:str) -> str:
        """Persist a string blob; returns its hex id."""
        stored_id = await BlobObject.from_str(data).persist(self.store)
        return stored_id.hex()

    async def store_json(self, data:dict) -> str:
        """Persist a dict as a JSON blob; returns its hex id."""
        stored_id = await BlobObject.from_json(data).persist(self.store)
        return stored_id.hex()
45 |
--------------------------------------------------------------------------------
/agents/tests/jetpack/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/agents/tests/jetpack/__init__.py
--------------------------------------------------------------------------------
/agents/tests/jetpack/helpers_runtime.py:
--------------------------------------------------------------------------------
1 | from aos.wit import *
2 | from aos.runtime.core import *
3 |
async def wait_for_message_type(runtime:Runtime, mt:str) -> Message:
    """Block until the runtime observes a message whose 'mt' header equals `mt`.

    Returns the matching Message, or None if the runtime closes the
    subscription (signalled by a None item on the queue) before a match arrives.
    """
    with runtime.subscribe_to_messages() as queue:
        while True:
            mailbox_update = await queue.get()
            if(mailbox_update is None):
                break
            # a mailbox update is (sender_id, recipient_id, message_id)
            message_id = mailbox_update[2]
            message:Message = await runtime.store.load(message_id)
            if message.headers is not None and "mt" in message.headers:
                print("test: message received:", message.headers["mt"], message_id.hex())
                if message.headers["mt"] == mt:
                    return message
    print("test: runtime closed subscription.")
    return None
--------------------------------------------------------------------------------
/agents/tests/jetpack/test_coder.py:
--------------------------------------------------------------------------------
1 | from aos.wit import *
2 | from aos.runtime.core import *
3 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
4 | from jetpack.messages import CodeSpec
5 | from jetpack.coder.coder_wit import app, create_coder_actor
6 | from .helpers_runtime import *
7 |
# run with: poetry run pytest -s -o log_cli=true agents/tests/
9 |
async def test_coder__img_resize():
    """End-to-end smoke test: boot an in-memory runtime, create a coder actor,
    send it a CodeSpec, and wait until the generated code is deployed."""
    store = MemoryObjectStore()
    refs = MemoryReferences()
    # resolve the wit function by name instead of loading it from a grit core
    resolver = ExternalResolver(store)
    resolver.register('gen_wit', app)

    runtime = Runtime(store, refs, resolver=resolver)
    running_task = asyncio.create_task(runtime.start())

    #genesis of the generator actor
    genesis_message = await create_coder_actor(
        store, "image_coder",
        None,
        None,
        'gen_wit')

    await runtime.inject_message(genesis_message)
    await asyncio.sleep(0.2)  # give the runtime time to process the genesis message

    #send the spec of what should be coded
    upscale_spec = CodeSpec(
        task_description="""
        Can you download an image and upscales it to 2000x2000 pixels while maintaining the aspect ratio?
        Then saves the image again.""",
        input_examples=[
            "Use the following image: https://i.imgur.com/06lMSD5.jpeg",
            "Can you try this https://i.imgur.com/E0IOEPx.jpeg"
        ],)
    await runtime.inject_message(OutboxMessage.from_new(genesis_message.recipient_id, upscale_spec, mt="spec"))

    # block until the coder actor reports that it deployed the generated code
    message = await wait_for_message_type(runtime, "code_deployed")
    #todo, load image id and see what is in it

    print("stopping test runtime")
    #stop
    runtime.stop()
    await asyncio.wait_for(running_task, timeout=1)
--------------------------------------------------------------------------------
/agents/tests/jetpack/test_coder_job.py:
--------------------------------------------------------------------------------
1 | from aos.wit import *
2 | from aos.runtime.core import *
3 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
4 | from jetpack.messages import CodeSpec, CodeExecution, CodeExecuted
5 | from jetpack.coder.coder_wit import *
6 | from .helpers_runtime import *
7 |
8 | # run with: poetry run pytest -s -o log_cli=true agents/tests/
9 |
async def test_coder__img_resize_as_job():
    """Runs the coder as a one-shot job: the actor is created with both the
    CodeSpec and a CodeExecution, and should reply with 'code_executed'."""
    store = MemoryObjectStore()
    refs = MemoryReferences()
    # resolve the wit function by name instead of loading it from a grit core
    resolver = ExternalResolver(store)
    resolver.register('coder_wit', app)

    runtime = Runtime(store, refs, resolver=resolver)
    running_task = asyncio.create_task(runtime.start())

    upscale_spec = CodeSpec(
        task_description="""Download an image from a provided URL, and upscales the image to 2000x2000 pixels while maintaining the aspect ratio? Use Image.LANCZOS. Then save the image again and return the id of the persisted image.""",
        input_spec=json.loads('{"properties": {"img_url": {"title": "Img Url", "type": "string"}}, "required": ["img_url"], "title": "Input", "type": "object"}'),
        output_spec=json.loads('{"properties": {"id": {"title": "Store Id", "type": "string"}}, "required": ["id"], "title": "Output", "type": "object"}'),
        input_examples=[
            "Use the following image: https://i.imgur.com/06lMSD5.jpeg",
        ],)

    job_exec = CodeExecution(
        input_arguments={"img_url": "https://i.imgur.com/E0IOEPx.jpeg"},
    )

    #genesis of the generator actor
    create_actor_message = await create_coder_actor(
        store,
        "test_coder_job",
        upscale_spec,
        job_exec,
        "coder_wit")

    await runtime.inject_message(create_actor_message)
    await asyncio.sleep(0.1)  # let the runtime process the genesis message

    # wait for the job result and unwrap the CodeExecuted model from the blob
    message = await wait_for_message_type(runtime, "code_executed")
    result:CodeExecuted = (await BlobObject.from_blob_id(runtime.store, message.content)).get_as_model(CodeExecuted)
    print("execution output:", result.output)
    assert result.output["id"] is not None

    print("stopping test runtime")
    #stop
    runtime.stop()
    await asyncio.wait_for(running_task, timeout=1)
--------------------------------------------------------------------------------
/agents/tests/jetpack/test_retriever.py:
--------------------------------------------------------------------------------
1 | from aos.wit import *
2 | from aos.runtime.core import *
3 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
4 | from jetpack.coder.retriever_wit import *
5 | from jetpack.coder.coder_wit import app as coder_app
6 | from jetpack.messages import CodeSpec
7 | from .helpers_runtime import *
8 |
# run with: poetry run pytest -s -o log_cli=true agents/tests/
10 |
async def test_retrieve():
    """End-to-end: a retriever actor (backed by a coder actor) should work a
    CodeSpec that requires fetching external data, until it signals 'complete'."""
    store = MemoryObjectStore()
    refs = MemoryReferences()
    # resolve both wits by name instead of loading them from grit cores
    resolver = ExternalResolver(store)
    resolver.register('retriever_wit', app)
    resolver.register('coder_wit', coder_app)

    runtime = Runtime(store, refs, resolver=resolver)
    running_task = asyncio.create_task(runtime.start())

    # NOTE(review): the task references a locally running demodata server and a
    # hard-coded user path — this test only works in that environment; confirm.
    spec = CodeSpec(
        task_description="""Get the data from this site (http://127.0.0.1:5001/ag/demodata/wit/actors/frontend_data/query/companies) and append it to csv file at '/home/lukas/test.csv'""",
        input_spec=json.loads('{"properties": {}, "type": "object"}'),
        output_spec=json.loads('{"properties": {"rows_updated": {"title": "How many rows were appended", "type": "string"}}, "required": ["rows_updated"], "title": "Output", "type": "object"}'),
    )

    #genesis of the generator actor
    create_actor_message = await create_retriever_actor(
        store,
        spec,
        None,
        "retriever_wit")

    await runtime.inject_message(create_actor_message)
    await asyncio.sleep(0.1)  # let the runtime process the genesis message

    message = await wait_for_message_type(runtime, "complete")
    # result:CodeExecuted = (await BlobObject.from_blob_id(runtime.store, message.content)).get_as_model(CodeExecuted)
    # print("execution output:", result.output)
    # assert result.output["id"] is not None

    print("stopping test runtime")
    #stop
    runtime.stop()
    await asyncio.wait_for(running_task, timeout=1)
--------------------------------------------------------------------------------
/agents/tests/jetpack/test_retriever_completions.py:
--------------------------------------------------------------------------------
1 | import json
2 | from jetpack.coder.retriever_completions import *
3 | from jetpack.messages.messages_coder import CodeSpec
4 |
async def test_coder_retrieval__none_needed():
    """A self-contained task (explicit image URL in the examples) should need
    no extra retrieval, so retrieve_completion returns None."""
    spec = CodeSpec(
        task_description="""Download an image from a provided URL, and upscales the image to 2000x2000 pixels while maintaining the aspect ratio? Use Image.LANCZOS. Then save the image again and return the id of the persisted image.""",
        input_spec=json.loads('{"properties": {"img_url": {"title": "Img Url", "type": "string"}}, "required": ["img_url"], "title": "Input", "type": "object"}'),
        output_spec=json.loads('{"properties": {"id": {"title": "Store Id", "type": "string"}}, "required": ["id"], "title": "Output", "type": "object"}'),
        input_examples=[
            "Use the following image: https://i.imgur.com/06lMSD5.jpeg",
        ],)
    result = await retrieve_completion(
        spec.task_description,
        spec.input_examples,
    )
    assert result is None
18 |
19 |
async def test_coder_retrieval__two_needed():
    """A task referencing an external URL and a local file should yield exactly
    those two retrieval targets."""
    spec = CodeSpec(
        task_description="""Get the data from this site (http://127.0.0.1:5001/ag/demodata/wit/actors/frontend_data/query/companies) and append it to excel sheet at /home/me/report.xlsx""",
        input_spec=json.loads('{"properties": {}, "type": "object"}'),
        output_spec=json.loads('{"properties": {"rows_updated": {"title": "How many rows were appended", "type": "string"}}, "required": ["rows_updated"], "title": "Output", "type": "object"}'),
    )
    result = await retrieve_completion(
        spec.task_description,
        spec.input_examples,
    )
    assert result is not None
    assert len(result) == 2
    assert "http://127.0.0.1:5001/ag/demodata/wit/actors/frontend_data/query/companies" in result
    assert "/home/me/report.xlsx" in result
--------------------------------------------------------------------------------
/agents/tests/lib/tools/test_data_parser.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 | from agents.lib.tools import DataParser
3 |
4 | html = """
5 |
6 |
7 |
8 |
25 |
26 |
27 |
28 | HTML Table
29 |
30 |
31 |
32 | Company |
33 | Contact |
34 | Country |
35 |
36 |
37 | Alfreds Futterkiste |
38 | Maria Anders |
39 | Germany |
40 |
41 |
42 | Centro comercial Moctezuma |
43 | Francisco Chang |
44 | Mexico |
45 |
46 |
47 | Ernst Handel |
48 | Roland Mendel |
49 | Austria |
50 |
51 |
52 | Island Trading |
53 | Helen Bennett |
54 | UK |
55 |
56 |
57 | Laughing Bacchus Winecellars |
58 | Yoshi Tannamuri |
59 | Canada |
60 |
61 |
62 | Magazzini Alimentari Riuniti |
63 | Giovanni Rovelli |
64 | Italy |
65 |
66 |
67 |
68 |
69 |
70 | """
71 |
# Expected parse result: one row of the demo HTML table above.
# (No docstring on purpose — pydantic would include it as the schema's
# "description", changing Output.model_json_schema().)
class Output(BaseModel):
    company: str
    contact: str
    country: str
76 |
async def test_website_parse(tmp_path):
    """DataParser should extract the row matching the query from the demo HTML table."""
    parser = DataParser()
    parsed = await parser.parse(
        html,
        Output.model_json_schema(),
        'The company in Mexico')

    assert parsed == {
        'company': 'Centro comercial Moctezuma',
        'contact': 'Francisco Chang',
        'country': 'Mexico'
    }
--------------------------------------------------------------------------------
/aos/cli/README.md:
--------------------------------------------------------------------------------
1 | # Agent OS CLI
2 |
3 | ## How to Install
4 | In `/usr/local/bin` create a script called `aos` with the following content:
5 | ```bash
6 | poetry -C "/home/pathtorepo/dev/agent-os/" run aos "$@"
7 | ```
--------------------------------------------------------------------------------
/aos/cli/__init__.py:
--------------------------------------------------------------------------------
1 | from . sync_item import *
2 | from . actor_push import *
3 | from . sync_file import *
4 |
--------------------------------------------------------------------------------
/aos/cli/agents_file.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | import os
3 | import tomlkit
4 | from tomlkit import TOMLDocument, table, aot, item
5 | from tomlkit.container import Container
6 |
7 |
# Functions to work with an agents.toml file
9 | # Utilizes https://github.com/sdispater/tomlkit to work with TOML data.
10 | #
11 | # The expected toml format is:
12 | # --------------------------
13 | # [[agents]]
14 | # alias = "agent_alias"
15 | # agent_id = "azxzjemxwzxzx" #hex id of the agent
16 | # point = 121231 #point key of the agent
17 | # --------------------------
18 |
@dataclass
class Agent:
    # One [[agents]] entry in the agents.toml file.
    alias:str     # human-friendly name for the agent
    agent_id:str  # hex id of the agent
    point:int     # point key of the agent
24 |
def load_agents(toml_file_path:str) -> list[Agent]:
    """Read the agents TOML file from disk and return its [[agents]] entries."""
    return loads_agents(_read_toml_file(toml_file_path))
28 |
def loads_agents(toml:str|TOMLDocument) -> list[Agent]:
    """Parse [[agents]] entries from a TOML string or an already-parsed document.

    Returns an empty list when the document has no "agents" array.
    """
    doc = _read_toml_string(toml) if isinstance(toml, str) else toml
    entries = doc.get("agents", None)
    return [] if entries is None else [Agent(**entry) for entry in entries]
38 |
def add_agent(toml_file_path:str, agent:Agent):
    """Append an agent entry to the TOML file.

    Raises:
        Exception: if an entry with the same alias already exists.
    """
    doc = _read_toml_file(toml_file_path)
    entries = doc.get("agents", None)
    if entries is None:
        # first agent: create the [[agents]] array-of-tables container
        entries = aot()
        doc.append("agents", entries)
    else:
        duplicates = [e for e in entries if e["alias"] == agent.alias]
        if duplicates:
            raise Exception(f"Agent with alias '{agent.alias}' already exists.")
    entries.append(item({
        "alias": agent.alias,
        "agent_id": agent.agent_id,
        "point": agent.point
    }))
    _write_toml_file(toml_file_path, doc)
56 |
57 |
def _read_toml_file(file_path) -> TOMLDocument:
    """Load and parse a TOML file (normalizing the path separator on Windows)."""
    with open(_convert_posix_to_win(file_path), 'r') as f:
        return _read_toml_string(f.read())
62 |
def _read_toml_string(toml_string) -> TOMLDocument:
    """Parse a TOML string into a style-preserving tomlkit document."""
    return tomlkit.loads(toml_string)
65 |
def _write_toml_file(file_path, doc:TOMLDocument):
    """Serialize the tomlkit document back to disk (style preserved)."""
    target = _convert_posix_to_win(file_path)
    with open(target, 'w') as f:
        f.write(doc.as_string())
70 |
71 | def _convert_posix_to_win(path:str) -> str:
72 | if os.name == "nt" and "/" in path:
73 | return path.replace("/", os.sep)
74 | return path
75 |
--------------------------------------------------------------------------------
/aos/cli/cli.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import asyncio
3 | import os
4 | import shutil
5 | import sys
6 | import click
7 | from .cli_local import cli as cli_local
8 | from .cli_start import cli as cli_start
9 | from .cli_agent import cli as cli_agent
10 | from .cli_store import cli as cli_store
11 |
12 | #print logs to console
13 | logging.basicConfig(level=logging.INFO)
14 |
15 | # Main CLI to work with agent projects.
16 | # It utilizes the 'click' library.
17 |
18 |
# Root command group; the actual functionality lives in the subcommands
# registered below. (No docstring on the group — click would surface it
# as help text, changing CLI output.)
@click.group()
def cli():
    pass

cli.add_command(cli_local, name="local")
cli.add_command(cli_start, name="start")
cli.add_command(cli_agent, name="agent")
cli.add_command(cli_store, name="store")

if __name__ == '__main__':
    cli(None)
--------------------------------------------------------------------------------
/aos/cli/cli_store.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import asyncio
3 | import os
4 | import click
5 | from dataclasses import dataclass
6 | from aos.cli.actor_push import ActorPush
7 | from aos.grit import *
8 | from aos.runtime.store import agent_store_pb2, grit_store_pb2
9 | from aos.runtime.apex import apex_api_pb2
10 | from aos.runtime.web.agents_client import AgentsClient
11 | from . import sync_file as sf
12 | from . import agents_file as af
13 | from .agents_file import Agent
14 |
15 | #print logs to console
16 | logging.basicConfig(level=logging.INFO)
17 |
18 | # Main CLI to work with agent projects.
19 | # It utilizes the 'click' library.
20 |
@dataclass
class StoreContext:
    # Shared state passed to all 'store' subcommands via click's ctx.obj.
    verbose:bool
    work_dir:str          # base directory; all other paths are derived from it
    aos_dir:str           # <work_dir>/.aos
    agents_file_path:str  # <aos_dir>/agents.toml
    apex_address:str      # host:port of the apex server

    def enforce_paths_exist(self):
        """Raise a ClickException if any of the expected paths is missing."""
        if not os.path.exists(self.work_dir):
            raise click.ClickException(f"work directory '{self.work_dir}' does not exist.")
        if not os.path.exists(self.aos_dir):
            raise click.ClickException(f"aos directory '{self.aos_dir}' does not exist.")
        if not os.path.exists(self.agents_file_path):
            raise click.ClickException(f"agents file '{self.agents_file_path}' does not exist.")
36 |
# Group entry point: resolves all paths relative to the work dir and stashes
# them in ctx.obj for the subcommands. (Comments instead of a docstring so the
# click help output stays unchanged.)
@click.group()
@click.pass_context
@click.option("--work-dir", "-d", help="Work directory. By default, uses the current directory. All other files and paths will be relative to this.")
@click.option("--apex-address", required=False, default="localhost:50052", help="Address of the apex server.")
@click.option("--verbose", "-v", is_flag=True, help="Will print verbose messages.")
def cli(ctx:click.Context, verbose:bool, work_dir:str|None, apex_address:str):
    if(work_dir is None):
        work_dir = os.getcwd()
    aos_dir = os.path.join(work_dir, ".aos")
    agents_file_path = os.path.join(aos_dir, "agents.toml")

    if(verbose):
        print(" work dir: " + work_dir)
        print(" aos dir: " + aos_dir)
        print(" agents file: " + agents_file_path)

    # Only the work dir is validated here; StoreContext.enforce_paths_exist
    # is available for subcommands that need the other paths to exist too.
    if(not os.path.exists(work_dir)):
        raise click.ClickException(f"Work directory '{work_dir}' (absolute: '{os.path.abspath(work_dir)}') does not exist.")

    ctx.obj = StoreContext(
        verbose=verbose,
        work_dir=work_dir,
        aos_dir=aos_dir,
        agents_file_path=agents_file_path,
        apex_address=apex_address)
62 |
63 | #===========================================================
64 | # 'agents' command
65 | #===========================================================
@cli.command()
@click.pass_context
@click.option("--running", "-r", is_flag=True, help="Only running agents.")
def agents(ctx:click.Context, running:bool):
    # Lists agents: all known ones (from the store) or, with --running,
    # only the currently running ones (from apex).
    print("-> Listing Agent")

    store_ctx:StoreContext = ctx.obj

    async def ainit():
        client = AgentsClient(store_ctx.apex_address)

        if not running:
            print("Getting all agents from store...")
            agents = await client.get_agents()
        else:
            print("Getting running agents from apex...")
            agents = await client.get_running_agents()

        # agents maps agent_id (bytes) -> point; print as a two-column table
        print(f"{'point':<10} {'agent_id':<20}")
        for agent_id, point in agents.items():
            print(f"{point:<10} {agent_id.hex():<20}")


    asyncio.run(ainit())
90 |
--------------------------------------------------------------------------------
/aos/cli/root_actor_offline.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from aos.grit import *
3 | from aos.grit import Mailbox
4 | from aos.wit import *
5 | from aos.runtime.core.actor_executor import ExecutionContext, ActorExecutor, _WitExecution, MailboxUpdate
6 | from aos.runtime.core.root_executor import create_or_load_root_actor
7 |
async def add_offline_message_to_root_outbox(
        object_store:ObjectStore,
        references:References,
        point:Point,
        msg:OutboxMessage,
        set_previous:bool=False,
        ) -> StepId:
    '''Dangerous! Do not use if the runtime is running!

    Appends `msg` to the root actor's outbox by writing a new step on top of
    the current step head (copy-on-write: mailbox and step objects are never
    mutated) and moving the step-head reference to it.

    Returns the id of the newly created step.
    '''
    #extra check
    agent_id, last_step_id = await create_or_load_root_actor(object_store, references, point)
    last_step = await object_store.load(last_step_id)
    if(last_step.outbox is None):
        last_step_outbox = {}
    else:
        last_step_outbox = await object_store.load(last_step.outbox)
    #set previous id if needed (signals never link to a previous message)
    if(msg.recipient_id in last_step_outbox and set_previous and not msg.is_signal):
        msg.previous_id = last_step_outbox[msg.recipient_id]
    #new outbox: copy so the previous step's mailbox object stays intact
    new_outbox = last_step_outbox.copy()
    msg_id = await msg.persist(object_store)
    new_outbox[msg.recipient_id] = msg_id
    new_outbox_id = await object_store.store(new_outbox)
    #new step: reuses the previous step's inbox and core, only the outbox changes
    new_step = Step(last_step_id, agent_id, last_step.inbox, new_outbox_id, last_step.core)
    new_step_id = await object_store.store(new_step)
    await references.set(ref_step_head(agent_id), new_step_id)
    return new_step_id
36 |
async def remove_offline_message_from_root_outbox(
        object_store:ObjectStore,
        references:References,
        point:Point,
        recipient_id:ActorId,
        ) -> StepId:
    '''Dangerous! Do not use if the runtime is running!

    Removes the outbox entry for `recipient_id` (if present) by writing a new
    step on top of the current step head and moving the step-head reference.

    Returns the id of the newly created step.
    '''
    #todo: dedupe the code with add_offline_message_to_agent_outbox
    agent_id, last_step_id = await create_or_load_root_actor(object_store, references, point)
    last_step = await object_store.load(last_step_id)
    if(last_step.outbox is None):
        last_step_outbox = {}
    else:
        last_step_outbox = await object_store.load(last_step.outbox)
    #new outbox: copy so the previous step's mailbox object stays intact
    new_outbox = last_step_outbox.copy()
    if(recipient_id in new_outbox):
        del new_outbox[recipient_id]
    # an empty outbox is represented as None on the step, not as an empty mailbox
    if(len(new_outbox) > 0):
        new_outbox_id = await object_store.store(new_outbox)
    else:
        new_outbox_id = None
    #new step: reuses the previous step's inbox and core, only the outbox changes
    new_step = Step(last_step_id, agent_id, last_step.inbox, new_outbox_id, last_step.core)
    new_step_id = await object_store.store(new_step)
    await references.set(ref_step_head(agent_id), new_step_id)
    return new_step_id
--------------------------------------------------------------------------------
/aos/grit/README.md:
--------------------------------------------------------------------------------
1 | This contains the Grit object model, the Grit serialization standard, and the standard interfaces to read and write grit objects (grit store and references).
2 |
3 | The `stores` submodules are non-server implementations to test Grit and other core functions, but are not used in the runtime. The runtime implements its own Grit storage server (see `runtime/store`).
--------------------------------------------------------------------------------
/aos/grit/__init__.py:
--------------------------------------------------------------------------------
1 | from . object_model import *
2 | from . object_store import ObjectLoader, ObjectStore
3 | from . object_serialization import (object_to_bytes, bytes_to_object, is_object_id_str, is_object_id, to_object_id_str,
4 | to_object_id, get_object_id, get_random_object_id, is_blob, is_message, is_step, is_tree, is_mailbox, is_object_id_match,
5 | point_to_bytes, bytes_to_point)
6 | from . references import References, ref_actor_name, ref_root_actor, ref_prototype_name, ref_step_head
7 |
8 |
--------------------------------------------------------------------------------
/aos/grit/object_model.py:
--------------------------------------------------------------------------------
from typing import NamedTuple

# Type aliases and structures that define the entire object model for the Grit object store.
# All objects are content-addressed: an id is the sha256 of the serialized object.

ObjectId = bytes #32 bytes, sha256 of bytes of object

BlobId = ObjectId
Headers = dict[str, str]
Blob = NamedTuple("Blob",
    [('headers', Headers | None),
     ('data', bytes)])

TreeId = ObjectId
Tree = dict[str, BlobId | TreeId] # a tree key must be an ascii string

ActorId = ObjectId # hash of core of message that created the actor, i.e, object id of the core tree
MessageId = ObjectId
Message = NamedTuple("Message",
    [('previous', MessageId | None), #if none, it's a signal, otherwise, a queue
     ('headers', Headers | None),
     ('content', BlobId | TreeId)])
MailboxId = ObjectId
Mailbox = dict[ActorId, MessageId]

StepId = ObjectId
Step = NamedTuple("Step",
    [('previous', StepId | None),
     ('actor', ActorId),
     ('inbox', MailboxId | None),
     ('outbox', MailboxId | None),
     ('core', TreeId)])

# Union of every object kind that can be stored in grit.
Object = Blob | Tree | Message | Mailbox | Step

# Few more type helpers that are used throughout
MailboxUpdate = tuple[ActorId, ActorId, MessageId] # sender_id, recipient_id, message_id
AgentId = ActorId # the agent is defined by the id of the root actor
Point = int # the point key of the agent
39 |
40 |
--------------------------------------------------------------------------------
/aos/grit/object_model_v2.py:
--------------------------------------------------------------------------------
1 | from typing import NamedTuple
2 |
3 | # Type aliases and structures that define the entire object model for the Grit object store.
4 |
5 | ObjectId = bytes #32 bytes, sha256 of bytes of object
6 |
7 | BlobId = ObjectId
8 | TreeId = ObjectId
9 | ListId = ObjectId
10 |
11 | Headers = dict[str, str]
12 | Blob = NamedTuple("Blob",
13 | [('headers', Headers | None),
14 | ('data', bytes)])
15 |
16 | Tree = dict[str, BlobId | TreeId | ListId] # a tree key must be an ascii string
17 |
18 | List = list[BlobId | TreeId | ListId] #NEW
19 |
20 | ActorId = ObjectId # hash of core of message that created the actor, i.e, object id of the core tree
21 | MessageId = ObjectId
22 | Message = NamedTuple("Message",
23 | [('previous', MessageId | None), #if none, it's a signal, otherwise, a queue
24 | ('prune', MessageId | None),
25 | #NEW: /if set, previous is not allowed to be set, instead, the previous message has to be set here,
26 | # which migh be pruned by grit (ie not available anymore)
27 | ('headers', Headers | None),
28 | ('type', str),
29 | #NEW aka, "message_type"/"mt" -- is this a good idea, or should it remain part of the headers?
30 | # the pro is that the message types could be made more explicit in the object model here since the runtime inspects the message types substiantly (e.g., "genesis", "update", and, in the future "gc/garbage/disconnect")
31 | ('content', BlobId | TreeId | ListId | None)]) #NEW with None option, because many messages are just a singal or a ping, and have no content
32 | MailboxId = ObjectId
33 |
34 | Mailbox = dict[tuple(ActorId, str|None), MessageId]
35 | #NEW: Channel name (str), to allow to send on multiple channels to an actor
36 | # if channel name is None then it is the "default channel"
37 | # ActorId can be either sender or receiver
38 | # Rename Mailbox to "Channels"
39 |
40 | StepId = ObjectId
41 |
42 | # TODO: check this out to see if we can use something from the at protocol repo structure
43 | # https://atproto.com/specs/repository
44 |
45 | Step = NamedTuple("Step",
46 | [('previous', StepId | None),
47 | ('actor', ActorId),
48 | ('inbox', MailboxId | None), #NEW: rename to "inputs" or "incoming", if Mailbox gets renamed to "Channels"
49 | ('outbox', MailboxId | None), #NEW: rename to "outputs" or "outgoing"
50 | ('core', TreeId)]) #still, cores must be trees and not a list (unlike JSON, where the top level can be a list or a dict)
51 |
52 | Object = Blob | Tree | List | Message | Mailbox | Step
53 |
54 |
55 | # TODO: in serialization, add grit/object model version header
56 |
--------------------------------------------------------------------------------
/aos/grit/object_store.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from .object_model import *
3 |
class ObjectLoader(ABC):
    """Read-only interface for fetching objects from the Grit object store."""

    @abstractmethod
    async def load(self, objectId:ObjectId) -> Object | None:
        """Asynchronously load the object with the given id; None if it is not found."""
        ...

    @abstractmethod
    def load_sync(self, objectId:ObjectId) -> Object | None:
        """Synchronously load the object with the given id; None if it is not found."""
        ...
13 |
class ObjectStore(ObjectLoader, ABC):
    """Read-write interface for persisting objects in the Grit object store."""

    @abstractmethod
    async def store(self, object:Object) -> ObjectId:
        """Asynchronously persist the object and return its object id."""
        ...

    @abstractmethod
    def store_sync(self, object:Object) -> ObjectId:
        """Synchronously persist the object and return its object id."""
        ...
23 |
24 |
--------------------------------------------------------------------------------
/aos/grit/references.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from .object_model import ObjectId, ActorId
3 |
class References(ABC):
    """Interface for saving and querying references in the Grit object store."""

    @abstractmethod
    async def get(self, ref:str) -> ObjectId | None:
        """Resolve a reference name to an object id; None if the ref is not set."""
        ...

    #todo: refactor this to "get_refs", with ref prefix
    @abstractmethod
    async def get_all(self) -> dict[str, ObjectId]:
        """Return all references as a name -> object id mapping."""
        ...

    @abstractmethod
    async def set(self, ref:str, object_id:ObjectId) -> None:
        """Point the reference name at the given object id."""
        ...

    @abstractmethod
    def get_sync(self, ref:str) -> ObjectId | None:
        """Synchronous variant of get()."""
        ...

    @abstractmethod
    def get_all_sync(self) -> dict[str, ObjectId]:
        """Synchronous variant of get_all()."""
        ...

    @abstractmethod
    def set_sync(self, ref:str, object_id:ObjectId) -> None:
        """Synchronous variant of set()."""
        ...
30 |
# Helper functions to create correctly formatted references
def ref_step_head(actor_id:ActorId|str) -> str:
    """Return the reference key ('heads/<hex actor id>') of an actor's HEAD step."""
    key = actor_id.hex() if isinstance(actor_id, ActorId) else actor_id
    return f"heads/{key}"
36 |
def ref_actor_name(actor_name_ref:str) -> str:
    """Return the reference key ('actors/<name>') for a named actor.

    Raises:
        ValueError: if the name is 'root', which is reserved for the runtime root actor.
    """
    if actor_name_ref == "root":
        # fixed typo in the error message: "reserver" -> "reserved"
        raise ValueError("Actor name 'root' is reserved for the runtime root actor.")
    return f"actors/{actor_name_ref}"
41 |
def ref_prototype_name(prototype_name_ref:str) -> str:
    """Return the reference key ('prototypes/<name>') for a prototype actor."""
    return "prototypes/" + prototype_name_ref
44 |
45 | #todo: rename to "root_actor"
def ref_root_actor() -> str:
    """Return the reference key under which the runtime's root actor is registered."""
    return "runtime/agent"
48 |
--------------------------------------------------------------------------------
/aos/grit/stores/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/aos/grit/stores/__init__.py
--------------------------------------------------------------------------------
/aos/grit/stores/file/__init__.py:
--------------------------------------------------------------------------------
1 | from . file_object_store import FileObjectStore
2 | from . file_references import FileReferences
3 | __all__ = ['FileObjectStore', 'FileReferences']
--------------------------------------------------------------------------------
/aos/grit/stores/file/file_references.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | from pathlib import PureWindowsPath
4 | import threading
5 | from ...object_model import *
6 | from ...references import References
7 |
class FileReferences(References):
    """File-backed References: each ref is a file under <store_path>/refs whose
    content is the hex-encoded object id; an in-memory dict mirrors the files."""

    # The dict is shared between sync and async code, so a reentrant thread lock
    # is needed: the event loop can re-enter while executing coroutines, but
    # other threads cannot.
    _thread_lock:threading.RLock
    # An asyncio lock coordinates the async coroutines among themselves.
    _async_lock:asyncio.Lock

    _ref:dict[str, ObjectId]

    def __init__(self, store_path:str):
        super().__init__()
        self.store_path = store_path
        self.references_path = os.path.join(store_path, 'refs')
        self._async_lock = asyncio.Lock()
        self._thread_lock = threading.RLock()
        self._ref = {}
        os.makedirs(self.references_path, exist_ok=True)
        # preload every reference already persisted on disk
        for dir_name, _subdirs, file_names in os.walk(self.references_path):
            for file_name in file_names:
                full_path = os.path.join(dir_name, file_name)
                ref = os.path.relpath(full_path, self.references_path)
                if os.name == "nt":
                    # normalize Windows backslashes to forward-slash posix form
                    ref = PureWindowsPath(ref).as_posix()
                with open(full_path, "r") as fh:
                    self._ref[ref] = bytes.fromhex(fh.read())

    async def get(self, ref:str) -> ObjectId | None:
        return self.get_sync(ref)

    async def get_all(self) -> dict[str, ObjectId]:
        return self.get_all_sync()

    async def set(self, ref:str, object_id:ObjectId):
        with self._thread_lock:
            async with self._async_lock:
                self._set_and_persist(ref, object_id)

    def get_sync(self, ref:str) -> ObjectId | None:
        return self._ref.get(ref, None)

    def get_all_sync(self) -> dict[str, ObjectId]:
        return self._ref.copy()

    def set_sync(self, ref:str, object_id:ObjectId) -> None:
        with self._thread_lock:
            self._set_and_persist(ref, object_id)

    def _set_and_persist(self, ref:str, object_id:ObjectId) -> None:
        """Update the in-memory dict and write the ref file (hex id) to disk."""
        self._ref[ref] = object_id
        file_path = os.path.join(self.references_path, ref)
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'w') as fh:
            fh.write(object_id.hex())
--------------------------------------------------------------------------------
/aos/grit/stores/lmdb/__init__.py:
--------------------------------------------------------------------------------
1 | from . shared_env import SharedEnvironment
2 | from . lmdb_object_store import LmdbObjectStore
3 | from . lmdb_references import LmdbReferences
4 | __all__ = ['SharedEnvironment', 'LmdbObjectStore', 'LmdbReferences']
--------------------------------------------------------------------------------
/aos/grit/stores/lmdb/lmdb_object_store.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import lmdb
3 | from functools import lru_cache
4 | from async_lru import alru_cache
5 | from ...object_model import *
6 | from ...object_serialization import *
7 | from ...object_store import ObjectStore
8 | from . shared_env import SharedEnvironment
9 |
10 | logger = logging.getLogger(__name__)
11 |
class LmdbObjectStore(ObjectStore):
    """LMDB-backed object store. Objects are content-addressed: the key is the
    object id computed from the serialized bytes."""

    def __init__(self, shared_env:SharedEnvironment):
        super().__init__()
        if(not isinstance(shared_env, SharedEnvironment)):
            raise Exception(f"shared_env must be of type SharedEnvironment, not '{type(shared_env)}'.")
        self._shared_env = shared_env
        # Per-instance LRU cache for loads. Creating the cache here (instead of
        # decorating load_sync) avoids functools keying on `self`, which would keep
        # the instance alive for the cache's lifetime (ruff B019).
        # A miss raises KeyError inside _load_from_db, and lru_cache does not cache
        # calls that raise — so misses are never cached and an object stored after a
        # failed load becomes visible to subsequent loads (the decorated version
        # cached the None forever).
        self._load_cached = lru_cache(maxsize=1024*10)(self._load_from_db)

    async def store(self, object:Object) -> ObjectId:
        return self.store_sync(object)

    async def load(self, object_id:ObjectId) -> Object | None:
        return self.load_sync(object_id)

    def store_sync(self, object:Object) -> ObjectId:
        """Serialize and persist the object; returns its object id.

        Raises:
            ValueError: if object is None.
        """
        if(object is None):
            raise ValueError("object must not be None.")
        data = object_to_bytes(object)  # renamed from `bytes` to avoid shadowing the builtin
        object_id = get_object_id(data)
        try:
            with self._shared_env.begin_object_txn() as txn:
                txn.put(object_id, data, overwrite=False)
            return object_id
        except lmdb.MapFullError:
            logger.warning(f"===> Resizing LMDB map... in obj store, (obj id: {object_id.hex()}) <===")
            self._shared_env._resize()
            #try again
            with self._shared_env.begin_object_txn() as txn:
                txn.put(object_id, data, overwrite=False)
            return object_id

    def load_sync(self, object_id:ObjectId) -> Object | None:
        """Load and deserialize the object with the given id; None if it is not found.

        Raises:
            ValueError: if object_id is None.
            TypeError: if object_id is not a valid ObjectId.
        """
        if(object_id is None):
            raise ValueError("object_id must not be None.")
        if(not is_object_id(object_id)):
            raise TypeError(f"object_id must be of type ObjectId, not '{type(object_id)}'.")
        try:
            return self._load_cached(object_id)
        except KeyError:
            # not found — deliberately NOT cached (see __init__)
            return None

    def _load_from_db(self, object_id:ObjectId) -> Object:
        """Uncached DB read; raises KeyError on a miss so lru_cache skips it."""
        with self._shared_env.begin_object_txn(write=False) as txn:
            data = txn.get(object_id, default=None)
        if data is None:
            raise KeyError(object_id)
        return bytes_to_object(data)
53 |
--------------------------------------------------------------------------------
/aos/grit/stores/lmdb/lmdb_references.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import lmdb
3 | from ...object_model import *
4 | from ...object_serialization import *
5 | from ...references import References
6 | from .shared_env import SharedEnvironment
7 |
8 | logger = logging.getLogger(__name__)
9 |
class LmdbReferences(References):
    """LMDB-backed References implementation; refs live in the shared 'refs' database."""

    def __init__(self, shared_env:SharedEnvironment):
        super().__init__()
        if not isinstance(shared_env, SharedEnvironment):
            raise Exception(f"shared_env must be of type SharedEnvironment, not '{type(shared_env)}'.")
        self._shared_env = shared_env

    async def set(self, ref:str, object_id:ObjectId) -> None:
        self.set_sync(ref, object_id)

    async def get(self, ref:str) -> ObjectId | None:
        return self.get_sync(ref)

    async def get_all(self) -> dict[str, ObjectId]:
        return self.get_all_sync()

    def set_sync(self, ref:str, object_id:ObjectId) -> None:
        """Write (or overwrite) the ref; grows the map once and retries if it is full."""
        if ref is None:
            raise ValueError("ref must not be None.")
        key = ref.encode('utf-8')
        try:
            with self._shared_env.begin_refs_txn() as txn:
                if not txn.put(key, object_id, overwrite=True):
                    raise Exception(f"Not able to set '{ref}' in lmdb 'refs' database.")
        except lmdb.MapFullError as map_full:
            # map filled up: resize and retry the write exactly once
            logger.warning(f"===> Resizing LMDB map... in refs store, (ref: {ref}, obj id: {object_id.hex()}) <===")
            self._shared_env._resize()
            with self._shared_env.begin_refs_txn() as txn:
                if not txn.put(key, object_id, overwrite=True):
                    raise Exception(f"Not able to set '{ref}' in lmdb 'refs' database.") from map_full

    def get_sync(self, ref:str) -> ObjectId | None:
        with self._shared_env.begin_refs_txn(write=False) as txn:
            return txn.get(ref.encode('utf-8'), default=None)

    def get_all_sync(self) -> dict[str, ObjectId]:
        """Return all refs, with the byte keys decoded back to strings."""
        with self._shared_env.begin_refs_txn(write=False) as txn:
            pairs = dict(txn.cursor().iternext())
        return {name.decode('utf-8'): oid for name, oid in pairs.items()}
52 |
--------------------------------------------------------------------------------
/aos/grit/stores/lmdb/shared_env.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import lmdb
4 |
5 | logger = logging.getLogger(__name__)
6 |
class SharedEnvironment:
    """Wraps a single LMDB environment shared by the object store and the refs store."""

    def __init__(self, store_path:str, writemap:bool=False):
        self.store_path = store_path
        self._resizing = False
        os.makedirs(self.store_path, exist_ok=True)
        # writemap=True is what makes lmdb FAST (about 10x faster than if it's False),
        # BUT it makes the DB file as big as the mapsize (at least on some file systems),
        # and it comes with fewer safety guarantees.
        # See: https://lmdb.readthedocs.io/en/release/#writemap-mode
        self.env = lmdb.Environment(
            store_path,
            max_dbs=5,
            writemap=writemap,
            metasync=False,
            # flush write buffers asynchronously to disk (ignored when writemap is False)
            map_async=True,
            # start at 10 MB; ignored if the map is bigger already
            map_size=1024*1024*10,
        )

    def get_env(self) -> lmdb.Environment:
        return self.env

    def get_object_db(self) -> lmdb._Database:
        return self.env.open_db(b'obj')

    def get_refs_db(self) -> lmdb._Database:
        return self.env.open_db(b'refs')

    def begin_object_txn(self, write=True, buffers=False) -> lmdb.Transaction:
        return self.env.begin(db=self.get_object_db(), write=write, buffers=buffers)

    def begin_refs_txn(self, write=True, buffers=False) -> lmdb.Transaction:
        return self.env.begin(db=self.get_refs_db(), write=write, buffers=buffers)

    def _resize(self) -> int:
        """Grow the LMDB map size and return the new size in bytes."""
        self._resizing = True
        current_size = self.env.info()['map_size']
        # grow aggressively while the map is small, conservatively once it is large
        if current_size > 1024*1024*1024*10: # over 10 GB
            factor = 1.2
        elif current_size > 1024*1024*1024: # over 1 GB
            factor = 1.5
        else:
            factor = 3.0
        # must be rounded to the next int! otherwise lmdb will segfault later (spent several hours on this)
        new_size = round(current_size * factor)
        logger.info(f"Resizing LMDB map from {current_size/1024/1024} MB to {new_size/1024/1024} MB")
        self.env.set_mapsize(new_size)
        self._resizing = False
        return new_size
--------------------------------------------------------------------------------
/aos/grit/stores/memory/__init__.py:
--------------------------------------------------------------------------------
1 | from . memory_object_store import MemoryObjectStore
2 | from . memory_references import MemoryReferences
3 | __all__ = ['MemoryObjectStore', 'MemoryReferences']
--------------------------------------------------------------------------------
/aos/grit/stores/memory/memory_object_store.py:
--------------------------------------------------------------------------------
1 | from ...object_model import *
2 | from ...object_serialization import *
3 | from ...object_store import ObjectStore
4 |
class MemoryObjectStore(ObjectStore):
    """In-memory object store.

    No locking needed: all dict operations used here are atomic."""

    # Maps object id -> the *serialized* object bytes (not the deserialized Object);
    # the previous annotation (dict[ObjectId, Object]) did not match what is stored.
    _store:dict[ObjectId, bytes]

    def __init__(self):
        super().__init__()
        self._store = {}

    async def store(self, object:Object) -> ObjectId:
        return self.store_sync(object)

    async def load(self, object_id:ObjectId) -> Object | None:
        return self.load_sync(object_id)

    def store_sync(self, object:Object) -> ObjectId:
        """Serialize and store the object; returns its object id."""
        data = object_to_bytes(object)  # renamed from `bytes` to avoid shadowing the builtin
        object_id = get_object_id(data)
        self._store[object_id] = data
        return object_id

    def load_sync(self, object_id:ObjectId) -> Object | None:
        """Deserialize and return the stored object; None if it is not found."""
        data = self._store.get(object_id)
        if data is None:
            return None
        return bytes_to_object(data)
30 |
--------------------------------------------------------------------------------
/aos/grit/stores/memory/memory_references.py:
--------------------------------------------------------------------------------
1 | from ...object_model import *
2 | from ...references import References
3 |
class MemoryReferences(References):
    """In-memory References implementation.

    No locking needed: all dict operations used here are atomic."""

    _ref:dict[str, ObjectId]

    def __init__(self):
        super().__init__()
        self._ref = {}

    # The async variants delegate to the sync implementations, consistent with
    # the other store implementations (FileReferences, LmdbReferences, etc.),
    # instead of duplicating their bodies.
    async def get(self, ref:str) -> ObjectId | None:
        return self.get_sync(ref)

    async def get_all(self) -> dict[str, ObjectId]:
        return self.get_all_sync()

    async def set(self, ref:str, object_id:ObjectId) -> None:
        self.set_sync(ref, object_id)

    def get_sync(self, ref:str) -> ObjectId | None:
        return self._ref.get(ref, None)

    def get_all_sync(self) -> dict[str, ObjectId]:
        return self._ref.copy()

    def set_sync(self, ref:str, object_id:ObjectId) -> None:
        self._ref[ref] = object_id
--------------------------------------------------------------------------------
/aos/runtime/__init__py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/aos/runtime/__init__py
--------------------------------------------------------------------------------
/aos/runtime/apex/README.md:
--------------------------------------------------------------------------------
1 | # Apex Server (Orchestrator)
2 |
--------------------------------------------------------------------------------
/aos/runtime/apex/apex_client.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import asyncio
3 | import grpc
4 | import logging
5 | from concurrent import futures
6 | from aos.runtime.apex import apex_api_pb2, apex_api_pb2_grpc
7 | from aos.runtime.apex import apex_workers_pb2, apex_workers_pb2_grpc
8 | from aos.runtime.store.base_client import BaseClient
9 |
10 | import logging
11 | logger = logging.getLogger(__name__)
12 |
class ApexClient(BaseClient):
    """gRPC client for the Apex server: factories for ApexApi and ApexWorkers
    service stubs over the sync/async channels provided by BaseClient."""
    def __init__(self, server_address="localhost:50052"):
        # port 50052 — presumably the apex server's default; confirm against server config
        super().__init__(server_address)

    def get_apex_api_stub_sync(self):
        """Stub for the ApexApi service on the synchronous channel."""
        return apex_api_pb2_grpc.ApexApiStub(self.channel_sync)

    def get_apex_api_stub_async(self):
        """Stub for the ApexApi service on the asyncio channel."""
        return apex_api_pb2_grpc.ApexApiStub(self.channel_async)

    def get_apex_workers_stub_sync(self):
        """Stub for the ApexWorkers service on the synchronous channel."""
        return apex_workers_pb2_grpc.ApexWorkersStub(self.channel_sync)

    def get_apex_workers_stub_async(self):
        """Stub for the ApexWorkers service on the asyncio channel."""
        return apex_workers_pb2_grpc.ApexWorkersStub(self.channel_async)
28 |
--------------------------------------------------------------------------------
/aos/runtime/apex/test.py:
--------------------------------------------------------------------------------
1 | # run the grit server
2 |
3 | import asyncio
4 | import logging
5 | import time
6 | import grpc
7 | from typing import AsyncIterable
8 |
9 | from aos.grit import *
10 | from aos.runtime.apex import apex_workers_pb2
11 | from aos.wit import *
12 | from .apex_client import ApexClient
13 |
async def arun() -> None:
    """Manual integration test for the apex worker stream.

    Registers "worker1" with a running apex server, opens the bidirectional
    ConnectWorker stream, sends READY (with the registration ticket) and then
    keeps the stream alive with periodic PINGs, printing every message the
    apex server sends back. Requires a reachable apex server.
    """
    client = ApexClient()

    worker_stub = client.get_apex_workers_stub_async()

    #register first — the ticket is required for the READY message below
    response = await worker_stub.RegisterWorker(apex_workers_pb2.WorkerRegistrationRequest(worker_id="worker1"))
    ticket = response.ticket
    print("registered worker1 with ticket", ticket)

    async def generate_messages() -> AsyncIterable[apex_workers_pb2.WorkerToApexMessage]:
        # outgoing stream: one PING, then READY with the ticket, then PING forever
        yield apex_workers_pb2.WorkerToApexMessage(
            worker_id="worker1",
            type=apex_workers_pb2.WorkerToApexMessage.PING)
        print("sent ping")
        await asyncio.sleep(1)
        yield apex_workers_pb2.WorkerToApexMessage(
            worker_id="worker1",
            ticket=ticket,
            type=apex_workers_pb2.WorkerToApexMessage.READY)
        while True:
            yield apex_workers_pb2.WorkerToApexMessage(
                worker_id="worker1",
                type=apex_workers_pb2.WorkerToApexMessage.PING)
            print("sent ping")
            await asyncio.sleep(5)

    apex_stream:AsyncIterable[apex_workers_pb2.ApexToWorkerMessage] = worker_stub.ConnectWorker(generate_messages())

    try:
        async for message in apex_stream:
            print("received apex message", message.type)
        print("apex stream done")
    except grpc.aio.AioRpcError as e:
        # a cancelled stream is expected on shutdown; anything else is a real error
        if e.code() == grpc.StatusCode.CANCELLED:
            print("apex stream cancelled")
        else:
            raise e

    await client.close()
    logging.info("Done")
55 |
# Script entry point: run the manual worker-stream test against a local apex server.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    asyncio.run(arun())
59 |
--------------------------------------------------------------------------------
/aos/runtime/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .runtime import Runtime
2 | from .actor_executor import ExecutionContext
3 | from .resolvers import *
4 |
--------------------------------------------------------------------------------
/aos/runtime/core/discovery_executor.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from aos.grit import *
3 | from aos.wit.discovery import Discovery
4 |
class DiscoveryExecutor(Discovery):
    """Executes searches to find other actors via the references store."""

    references:References

    def __init__(self, references:References):
        self.references = references

    async def find_named_actor(self, actor_name:str) -> ActorId | None:
        """Look up an actor id registered under 'actors/<name>'; None if unknown."""
        ref = ref_actor_name(actor_name)
        return await self.references.get(ref)

    async def find_prototype(self, prototype_name:str) -> ActorId | None:
        """Look up an actor id registered under 'prototypes/<name>'; None if unknown."""
        ref = ref_prototype_name(prototype_name)
        return await self.references.get(ref)
16 |
17 |
18 |
--------------------------------------------------------------------------------
/aos/runtime/core/external_storage_executor.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aos.wit.external_storage import ExternalStorage
3 |
class ExternalStorageExecutor(ExternalStorage):
    """Provides actors access to external storage on the local filesystem,
    rooted at <root_dir>/<agent_dir>/<actor_dir>."""

    def __init__(self, root_dir:str, agent_dir:str, actor_dir:str|None):
        self.root_dir = root_dir
        self.agent_dir = agent_dir
        self.actor_dir = actor_dir

    def get_dir(self, sub_dir:str|None=None) -> str:
        """Returns a directory where the actor can store files. The directory will
        be created if it does not exist.

        Raises:
            Exception: if this executor is not scoped to an actor (see make_for_actor).
        """
        if self.actor_dir is None:
            raise Exception("Actor directory not set. Make a storage executor for this actor.")
        # build the path once instead of duplicating the join in both branches
        parts = [self.root_dir, self.agent_dir, self.actor_dir]
        if sub_dir:
            parts.append(sub_dir)
        target = os.path.join(*parts)
        # exist_ok=True makes a separate existence check redundant (and avoids a TOCTOU race)
        os.makedirs(target, exist_ok=True)
        return target

    def make_for_actor(self, actor_dir:str) -> ExternalStorage:
        """Creates an external storage executor scoped to the given actor."""
        return ExternalStorageExecutor(self.root_dir, self.agent_dir, actor_dir)
--------------------------------------------------------------------------------
/aos/runtime/core/ipc.py:
--------------------------------------------------------------------------------
1 |
2 | # An "In-Process Cluster" for testing
3 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
4 | from aos.grit import *
5 | from aos.wit import *
6 | from .runtime import Runtime
7 | from .resolvers import ExternalResolver
8 |
class InProcessCluster:
    """Runs a Runtime with a set of named wits fully in-process (in-memory store
    and refs by default) — intended for tests.

    Fixes: optional parameters were annotated as non-optional; `asyncio` is now
    imported explicitly instead of relying on a wildcard re-export; unused loop
    variable removed in create_actors.
    """

    def __init__(
        self,
        wits:dict[str, Wit],
        create_query_wit: bool = False,
        store:ObjectStore|None = None,
        refs:References|None = None,
        ) -> None:
        self.wits = wits
        self.create_query_wit = create_query_wit
        # fall back to in-memory implementations when none are provided
        self.store = store if store is not None else MemoryObjectStore()
        self.refs = refs if refs is not None else MemoryReferences()

        self.resolver = ExternalResolver(self.store)
        for wit_name, wit in self.wits.items():
            self.resolver.register(wit_name, wit)

        self.runtime = Runtime(store=self.store, references=self.refs, resolver=self.resolver)
        self._running_task:asyncio.Task|None = None

    async def __aenter__(self):
        await self.start()
        return self

    async def __aexit__(self, *args):
        await self.stop()

    async def start(self):
        """Starts the runtime in a background task and creates the actors."""
        self._running_task = asyncio.create_task(self.runtime.start())
        # give the runtime a moment to spin up before injecting genesis messages
        await asyncio.sleep(0.01)
        await self.create_actors()
        await asyncio.sleep(0.01)

    async def stop(self):
        """Stops the runtime and waits briefly for the background task to finish."""
        self.runtime.stop()
        await asyncio.wait_for(self._running_task, timeout=1)

    async def create_genesis_message(self, wit_name:str) -> MailboxUpdate:
        '''Creates a genesis message and returns a MailboxUpdate'''
        gen_core:TreeObject = Core.from_external_wit_ref(wit_name, wit_name if self.create_query_wit else None)
        gen_message = await OutboxMessage.from_genesis(self.store, gen_core)
        gen_message_id = await gen_message.persist(self.store)
        return (self.runtime.agent_id, gen_message.recipient_id, gen_message_id)

    async def create_actors(self):
        """Sends a genesis message for every registered wit and records a named
        actor reference for each resulting actor."""
        for wit_name in self.wits:
            gen_message = await self.create_genesis_message(wit_name)
            await self.runtime.inject_mailbox_update(gen_message)
            # create a named actor ref so the actor can be found by wit name
            await self.refs.set(ref_actor_name(wit_name), gen_message[1])

    async def inject_content(self, actor_name:str, content:ValidMessageContent, is_signal:bool=False, mt:str|None=None):
        """Sends a new message with the given content to the named actor."""
        actor_id = await self.refs.get(ref_actor_name(actor_name))
        message = OutboxMessage.from_new(actor_id, content, is_signal=is_signal, mt=mt)
        await self.runtime.inject_message(message)

    async def get_actor(self, actor_name:str) -> ActorId:
        """Resolves the named actor's id via its 'actors/<name>' reference."""
        return await self.refs.get(ref_actor_name(actor_name))

    async def get_actor_step(self, actor_name:str) -> Step:
        """Loads the HEAD step of the named actor."""
        actor_id = await self.refs.get(ref_actor_name(actor_name))
        step_id = await self.refs.get(ref_step_head(actor_id))
        return await self.store.load(step_id)

    async def get_actor_core(self, actor_name:str) -> Core:
        """Loads the core tree of the named actor's HEAD step."""
        step = await self.get_actor_step(actor_name)
        return await Core.from_core_id(self.store, step.core)
--------------------------------------------------------------------------------
/aos/runtime/core/presence_executor.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from aos.grit import *
3 | from aos.wit.presence import Presence
4 |
class NoOpPresenceExecutor(Presence):
    """No-op implementation of the presence interface.

    For a full implementation, see portals, which uses Redis.
    """

    async def check(self, channel:str) -> bool:
        """Always reports that nobody is present on the channel."""
        return False

    async def publish(self, channel:str, message:Blob) -> None:
        """Drops the message; nothing is published."""
        return None
--------------------------------------------------------------------------------
/aos/runtime/core/query_executor.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import logging
3 | from aos.grit import *
4 | from aos.runtime.core.external_storage_executor import ExternalStorageExecutor
5 | from aos.wit.discovery import Discovery
6 | from aos.wit.errors import QueryError
7 | from aos.wit.external_storage import ExternalStorage
8 | from aos.wit.query import Query
9 | from .resolvers import Resolver
10 |
11 | logger = logging.getLogger(__name__)
12 |
class QueryExecutor(Query):
    """Executes wit queries against an actor's head step."""
    loader:ObjectLoader
    references:References
    resolver:Resolver
    agent_id:ActorId

    def __init__(self,
        loader:ObjectLoader,
        references:References,
        resolver:Resolver,
        agent_id:ActorId,
        discovery:Discovery|None=None,
        external_storage:ExternalStorageExecutor|None=None):

        self.loader = loader
        self.references = references
        self.resolver = resolver
        self.agent_id = agent_id
        self.discovery = discovery
        self.external_storage = external_storage

    async def _run(self, actor_id:ActorId, query_name:str, context:Blob|None) -> Tree | Blob | None:
        """Resolves the actor's HEAD step and its 'wit_query' function, then invokes
        it with the query name and context.

        Raises:
            QueryError: if the actor has no valid HEAD step, no query function,
                or the query function itself raises.
        """
        actor_id_str = actor_id.hex()
        current_step_id = await self.references.get(ref_step_head(actor_id))
        if(current_step_id is None):
            raise QueryError(f"Actor '{actor_id_str}' does not have a HEAD step, '{ref_step_head(actor_id)}'. "+
                "Make sure its genesis step has completed.")
        current_step_id_str = current_step_id.hex()

        #load the current step and sanity-check it
        current_step:Step = await self.loader.load(current_step_id)
        if(current_step is None):
            raise QueryError(f"Actor '{actor_id_str}' has a HEAD step '{current_step_id_str}' that does not exist.")
        if(not is_step(current_step)):
            raise QueryError(f"Actor '{actor_id_str}' has a HEAD step '{current_step_id_str}' that is not a step.")
        if(current_step.actor != actor_id):
            # bug fix: the second string was missing the f-prefix, so the mismatching
            # actor id was never interpolated into the error message
            raise QueryError(f"Actor '{actor_id_str}' has a HEAD step '{current_step_id_str}' that does not belong to it. "+
                f"The actor inside the step does not match actor '{current_step.actor.hex()}'.")

        query_func = await self.resolver.resolve(current_step.core, 'wit_query', is_required=False)
        if(query_func is None):
            raise QueryError(f"Actor '{actor_id_str}' has no query function.")
        args = (current_step_id, query_name, context)
        kwargs = {
            'loader': self.loader,
            'object_loader': self.loader,
            'actor_id': actor_id,
            'agent_id': self.agent_id,
            'query': self
        }
        # optional capabilities are only passed through when configured
        if(self.discovery is not None):
            kwargs['discovery'] = self.discovery
        if(self.external_storage is not None):
            kwargs['external_storage'] = self.external_storage.make_for_actor(actor_id.hex())
        try:
            result = await query_func(*args, **kwargs)
            return result
        except Exception as e:
            logger.error(f"Query '{query_name}' to '{actor_id_str}', with step '{current_step_id_str}', failed with an exception: {e}", exc_info=e)
            raise QueryError(f"Query '{query_name}' to '{actor_id_str}', with step '{current_step_id_str}', failed with an exception: {e}") from e
74 |
75 |
76 |
--------------------------------------------------------------------------------
/aos/runtime/core/request_response_executor.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from aos.grit import *
3 | from aos.wit import *
4 | from .root_executor import RootActorExecutor
5 |
6 | #==========================================================================================
7 | # Rails are request-response calls to an actor.
8 | #
9 | # This is to avoid excessive callback hell in certain cases.
10 | # See the documentation in /thinking for more details.
11 | #
12 | # The current implementation just wraps the RuntimeExecutor since it had
13 | # already everything we needed to implement the first prototype.
14 | # In the future, we will want to make the rails executor its own runtime actor.
15 |
class RequestResponseExecutor(RequestResponse):
    """Implements request-response ("rails") calls by wrapping the root actor
    executor: the request goes out through the root actor's outbox and the
    response is awaited on the root actor's message subscription."""

    def __init__(self, store:ObjectStore, root_executor:RootActorExecutor) -> None:
        self.store = store
        self.runtime_executor = root_executor

    async def run(
        self,
        msg:OutboxMessage,
        response_types:list[str],
        timeout:float|None = None,
        ) -> InboxMessage:
        """Sends `msg` (must be a signal) and waits until a message whose type (mt)
        is in `response_types` arrives; non-matching messages are consumed and
        discarded.

        NOTE(review): `timeout` applies to each individual queue wait, not to the
        total duration of the call — confirm this is intended.
        """
        if response_types is None or len(response_types) == 0:
            raise Exception("Need at least one response message type to wait for.")

        if not msg.is_signal:
            raise Exception("The request 'msg' must be a signal. Set is_signal to True.")

        # persist the request and route it through the root actor's outbox
        mailbox_update = await msg.persist_to_mailbox_update(self.store, self.runtime_executor.actor_id)
        with self.runtime_executor.subscribe_to_messages() as queue:
            # send to executor
            await self.runtime_executor.update_current_outbox([mailbox_update])
            # wait for response
            while True:
                if timeout is not None:
                    # the timeout will throw here, if it gets triggered, and bubble up
                    mailbox_update = await asyncio.wait_for(queue.get(), timeout)
                else:
                    mailbox_update = await queue.get()

                if mailbox_update is None:
                    raise Exception("Runtime terminated runtime actor.")

                # mailbox_update is (sender_id, recipient_id, message_id)
                sender_id = mailbox_update[0]
                message_id = mailbox_update[2]
                message = await InboxMessage.from_message_id(self.store, sender_id, message_id)

                if message.mt is not None and message.mt in response_types:
                    return message
55 |
56 |
--------------------------------------------------------------------------------
/aos/runtime/store/README.md:
--------------------------------------------------------------------------------
1 | # Grit Server
2 |
3 | ```
4 | poetry run python -m aos.runtime.store.grit_store_server
5 |
6 | poetry run python -m aos.runtime.store.main
7 |
8 |
9 | poetry run python -m aos.cluster.grit.grit_server
10 |
11 | ```
12 |
13 |
14 | ## Sync vs Async
15 |
16 | How to combine sync and async handlers
17 | https://stackoverflow.com/questions/66889987/combining-grpc-standard-and-async-methods-in-the-same-python-server
--------------------------------------------------------------------------------
/aos/runtime/store/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/aos/runtime/store/__init__.py
--------------------------------------------------------------------------------
/aos/runtime/store/agent_object_store.py:
--------------------------------------------------------------------------------
1 | from aos.grit.object_model import *
2 | from aos.grit.object_serialization import *
3 | from aos.grit.object_store import ObjectStore
4 | from aos.runtime.store import grit_store_pb2, grit_store_pb2_grpc
5 | from .store_client import StoreClient
6 |
7 | #todo: this should probably move to the worker? depending who needs it
8 |
9 | #todo: add a cache to the store, track the total bytes stored (using the object size), set limits there
10 |
class AgentObjectStore(ObjectStore):
    """An object store for a single agent. It connects to the Grit store server to persist data."""
    def __init__(self, store_client:StoreClient, agent_id:ActorId):
        super().__init__()
        if not is_object_id(agent_id):
            raise ValueError("agent_id must be an ObjectId (bytes).")
        self._agent_id = agent_id
        self._store_client = store_client
        self._store_stub_sync = store_client.get_grit_store_stub_sync()
        self._store_stub_async = store_client.get_grit_store_stub_async()

    def _to_store_request(self, object:Object):
        """Serialize `object` and wrap it in a StoreRequest addressed to this agent."""
        data = object_to_bytes(object)
        object_id = get_object_id(data)
        return grit_store_pb2.StoreRequest(
            agent_id=self._agent_id,
            object_id=object_id,
            data=data)

    def _to_load_request(self, object_id:ObjectId):
        """Wrap `object_id` in a LoadRequest addressed to this agent."""
        return grit_store_pb2.LoadRequest(
            agent_id=self._agent_id,
            object_id=object_id)

    async def store(self, object:Object) -> ObjectId:
        """Store `object` on the server and return its content-addressed id."""
        request = self._to_store_request(object)
        await self._store_stub_async.Store(request)
        return request.object_id

    async def load(self, object_id:ObjectId) -> Object | None:
        """Load the object with `object_id`, or return None if the server has no data for it."""
        if not is_object_id(object_id):
            raise ValueError(f"object_id is not a properly structured ObjectId: type '{type(object_id)}', len {len(object_id)}.")
        response:grit_store_pb2.LoadResponse = await self._store_stub_async.Load(
            self._to_load_request(object_id))
        # Fix: proto3 scalar fields are never None — an unset bytes field is b"".
        # The previous `response.data is None` check could never fire, so a missing
        # object was passed on to deserialization instead of returning None.
        if not response.data:
            return None
        return bytes_to_object(response.data)

    def store_sync(self, object:Object) -> ObjectId:
        """Synchronous version of `store`."""
        request = self._to_store_request(object)
        self._store_stub_sync.Store(request)
        return request.object_id

    def load_sync(self, object_id:ObjectId) -> Object | None:
        """Synchronous version of `load`."""
        # Consistency: validate the id like the async version does.
        if not is_object_id(object_id):
            raise ValueError(f"object_id is not a properly structured ObjectId: type '{type(object_id)}', len {len(object_id)}.")
        response:grit_store_pb2.LoadResponse = self._store_stub_sync.Load(
            self._to_load_request(object_id))
        # Same proto3 empty-bytes fix as in `load`.
        if not response.data:
            return None
        return bytes_to_object(response.data)
--------------------------------------------------------------------------------
/aos/runtime/store/agent_references.py:
--------------------------------------------------------------------------------
1 | from aos.grit.object_model import *
2 | from aos.grit.object_serialization import *
3 | from aos.grit.references import References
4 | from aos.runtime.store import grit_store_pb2
5 | from .store_client import StoreClient
6 |
7 | #todo: this should probably move to the worker? depending who needs it
8 |
9 | #todo: add a cache to the store, track the total bytes stored (using the object size), set limits there
10 |
class AgentReferences(References):
    """A references store for a single agent. It connects to the Grit store server to persist the references."""
    def __init__(self, store_client:StoreClient, agent_id:ActorId):
        super().__init__()
        if not is_object_id(agent_id):
            raise ValueError("agent_id must be an ObjectId (bytes).")
        self._agent_id = agent_id
        self._store_client = store_client
        self._store_stub_sync = store_client.get_grit_store_stub_sync()
        self._store_stub_async = store_client.get_grit_store_stub_async()

    async def get(self, ref:str) -> ObjectId | None:
        """Resolve `ref` to an object id, or None if the ref is not set."""
        if not ref:
            raise ValueError("ref is empty.")
        request = grit_store_pb2.GetRefRequest(agent_id=self._agent_id, ref=ref)
        response:grit_store_pb2.GetRefResponse = await self._store_stub_async.GetRef(request)
        if not response.HasField("object_id"):
            return None
        if not is_object_id(response.object_id):
            raise ValueError(f"object_id is not a properly structured ObjectId: type '{type(response.object_id)}', len {len(response.object_id)}.")
        return response.object_id

    async def get_all(self) -> dict[str, ObjectId]:
        """Return all refs of this agent as a ref -> object id dict."""
        request = grit_store_pb2.GetRefsRequest(agent_id=self._agent_id)
        response:grit_store_pb2.GetRefsResponse = await self._store_stub_async.GetRefs(request)
        return {ref: object_id for ref, object_id in response.refs.items()}

    async def set(self, ref:str, object_id:ObjectId) -> None:
        """Point `ref` at `object_id`."""
        if not ref:
            raise ValueError("ref is empty.")
        if not is_object_id(object_id):
            raise ValueError(f"object_id is not a properly structured ObjectId: type '{type(object_id)}', len {len(object_id)}.")
        request = grit_store_pb2.SetRefRequest(agent_id=self._agent_id, ref=ref, object_id=object_id)
        await self._store_stub_async.SetRef(request)

    def get_sync(self, ref:str) -> ObjectId | None:
        """Synchronous version of `get`."""
        # Consistency fix: validate `ref` like the async version does.
        if not ref:
            raise ValueError("ref is empty.")
        request = grit_store_pb2.GetRefRequest(agent_id=self._agent_id, ref=ref)
        response:grit_store_pb2.GetRefResponse = self._store_stub_sync.GetRef(request)
        if not response.HasField("object_id"):
            return None
        if not is_object_id(response.object_id):
            raise ValueError(f"object_id is not a properly structured ObjectId: type '{type(response.object_id)}', len {len(response.object_id)}.")
        return response.object_id

    def get_all_sync(self) -> dict[str, ObjectId]:
        """Synchronous version of `get_all`."""
        request = grit_store_pb2.GetRefsRequest(agent_id=self._agent_id)
        response:grit_store_pb2.GetRefsResponse = self._store_stub_sync.GetRefs(request)
        return {ref: object_id for ref, object_id in response.refs.items()}

    def set_sync(self, ref:str, object_id:ObjectId) -> None:
        """Synchronous version of `set`."""
        # Consistency fix: validate arguments like the async version does.
        if not ref:
            raise ValueError("ref is empty.")
        if not is_object_id(object_id):
            raise ValueError(f"object_id is not a properly structured ObjectId: type '{type(object_id)}', len {len(object_id)}.")
        request = grit_store_pb2.SetRefRequest(agent_id=self._agent_id, ref=ref, object_id=object_id)
        self._store_stub_sync.SetRef(request)
69 |
--------------------------------------------------------------------------------
/aos/runtime/store/base_client.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import asyncio
3 | import grpc
4 | import logging
5 | import time
6 | from concurrent import futures
7 |
class BaseClient:
    """Holds one async and one sync gRPC channel to the same server address.

    The async and sync gRPC APIs cannot share a channel object. Opening two
    channels is fine, though: gRPC apparently maintains a shared subchannel
    between client channels that have identical configuration.
    See: https://stackoverflow.com/a/62761510 (last comment)
    """
    def __init__(self, server_address):
        self.server_address = server_address

        # Channel options intentionally left empty; tuning knobs such as
        # reconnect backoff or retry settings can be added here if needed.
        self.channel_async = grpc.aio.insecure_channel(self.server_address, options=[])
        self.channel_sync = grpc.insecure_channel(self.server_address, options=[])

    async def wait_for_async_channel_ready(self, timeout_seconds:float=3000):
        """Block until the async channel reports ready, or raise a TimeoutError."""
        try:
            await asyncio.wait_for(self.channel_async.channel_ready(), timeout_seconds)
        except asyncio.TimeoutError as e:
            raise asyncio.TimeoutError(
                f"{type(self).__name__}: Timeout waiting for {timeout_seconds} seconds for channel to be ready."
                ) from e

    def get_channel_sync(self):
        """The blocking gRPC channel."""
        return self.channel_sync

    def get_channel_async(self):
        """The asyncio gRPC channel."""
        return self.channel_async

    async def close(self, grace_period=1.0):
        """Close both channels; the async one is given `grace_period` seconds to drain."""
        await self.channel_async.close(grace_period)
        self.channel_sync.close()
49 |
--------------------------------------------------------------------------------
/aos/runtime/store/main.py:
--------------------------------------------------------------------------------
1 | # run the grit server
2 |
3 | import asyncio
4 | import logging
5 | import time
6 |
7 | from aos.grit import *
8 | from aos.wit import *
9 | from aos.runtime.store import grit_store_pb2, agent_store_pb2
10 | from . agent_object_store import AgentObjectStore
11 | from .store_client import StoreClient
12 |
async def arun() -> None:
    """Smoke-tests the agent store server (create/get agents, vars) and then
    benchmarks persisting 100k blobs through the grit store."""
    client = StoreClient()

    created:agent_store_pb2.CreateAgentResponse = await client.get_agent_store_stub_async().CreateAgent(agent_store_pb2.CreateAgentRequest())

    logging.info(f"Created agent with id {created.agent_id.hex()}")
    logging.info(f"Created agent with point {created.point}")
    agent_id = created.agent_id

    # create a few more agents (just to make sure it all works)
    _second:agent_store_pb2.CreateAgentResponse = await client.get_agent_store_stub_async().CreateAgent(agent_store_pb2.CreateAgentRequest())
    _third:agent_store_pb2.CreateAgentResponse = await client.get_agent_store_stub_async().CreateAgent(agent_store_pb2.CreateAgentRequest())

    # fetch the agents back, just to exercise the lookup paths
    print("=====================================")
    print("Getting agents")
    agent_by_id = await client.get_agent_store_stub_async().GetAgent(agent_store_pb2.GetAgentRequest(agent_id=created.agent_id))
    print(agent_by_id)
    agent_by_point = await client.get_agent_store_stub_async().GetAgent(agent_store_pb2.GetAgentRequest(point=created.point))
    print(agent_by_point)
    all_agents = await client.get_agent_store_stub_async().GetAgents(agent_store_pb2.GetAgentsRequest())
    print(all_agents)

    # set a var and filter agents by it
    await client.get_agent_store_stub_async().SetVar(agent_store_pb2.SetVarRequest(agent_id=created.agent_id, key="test", value="test"))
    agent_by_var = await client.get_agent_store_stub_async().GetAgents(agent_store_pb2.GetAgentsRequest(var_filters={"test":"test"}))
    print("agent by var", agent_by_var)
    print("=====================================")

    object_store = AgentObjectStore(client, agent_id)

    # write benchmark: persist 100k small blobs, logging progress every 1000
    started = time.perf_counter()
    for i in range(100000):
        blob = BlobObject.from_str("Hi "+str(i))
        object_id = await blob.persist(object_store)
        if i % 1000 == 0:
            logging.info(f"Persisted object with id {object_id.hex()}")
            # reads are omitted here: most would hit the cache and skew the numbers
            # time elapsed since beginning
            logging.info(f"Elapsed time: {time.perf_counter()-started:0.2f} seconds")

    logging.info(f"Elapsed time: {time.perf_counter()-started:0.2f} seconds")

    await client.close()
    logging.info("Done")
68 |
if __name__ == "__main__":
    # Entry point: `python -m aos.runtime.store.main` runs the smoke test/benchmark.
    logging.basicConfig(level=logging.INFO)
    asyncio.run(arun())
72 |
--------------------------------------------------------------------------------
/aos/runtime/store/main_refs.py:
--------------------------------------------------------------------------------
1 | # run the grit server
2 |
3 | import asyncio
4 | import logging
5 | import time
6 |
7 | from aos.grit import *
8 | from aos.wit import *
9 | from . agent_references import AgentReferences
10 | from .store_client import StoreClient
11 |
async def arun() -> None:
    """Smoke-tests the reference store: writes refs for two distinct agents and
    reads them back to confirm the namespaces are independent."""
    client = StoreClient()
    first_refs = AgentReferences(client, get_random_object_id())
    second_refs = AgentReferences(client, get_random_object_id())

    # same ref names under two agents — they must not collide
    await first_refs.set("ref1", get_random_object_id())
    await first_refs.set("ref2", get_random_object_id())
    await second_refs.set("ref1", get_random_object_id())
    await second_refs.set("ref2", get_random_object_id())

    logging.info(f"refs1: {(await first_refs.get('ref1')).hex()}")
    logging.info(f"refs2: {(await second_refs.get('ref1')).hex()}")

    logging.info(f"all refs1: {await first_refs.get_all()}")

    await client.close()
    logging.info("Done")
29 |
if __name__ == "__main__":
    # Entry point: run the reference-store smoke test directly.
    logging.basicConfig(level=logging.INFO)
    asyncio.run(arun())
33 |
--------------------------------------------------------------------------------
/aos/runtime/store/main_two.py:
--------------------------------------------------------------------------------
1 | # run the grit server
2 |
3 | import asyncio
4 | import logging
5 | import time
6 |
7 | from aos.grit import *
8 | from aos.grit.stores.lmdb import SharedEnvironment, LmdbReferences, LmdbObjectStore
9 | from aos.wit import *
10 |
11 | #Quick test to compare the benchmark to in-proc lmdb
async def arun() -> None:
    """Benchmarks in-process lmdb writes and reads, for comparison against the
    grit store server numbers produced by main.py."""
    shared_env = SharedEnvironment("/tmp/grit_store_two", writemap=True)
    object_store = LmdbObjectStore(shared_env)

    started = time.perf_counter()
    for i in range(100000):
        verbose = i % 1000 == 0

        blob_out = BlobObject.from_str("Hi "+str(i))
        object_id = await blob_out.persist(object_store)
        if verbose:
            logging.info(f"Persisted object with id {object_id.hex()}")

        blob_in = await BlobObject.from_blob_id(object_store, object_id)
        if verbose:
            logging.info(f"Loaded object with id {blob_in.get_as_str()}")

        if verbose:
            # time elapsed since beginning
            logging.info(f"Elapsed time: {time.perf_counter()-started:0.2f} seconds")

    logging.info(f"Elapsed time: {time.perf_counter()-started:0.2f} seconds")

    logging.info("Done")
38 |
if __name__ == "__main__":
    # Entry point: run the lmdb comparison benchmark directly.
    logging.basicConfig(level=logging.INFO)
    asyncio.run(arun())
42 |
--------------------------------------------------------------------------------
/aos/runtime/store/store_client.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import asyncio
3 | import grpc
4 | from concurrent import futures
5 | from aos.runtime.store import grit_store_pb2_grpc, agent_store_pb2_grpc, agent_store_pb2
6 | from .base_client import BaseClient
7 |
8 | import logging
9 | logger = logging.getLogger(__name__)
10 |
# the idea is that only one of these clients exists and then the object store and refs classes create one-off stubs
12 |
class StoreClient(BaseClient):
    """Client for the store server, exposing grit-store and agent-store stubs.

    One instance is shared; each getter returns a fresh, cheap stub bound to
    either the blocking or the asyncio channel.
    """
    def __init__(self, server_address="localhost:50051"):
        super().__init__(server_address)

    def get_grit_store_stub_sync(self):
        """GritStore stub on the blocking channel."""
        return grit_store_pb2_grpc.GritStoreStub(self.get_channel_sync())

    def get_grit_store_stub_async(self):
        """GritStore stub on the asyncio channel."""
        return grit_store_pb2_grpc.GritStoreStub(self.get_channel_async())

    def get_agent_store_stub_sync(self):
        """AgentStore stub on the blocking channel."""
        return agent_store_pb2_grpc.AgentStoreStub(self.get_channel_sync())

    def get_agent_store_stub_async(self):
        """AgentStore stub on the asyncio channel."""
        return agent_store_pb2_grpc.AgentStoreStub(self.get_channel_async())
28 |
--------------------------------------------------------------------------------
/aos/runtime/web/README.md:
--------------------------------------------------------------------------------
1 | # Webserver
2 |
3 | Communicates with apex, grit, and workers to serve external API requests for wits.
--------------------------------------------------------------------------------
/aos/runtime/web/__init__.py:
--------------------------------------------------------------------------------
1 | from . web_server import WebServer
--------------------------------------------------------------------------------
/aos/runtime/worker/README.md:
--------------------------------------------------------------------------------
1 | # Python Worker
2 |
3 | For actors whose wit is implemented in Python.
--------------------------------------------------------------------------------
/aos/runtime/worker/worker_client.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import asyncio
3 | import grpc
4 | import logging
5 | from concurrent import futures
6 | from aos.runtime.worker import worker_api_pb2, worker_api_pb2_grpc
7 | from aos.runtime.store.base_client import BaseClient
8 |
9 | import logging
10 | logger = logging.getLogger(__name__)
11 |
class WorkerClient(BaseClient):
    """Client for a Python worker's API server; returns one-off WorkerApi stubs."""
    def __init__(self, server_address="localhost:50053"):
        super().__init__(server_address)

    def get_worker_api_stub_sync(self):
        """WorkerApi stub on the blocking channel."""
        return worker_api_pb2_grpc.WorkerApiStub(self.get_channel_sync())

    def get_worker_api_stub_async(self):
        """WorkerApi stub on the asyncio channel."""
        return worker_api_pb2_grpc.WorkerApiStub(self.get_channel_async())
21 |
--------------------------------------------------------------------------------
/aos/wit/__init__.py:
--------------------------------------------------------------------------------
1 | from . data_model import *
2 | from . errors import *
3 | from . wit_state import *
4 | from . wit_api import *
5 | from . wit_routers import MessageContext, QueryContext
6 | from . query import Query
7 | from . request_response import RequestResponse
8 | from . prototype import (create_actor_from_prototype, create_actor_from_prototype_with_state, create_actor_from_prototype_msg_with_state,
9 | wrap_in_prototype,
10 | get_prototype_args, get_prototype_args_as_json, get_prototype_args_as_model)
11 | from . discovery import Discovery
12 | from . external_storage import ExternalStorage
13 | from . presence import Presence
14 | from . default_wits import empty
--------------------------------------------------------------------------------
/aos/wit/default_wits.py:
--------------------------------------------------------------------------------
1 | from aos.grit import *
2 | from aos.wit import *
3 |
# A minimal wit that does nothing except log what arrives in its inbox.
empty = Wit()

@empty.run_wit
async def empty_wit(inbox:Inbox, outbox:Outbox, core:Core, **kwargs):
    """Reads all new inbox messages and prints sender id and message type."""
    for message in await inbox.read_new():
        print("Wit message:", message.sender_id.hex(), message.mt)

#TODO: add more utility wits here
13 |
--------------------------------------------------------------------------------
/aos/wit/discovery.py:
--------------------------------------------------------------------------------
1 |
2 | from abc import ABC, abstractmethod
3 | from aos.grit import *
4 |
class Discovery(ABC):
    """Interface for looking up actors by well-known names."""

    @abstractmethod
    async def find_named_actor(self, actor_name:str) -> ActorId | None:
        """Return the id of the actor registered under `actor_name`, or None if not found."""
        pass

    @abstractmethod
    async def find_prototype(self, prototype_name:str) -> ActorId | None:
        """Return the id of the prototype registered under `prototype_name`, or None if not found."""
        pass
--------------------------------------------------------------------------------
/aos/wit/errors.py:
--------------------------------------------------------------------------------
1 |
class InvalidCoreException(Exception):
    """Raised when an actor core is invalid or malformed."""
    pass
4 |
class InvalidWitException(Exception):
    """Raised when a wit function is invalid or cannot be resolved."""
    pass
7 |
class InvalidMessageException(Exception):
    """Raised when a message is invalid or malformed."""
    pass
10 |
class InvalidGenesisException(Exception):
    """Raised when a genesis message or genesis step is invalid."""
    pass
13 |
class InvalidUpdateException(Exception):
    """Raised when an update message or update step is invalid."""
    pass
16 |
class QueryError(Exception):
    """Raised when running a query against an actor fails.

    `query_not_found` distinguishes "the actor does not implement this query"
    from "the query handler itself failed".
    """
    # Class-level default, kept for backward compatibility with code that
    # reads the flag on instances that never set it explicitly.
    query_not_found:bool = False

    def __init__(self, *args, query_not_found:bool=False):
        # Keyword-only flag so existing `QueryError(msg)` call sites are unaffected;
        # also removes the redundant trailing `pass` of the original definition.
        super().__init__(*args)
        self.query_not_found = query_not_found
--------------------------------------------------------------------------------
/aos/wit/external_storage.py:
--------------------------------------------------------------------------------
1 |
2 | from abc import ABC, abstractmethod
3 |
class ExternalStorage(ABC):
    """Provides access to external storage. External here means not Grit. So a file system or a cloud storage."""
    @abstractmethod
    def get_dir(self, sub_dir:str|None=None) -> str:
        """Returns a directory where the actor can store files. The directory will be created if it does not exist.

        `sub_dir` presumably selects a subdirectory below the actor's storage
        root — confirm with concrete implementations.
        """
        pass
10 |
--------------------------------------------------------------------------------
/aos/wit/presence.py:
--------------------------------------------------------------------------------
1 |
2 | from abc import ABC, abstractmethod
3 | from aos.grit import *
4 |
class Presence(ABC):
    """A service to check the presence of a user (or any entity) on an arbitrary channel.
    This is used to communicate out of band during execution of a wit function.

    For example, when streaming an LLM completion inside a wit, this can be used to return the text stream
    to the user while executing the function as a single step."""

    @abstractmethod
    async def check(self, channel:str) -> bool:
        """Checks if anyone is present on this channel."""
        pass

    # NOTE(review): not marked @abstractmethod and the default body is a no-op —
    # presumably publishing is optional for implementers; confirm this is intended.
    async def publish(self, channel:str, message:Blob) -> None:
        """Publishes a message to the channel."""
        pass
--------------------------------------------------------------------------------
/aos/wit/query.py:
--------------------------------------------------------------------------------
1 |
2 | from abc import ABC, abstractmethod
3 | from typing import Type
4 | from aos.grit import *
5 | from aos.wit import BlobObject, BaseModelType
6 | from pydantic import BaseModel
7 |
class Query(ABC):
    """Runs named queries against an actor, normalizing the query context to a Blob."""

    @abstractmethod
    async def _run(
        self,
        actor_id:ActorId,
        query_name:str,
        context:Blob|None,
        ) -> Tree | Blob | None:
        pass

    async def run(
        self,
        actor_id:ActorId,
        query_name:str,
        query_context:Blob|BlobObject|BaseModel|dict|str|None = None,
        ) -> Tree | Blob | None:
        """Normalize `query_context` into a Blob and delegate to `_run`.

        Accepts a Blob, BlobObject, pydantic BaseModel, dict, or str context;
        raises ValueError for anything else.
        """
        context = query_context
        if context is not None:
            if isinstance(context, BlobObject):
                context = context.get_as_blob()
            elif isinstance(context, (BaseModel, dict)):
                context = BlobObject.from_json(context).get_as_blob()
            elif isinstance(context, str):
                context = BlobObject.from_str(context).get_as_blob()
            elif not is_blob(context):
                raise ValueError("query_context must be a Blob, BlobObject, BaseModel, dict, or str.")
        return await self._run(actor_id, query_name, context)

    async def run_as_model(
        self,
        actor_id:ActorId,
        query_name:str,
        pydantic_model:Type[BaseModelType],
        query_context:Blob|BlobObject|BaseModel|dict|str|None = None,
        ) -> BaseModelType | None:
        """Run the query and parse the resulting blob into `pydantic_model`.

        Returns None when the query returned nothing; raises ValueError when the
        result is not a blob and therefore cannot be parsed.
        """
        raw = await self.run(actor_id, query_name, query_context)
        if raw is None:
            return None
        if not is_blob(raw):
            raise ValueError(f"Query result must be a blob, cannot convert to {pydantic_model}.")
        return BlobObject.from_blob(raw).get_as_model(pydantic_model)
--------------------------------------------------------------------------------
/aos/wit/request_response.py:
--------------------------------------------------------------------------------
1 |
2 | from abc import ABC, abstractmethod
3 | from .data_model import OutboxMessage, InboxMessage
4 |
class RequestResponse(ABC):
    """Interface for request-response ("rails") calls to an actor."""

    @abstractmethod
    async def run(
        self,
        msg:OutboxMessage,
        response_types:list[str],
        timeout:float|None = None,
        ) -> InboxMessage:
        """Send `msg` and return the first inbox reply whose type is in `response_types`.

        `timeout` (seconds), if given, bounds the wait.
        """
        pass
14 |
--------------------------------------------------------------------------------
/aos/wit/wut.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from aos.grit import *
3 | from .data_model import *
4 | from .data_model_utils import *
5 | from .wit_routers import _WitMessageRouter, _WitQueryRouter, _WitMessageWrapper, _NamedQueryWrapper
6 |
# The 'wut' query is just a convention to implement a query name that returns information about the wit.
8 | # It is not required, but it is a good idea to implement it.
9 | #
10 | # Since the routers have all the information about the message and query handlers, we can just plug into
11 | # the routers and generate that information automatically. Kind of like how FastApi generates OpenAPI specs.
12 | #
13 | # The current implementation is just a proof of concept.
14 |
class _WutGenerator:
    """Generates the 'wut' description of a wit from its registered handlers.

    Plugs into the message and query routers to describe the wit's API
    automatically, similar to how FastAPI generates OpenAPI specs.
    Proof-of-concept implementation.
    """
    def __init__(self,
        message_router:_WitMessageRouter|None,
        query_router:_WitQueryRouter|None,
        register_with_query_router:bool=True,):
        self.message_router = message_router
        self.query_router = query_router
        # see if it should register itself with the query router
        if self.query_router is not None and register_with_query_router:
            self.query_router.register_query_handler("wut", self.generate_wut)

    def generate_wut(self) -> BlobObject:
        """Generate a wut file from the registered wits."""
        # Of course, this is just a demo; this should really produce a proper OpenAPI
        # type of spec, use pydantic schemas, and so on.
        # TODO: implement this properly
        wut = {}
        if self.message_router:
            wit_handlers = {}
            wut["messages"] = wit_handlers
            for message_type, wrapper in self.message_router._wit_message_handlers.items():
                # genesis/update are lifecycle messages, not part of the public API
                if message_type in ("genesis", "update"):
                    continue
                if wrapper.input_param is None:
                    # Handlers without a declared input take the raw InboxMessage.
                    # Fix: str(InboxMessage) names the class; the original
                    # str(type(InboxMessage)) stringified its *metaclass*.
                    wit_handlers[message_type] = str(InboxMessage)
                else:
                    wit_handlers[message_type] = str(wrapper.input_param.annotation)
        if self.query_router:
            query_handlers = {}
            wut["queries"] = query_handlers
            for query_name, wrapper in self.query_router._query_handlers.items():
                if wrapper.input_param is None:
                    query_handlers[query_name] = ""
                else:
                    query_handlers[query_name] = str(wrapper.input_param.annotation)
        return BlobObject.from_json(wut)
--------------------------------------------------------------------------------
/conftest.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/conftest.py
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Documentation
2 |
3 | The documentation is very much work in progress.
4 |
5 | ❓If you want to understand the motivation behind the project, read the [manifesto](articles/manifesto.md).
6 | ❓To learn how everything works, start reading the [design docs](design/design.md).
--------------------------------------------------------------------------------
/docs/SUMMARY.md:
--------------------------------------------------------------------------------
1 | # Summary
2 |
3 | What structure should the docs be?
4 | - Getting Started
5 | - Architecture / Design / Concepts
6 | - Building Agents
7 | - Reference
8 | - Agent OS Design
9 |
10 | ## Getting Started
11 |
12 | ## Design & Specs
13 | * [OS Design](design/design.md)
--------------------------------------------------------------------------------
/docs/design/wit-resolution.md:
--------------------------------------------------------------------------------
1 | # Wit Function Resolution
2 | The runtime, when executing an actor, needs to first find the wit function as it is defined in the core.
3 |
4 | The `/wit` node at the root of the core points to the wit function either inside the core or externally.
5 |
6 | ## Wit Resolution by External Reference
7 | An external pointer to a wit has the following contents in `/wit`:
8 | ```
9 | external:wit_ref
10 | or
11 | external:module_name[.sub_module_name]:wit_function_name
12 | ```
13 | To make life easier during development, a core does not have to contain the actual wit code itself. It can just point to it. So, if it's an `external:` pointer, it will first look in the internal resolver table for a manually registered function factory that matches `wit_ref`. We utilize this extensively for unit testing.
14 |
15 | If the reference points to a `module:function`, it will look in the normally loaded Python modules. It is the developer's responsibility to make sure that the module path is in `sys.path`, but the CLI helps with that.
16 |
17 | ## Wit Resolution from Core
18 | If the wit is loaded from the core itself (which is the default), it must be formatted like this:
19 | ```
20 | /code:module_name[.sub_module_name]:wit_function_name
21 | ```
22 | In this case, the resolver will look in the core of an actor for the code of a wit function.
23 |
24 | Core references must start with a slash, `/`, and the shortest core path is just a single slash, indicating the whole core can be searched for modules. Although it is a better practice to put all code into a `/code` sub-node.
25 |
If the wit points to its own core, then it must contain a Python module at the specified module path (root path : module path : function name). The module can load other packages and modules just like on a file system. (The only limitation, right now, is that absolute imports with submodules don't work, e.g. `import my_mod.my_sub_mod`; instead use `from my_mod.my_sub_mod import *` syntax.)
27 |
28 | For example, in the high-level "greetings" example above, we would persist the code of the greetings wit inside the core under, say, `/code/greetings_wit.py`, and then set the `/wit` node to `/code:greetings_wit:app`. With the decorator pattern the entry point is the `app` object itself, which is callable.
--------------------------------------------------------------------------------
/docs/images/agents-fig-1-jetpack-demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/docs/images/agents-fig-1-jetpack-demo.png
--------------------------------------------------------------------------------
/docs/images/agents-fig-2-jetpack-demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/docs/images/agents-fig-2-jetpack-demo.png
--------------------------------------------------------------------------------
/docs/images/design-fig-1-runtime-grit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/docs/images/design-fig-1-runtime-grit.png
--------------------------------------------------------------------------------
/docs/images/design-fig-2-actors.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/docs/images/design-fig-2-actors.png
--------------------------------------------------------------------------------
/docs/images/design-fig-3-grit-model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/docs/images/design-fig-3-grit-model.png
--------------------------------------------------------------------------------
/docs/images/design-fig-4-wit-function.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/docs/images/design-fig-4-wit-function.png
--------------------------------------------------------------------------------
/docs/images/design-fig-5-wit-function-genesis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/docs/images/design-fig-5-wit-function-genesis.png
--------------------------------------------------------------------------------
/docs/images/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/docs/images/screenshot.png
--------------------------------------------------------------------------------
/docs/thinking/DID.md:
--------------------------------------------------------------------------------
1 | # Decentralized Identifiers
2 |
3 | We'll use DIDs to identify an agent.
4 |
5 | The runtime acts as a DID controller for the agent's DID.
6 |
7 | The private key associated with the agent's DID will be used to derive most of the encryption and signing.
8 |
9 |
10 | ## References
11 | - https://github.com/did-method-plc/did-method-plc
--------------------------------------------------------------------------------
/docs/thinking/README.md:
--------------------------------------------------------------------------------
1 | Contains random snippets of thoughts and ideas that are part of the current work stack. A lot of the notes here are out of date, so do not consider them documentation!
2 |
3 | For proper documentation, insofar it exists, refer to the [/docs](/docs) folder.
--------------------------------------------------------------------------------
/docs/thinking/bootstrapping.md:
--------------------------------------------------------------------------------
1 | How can wits be bootstrapped?
2 |
3 | Wits need to be created, and know about each other.
4 |
5 | Key insight: no runtime is needed for this, all bootstrapping can be done by storing data in grit. The runtime should then pick up the agents.
6 |
7 | (probably similar with updates, they can just be written (mostly) to grit and then picked up by the runtime and actors)
8 | (and perhaps, bootstrapping and updating can be the same process: just syncing grit with a directory on the file system)
9 | (nah, they should probably be different, otherwise the update might apply wrongly re-create new agents)
10 |
11 | ## Sync File
12 |
13 | When loading wits into grit, there needs to be a way to tell where they live and what should be included.
14 | We'll use TOML files (https://toml.io/en/v1.0.0) for that.
15 |
16 | A discovery entry (or deployment entry) is a pointer to a wit directory, or file. Each entry is a TOML table.
17 |
18 | ```toml
19 | [all] #optional
20 | #push can also be a json (or inline table in TOML)
21 | push = "path/to/common_code:/"
22 | push_values = {
23 | "/data/args": "hello world",
24 | "/data/more_args": {"hello": "world"},
25 | }
26 | push_on_genesis = "path/to/common_code:/"
27 | pull = "path/to/common_code:/"
28 | sync = "path/to/common_code:/" #supports both push and pull
29 |
30 | [[actors]]
31 | name = "actor_one" #optional
32 | push = "path/to/wit_one:/code" #is merged with all, can be omitted, then the all sync is used
33 | wit = "/code:module:function_name"
34 | wit_query = "/code:module:function_name"
35 |
36 | [[actors]]
37 | name = "actor_two"
38 | push = "path/to/wit_two"
39 | wit = "external:module:function_name"
40 | wit_query = "external:module:function_name"
41 | runtime = "python" #which runtime to use, default is python
42 |
43 | ```
44 |
45 | ## Mechanics
46 |
47 | In grit refs, there is an entry that maps each actor name to the actor id. Through that, sync operations know which actor to update.
48 |
49 | All push syncs happen by writing the appropriate genesis or update messages.
50 |
51 |
--------------------------------------------------------------------------------
/docs/thinking/decorators_design.md:
--------------------------------------------------------------------------------
1 |
2 | # Wit and Query Decorators
3 |
4 | Here is how i'd like to use decorators
5 |
6 | ```python
7 |
8 | class TestMessage(BaseModel): #pydantic
9 | hello:str
10 | world:str
11 |
12 | class WitState(BaseState): #wit
13 | hellos:List[str] = []
14 |
15 | wit = Wit()
16 |
17 | @wit.genesis_message
18 | async def on_genesis(msg:InboxMessage):
19 | print("genesis message", msg.message_id.hex())
20 |
21 | @wit.message("test")
22 | async def test_message(content:TestMessage, state: WitState):
23 | print("test message", content)
24 | state.hellos.append(content.hello + " " + content.world)
25 |
26 | @wit.message("ping")
27 | def test_message(msg:InboxMessage, outbox:Outbox):
28 | print("test message", msg)
29 | outbox.send_reply(msg, "pong")
30 |
31 | #or
32 | @wit.messages
33 | def test_message(inbox:Inbox):
34 | print("test message", msg)
35 |
36 | #or
37 | @wit.run
38 | async run_wit(inbox:Inbox, outbox:Outbox, core:Core):
39 | pass
40 |
41 |
42 | ```
--------------------------------------------------------------------------------
/docs/thinking/distributed_runtime.md:
--------------------------------------------------------------------------------
1 | the runtime can be distributed. But there likely has to be a central runtime that organizes which actor runs where, since each actor should only be running once.
2 | (but the running once requirement is not strict as long as outmessages are handled correctly, i.e. only outputs from one actor instance are routed.)
3 |
4 | The central runtime has access to all kinds of user secrets. Distributed actors can ask for those secrets. This allows even actors that are running on the user machine to use all kinds of services directly.
5 |
6 | Secret management should likely be separate from the refs and object_store.
7 |
8 |
9 | Also, the best way to do remote client communication is running a wit on the remote client, instead of having the remote client trying to sync with a runtime/individual wit on the server
10 |
--------------------------------------------------------------------------------
/docs/thinking/error_handling.md:
--------------------------------------------------------------------------------
1 | # Error Handling
2 |
3 | How should errors during wit executions be handled?
4 |
5 | To be clear, we are only investigating the case where wit functions are properly executed, and the execution fails due to an error in the wit function itself.
6 |
7 | Anything else outside of that is a "runtime" or "framework" bug that stops the execution of the runtime.
8 |
9 | ## Approaches
10 |
11 | There are really three key problems that need solving:
12 | 1) what to do with the actor when a wit function call fails? Stop it, retry with backoff, sound the alarm, etc?
13 | 2) what to do with the message that caused the error? Retry it, discard it, etc?
14 | 3) how to communicate the error back to the sender of the message?
15 |
16 | ### Naive
17 | The most simple and naive approach assumes that the wit functions deal with errors themselves fully and if an error bubbles up to the actor executor, the executor stops.
18 |
19 | This means it's the responsibility of the wit implementer to decide if a new message should be committed to the step's inbox if the processing of that message produces an error. If an execution does not commit the message but also doesn't fail, the runtime will retry the message forever until it is committed.
20 |
21 | This approach is what is currently implemented in the agent-os. The problem is that it offers no help with automated error handling or communication back to the sender.
22 |
23 | ### Automatic Catch and Retry
24 |
25 | On top of the naive approach we can add helpers that retry a message, and communicate the errors back to the user. This could, for example, be implemented in the `wit` package itself, or in a separate package that wraps the `wit` package.
26 |
27 | Error handling would then become a matter of configuring the retry behavior and the error communication behavior.
28 |
29 | ### Part of the Protocol
30 |
31 | Another option is to make errors part of the protocol on how wit functions are executed. This would mean that the runtime would be responsible for retrying messages and communicating errors back to the sender. I think this is too invasive and would make the runtime too complex and inflexible.
32 |
33 | Some of the retry logic needs to be implemented in the runtime, though, because things like exponential backoff are not something that can be implemented in a wit function.
34 |
35 |
--------------------------------------------------------------------------------
/docs/thinking/gRPC.md:
--------------------------------------------------------------------------------
1 |
2 | Getting gRPC to work reliably, at scale, won't be easy.
3 |
4 | Here are some references:
5 | - Notes on TCP and gRPC connection gotchas (very good) https://www.evanjones.ca/tcp-connection-timeouts.html
6 | - What the timeout settings are for python: https://github.com/grpc/grpc/issues/25540
7 |
--------------------------------------------------------------------------------
/docs/thinking/indexing.md:
--------------------------------------------------------------------------------
1 | # Indexing
2 |
3 | There are two, somewhat related, albeit separable, indexing problems in agent os.
4 |
5 | The first problem is very grit specific: large trees will get slow to modify and load (once in memory the dictionary is relatively fast). Plus, we cannot save tree collections that are larger than memory, e.g., many millions of sub items (trees or blobs).
6 |
7 | The second problem is indexing fields inside, say, blobs, and making those searchable, and filterable. Vector indexes are a subproblem of this, but not the only one. This problem becomes more pronounced once we have large collections inside grit, but indexes could also be required of just a few large blob items, with say, large jsons inside (or CSVs, or similar). So in many ways this is orthogonal to the first problem.
8 |
9 | The first problem is more akin to a primary key index and the second problem is more akin to a secondary index.
10 |
11 | My sense is that the solution is to solve these two problems separately. Once for large grit trees (in a grit native way) and once for various sub indexes (in a more general way).
12 |
13 | ## Grit Indexing
14 |
15 | The most straightforward solution is to use some sort of tree sharding or partitioning. Define a partitioning function for the tree keys and then partition the data into multiple sub-trees that are managed by the partitioner module.
16 |
17 | The partitioning can be recursive more like a trie, where a piece of the main key is taken and then the tree is split into multiple sub-trees. This is a bit like a radix tree, but with a more general partitioning function. The problem with this approach is the need for rebalancing.
18 |
19 | The actual structure we want is closest to a HAMT (https://en.wikipedia.org/wiki/Hash_array_mapped_trie)
--------------------------------------------------------------------------------
/docs/thinking/inspiration.md:
--------------------------------------------------------------------------------
1 | # Inspiration for the Agent OS
2 |
3 |
4 | Orleans: Virtual Actor model
5 |
6 | Urbit: State transition function, and Solid State Interpreter
7 |
8 | Git: DAG
9 |
10 | Crypto/IPFS: Merkle DAG
11 |
12 | Temporal.io: persistence of workflows (although wits dont do that), or better, "persistent actor model"
13 |
14 | FastApi: for the wit api
15 |
16 | AtProtocol: connecting repos with DIDs (https://atproto.com/specs/repository)
--------------------------------------------------------------------------------
/docs/thinking/manifests.md:
--------------------------------------------------------------------------------
1 | # Manifests
2 |
3 | ## How to know what a wit needs to run?
4 |
5 | We need a way to indicate to the runtime or worker what the wit/actor needs to run.
6 |
7 | ## Indicators
8 | - Language or language runtime
9 | - Library versions (sem versioned)
10 |
11 | I think that's it.
12 |
13 | ## Structure
14 |
15 | Where to save this information? We could re-use the wit structure for this. But probably should be a separate file.
16 |
17 | I've been thinking we should probably use json for the manifests since it is universal and can be easily verified with a schema.
--------------------------------------------------------------------------------
/docs/thinking/object_problem.md:
--------------------------------------------------------------------------------
1 | Wit functions are basically used like objects. With constructors and all.
2 |
3 | How to codify this in a OOP way?
4 |
5 | ## Constructors
6 |
7 | One of the main challenges is how to dynamically change the constructor type from in-core, to external, to external-ref only. And do that everywhere where actors create other actors?
8 |
--------------------------------------------------------------------------------
/docs/thinking/presence.md:
--------------------------------------------------------------------------------
1 | # Presence
2 |
3 | The webserver needs some sort of presence feature where the frontend can set certain "presence markers" which can then be queried by the actors. This will allow actors to decide if they should stream results or just generate them at once. And maybe also decide how much time to spend on producing an answer.
4 |
5 | This could just be implemented as a wit query to the root actor or some website actor proxy.
--------------------------------------------------------------------------------
/docs/thinking/rails.md:
--------------------------------------------------------------------------------
1 | # Rails
2 |
3 | Rails allow the orchestration of multiple wit functions *synchronously*. Rails use utility actors under the hood that proxy messages.
4 |
5 | ## Problem
6 | Since wit functions are fundamentally async, it is hard to compose multiple wit functions together.
7 |
8 | Let's say we have a wit that downloads an image and saves it in grit
9 |
10 | ```
11 | async def download_image(str url, store, outbox)
12 | img = requests.get(url)
13 | img_id = store.store(img)
14 | outbox.send(requester_id, img_id)
15 | ```
16 |
17 | How should we reasonably call this function from a different wit?
18 |
19 | Enter continuations or completion handlers. This is how async was done back in the day: with callbacks. The problem with callbacks is that they are hard to compose. You can't just call a function and get a result back. You have to pass in a function that will be called when the result is ready.
20 |
21 | In the case of Wit message handlers, what needs to be correct is only the returning message type and maybe some sort of correlation id to be able to associate requests with responses (or, what is the same, commands with events).
22 |
23 | ## IO Monad
24 | One way to solve this, especially if multiple actors need to be coordinated is to use a monad. For example, Urbit, which is entirely async in its "agents", uses "treads" to coordinate async actions. In Urbit, a thread is a monad that can be used to compose async actions. https://developers.urbit.org/reference/arvo/threads/overview
25 |
26 | ## Introducing "Rails"
27 | Let's call our IO monad "rails" (or "trains", not sure yet). A rail defines a linear path of several chained Wit function calls. Specifically, it enables request-response patterns with wit functions, but also other things like timeouts, and so on.
28 |
29 | In the agent OS, rails can only be properly started from the runtime itself (or the actor executor), a wit can use a rail helper, which is passed via context, to start a rail. Under the hood, a rail is just a wit too that proxies events for the caller.
30 |
31 | ## Deadlocks
32 | As long as the rails-subsystem does not allow reentrancy into the actor that "owns" or initiated the rail, dead-locks can be avoided. Also, as long as a rail is active, no other actor should be allowed to create a different rail that messages an actor with that active rail. Again, this could cause deadlocks.
33 |
34 | ## Workers and Coordination
35 | There should probably be a "rails worker" that runs on the same worker as the actor runs, and consequently there might be many of them. And a main rails wit, that coordinates all existing rails (probably with a timeout). Or, at least, the rails coordinator needs to kick in if a particular rail references actors that are not managed on the local worker.
36 |
37 | ## Perf
38 | Rails will be sloooow! Because it will require the computation of many steps, both the actual composed steps of real wits, and internally to store the state. So they should be used with caution.
--------------------------------------------------------------------------------
/docs/thinking/scaling_actors.md:
--------------------------------------------------------------------------------
1 | # How to Scale Individual Actors?
2 |
3 | Idea: allow a wit to mark itself as "parallelizable" and then the runtime can create multiple instances of that actor. Instead of there being a single head ref, there will be multiple, managed by the runtime to distribute load. So basically an actor that will be instantiated multiple times.
4 |
5 | A thing to consider, to keep the state management more predictable, is to not allow those kinds of actors to modify their own core. Ie. they are only allowed to keep their original core that defined their id, maybe with all cores being the same?
6 |
7 | Thus the runtime could then round-robin incoming messages to those scaled actors. And it could even propose the messages in some order to each actor, and the actor could choose to take it from the inbox or not. Such a setup would allow actors to work as parallelized consumers in a kind of producer-consumer pattern.
8 |
9 | This kind of scaled setup is also desirable for the root actor which will see a lot of traffic.
10 |
11 | Moreover, if we implement channels in the messages, this would make it even more powerful. The runtime could then route messages to the correct actor based on the channel. This would allow for a more fine-grained control of which scaled actor gets or takes a message.
12 |
13 | The only downside is that this would result in multiple inboxes and outboxes with different steps. However, if the scaled actors share a common actor id, this is not so much a problem because to other, singleton actors, the scaled actor looks like a single, addressable actor--and a single sender as well.
14 |
15 | With messaging there probably needs to be some more rules so that this works, especially with sending messages from scaled actors that are queues. If multiple actors produce a queue and send it to the same singleton actor, then there is an ambiguity on which queue to accept from all those senders. The solution is probably to either have different actor ids for each scaled actor (but then we lose other benefits), have scaled actors only send signals, or have them always use channels for sending messages that identify the sending actor in some other way. The final option is to have scaled actors be addressable either by a shared actor id or by an individual id that is different for each scaled actor. The sender id from scaled actors would always be their individual id.
--------------------------------------------------------------------------------
/docs/thinking/search_actor.md:
--------------------------------------------------------------------------------
1 | Create embeddings for two chunk streams:
2 | - the actual file chunked
3 | - a forward interpolated version of the file chunked (processing each chunk by an LLM asking it to replace all the ambiguities of the current chunk with specifics from the previous context which are the last n chunks preceding the current one)
4 |
5 | Additionally, we can combine this with more traditional keyword based search.
6 |
7 | Finally, we search each stream and combine the weights of each matched chunk. If one matches in the actual file chunk, the interpolated one, and the keyword search, it will have a higher weight than if it only matches in one of the streams.
--------------------------------------------------------------------------------
/docs/thinking/sync_vs_async.md:
--------------------------------------------------------------------------------
1 | Currently, the system is designed for async. But this will be a major show-stopper for many python developers, since they cannot trivially use their (mainly) sync libraries with the async event loop. (Only a few sync actors can run in parallel because threads are expensive.) Another reason is that LLMs are more likely to write correct sync code than async code.
2 |
3 | Therefore, writing sync wits must be possible. But then there is the problem on how to mix the sync and async colored functions.
4 |
5 | The object store and refs now need a sync and async version! Since it is only a handful of methods, that's fine. But what is a bigger challenge are the data model classes, which are also async through and through and contain longer, intermixed code. To create two versions is a bit of a pain.
6 |
7 | -------------------
8 |
9 | So, this is a problem of [colored functions](http://journal.stuffwithstuff.com/2015/02/01/what-color-is-your-function/). On the library level, we can just choose to either only support async and let it be up to the user to wrap blocking stuff in a thread or support sync all the way down to the object and reference store...
10 |
11 | But, since this is a python lib, sync versions will likely have to be supported, just to not scare away users.
12 |
13 | -------------------
14 |
15 | ```python
16 | class ObjectStore:
17 | async def store(self, obj:Object) -> ObjectId:
18 | async def load(self, object_id:ObjectId) -> Object:
19 |
20 | def store_sync(self, obj:Object) -> ObjectId:
21 | def load_sync(self, object_id:ObjectId) -> Object:
22 | ```
23 |
24 | The idea, here, is that the async version is the default, and the sync variant is a concession. Which is the inverse from many other python libs, but, I think, makes sense here.
25 |
26 | The more I think about it, the more I'm convinced that this is the way. It's somewhat dirty, but such is life. Trying to solve this in a "purer" way, might end up with a more elegant solution, but it will be more complex and more difficult to understand. Manually adding _sync variants is a bit of a pain, but it's not too bad. And it's a one-time thing. This is mostly for the higher-level and user-facing apis, the lower level, internal apis will still be async only.
27 |
28 | -----
29 | Discussion here:
30 | https://discuss.python.org/t/how-can-async-support-dispatch-between-sync-and-async-variants-of-the-same-code/15014/9
31 | and example solution here
32 | https://github.com/django/asgiref/blob/d451a724c93043b623e83e7f86743bbcd9a05c45/asgiref/sync.py#L84
33 |
34 | Consider this talk on how to build protocol libraries:
35 | https://www.youtube.com/watch?v=7cC3_jGwl_U
--------------------------------------------------------------------------------
/docs/thinking/update.md:
--------------------------------------------------------------------------------
1 | # Updating Actors
2 |
3 | Updates are tricky.
4 |
5 | Here is my current idea:
6 | 1. An update is just a message with a new core, and a header `{"mt": "update"}`.
7 | 2. The wit then executes *as usual*, that is, the wit from the previous step is run to execute the transition function. The wit should not do special update work, but should perform any internal state cleanup in preparation for the update. Most of the time, the wit ignores the message though.
8 | 3. The runtime then treats the message like a genesis message: it looks for a `wit_update` node in the core. If it finds one, it runs the one *in* the core of the update. The `wit_update` node is a function that takes the old core and returns a new core, usually in the process replacing it, and migrating any state.
9 |
10 | In effect, update messages run two state transitions right after one another.
11 |
12 | ## Wire it Up
13 | The `actor_executor` (and/or runtime) will do increasingly more work to ensure that steps get executed correctly (and that safety precautions are respected). The ideal place to put an update function is in `_resolve_wit_function`, where the executor can detect if there is an update message and then *wrap* the wit function inside an update function. It would then work in the following way:
14 | 1. The wit computes its next step but does not merge the core.
15 | 2. If there is a `wit_update` entry in the *new* core, it executes it.
16 | 3. If there is no update function it just uses a default one that merges the cores relatively naively.
--------------------------------------------------------------------------------
/docs/thinking/webserver.md:
--------------------------------------------------------------------------------
1 |
2 | needed
3 | - inject messages for actors
4 | - receive messages from actors (both polling and realtime)
5 | - query actor state
6 | - trees
7 | - can be specified if entire tree is returned or just the current level
8 |
9 |
10 |
11 | ## HTTP Grit
12 | `grit` endpoints are read-only. The runtime does not have to be running.
13 |
14 | `$GRIT_URL = http://:/grit/`
15 |
16 | return all refs
17 | `GET $GRIT_URL/refs`
18 |
19 | return the object with that id
20 | `GET $GRIT_URL/objects/`
21 |
22 |
23 | ## HTTP Wit
24 |
25 | `wit` endpoints support interaction with wits via the runtime
26 |
27 | `$WIT_URL http://:/wit/`
28 |
29 | (NOT NEEDED) get messages sent to the runtime from all actors
30 | `GET $WIT_URL/messages/`
31 |
32 | (NOT NEEDED) get messages sent from a specific actor
33 | `GET $WIT_URL/messages/`
34 |
35 | receive new message notifications from all actors
36 | `GET $WIT_URL/messages-sse/`
37 |
38 | create a new message for an actor
39 | `POST $WIT_URL/messages/`
40 |
41 | query a wit (running query code in the core of the wit)
42 | `GET $WIT_URL/query//?query_strings=`
--------------------------------------------------------------------------------
/docs/thinking/workers.md:
--------------------------------------------------------------------------------
1 | workers are like runtimes, but simpler. They receive messages from the runtime of what actors to run. Usually, it's one worker per process.
2 |
3 | Workers also use virtual steps to manage their state and are thus introspectable.
--------------------------------------------------------------------------------
/protogen.sh:
--------------------------------------------------------------------------------
1 | OUT=./
2 | mkdir -p $OUT
3 | poetry run python -m grpc_tools.protoc -I./protos --python_out=$OUT --pyi_out=$OUT --grpc_python_out=$OUT ./protos/aos/runtime/store/grit_store.proto
4 | poetry run python -m grpc_tools.protoc -I./protos --python_out=$OUT --pyi_out=$OUT --grpc_python_out=$OUT ./protos/aos/runtime/store/agent_store.proto
5 | poetry run python -m grpc_tools.protoc -I./protos --python_out=$OUT --pyi_out=$OUT --grpc_python_out=$OUT ./protos/aos/runtime/apex/apex_workers.proto
6 | poetry run python -m grpc_tools.protoc -I./protos --python_out=$OUT --pyi_out=$OUT --grpc_python_out=$OUT ./protos/aos/runtime/apex/apex_api.proto
7 | poetry run python -m grpc_tools.protoc -I./protos --python_out=$OUT --pyi_out=$OUT --grpc_python_out=$OUT ./protos/aos/runtime/worker/worker_api.proto
--------------------------------------------------------------------------------
/protos/README.md:
--------------------------------------------------------------------------------
1 | # Protocol Buffers Definitions
2 |
3 | For IPC between grit, apex, workers, and the webserver (and CLI).
4 |
5 | The folder structure of the proto definitions MUST match the desired output location and module name. Because protoc generates only absolute imports, which do not work unless they match the aos module structure.
6 |
7 | See: https://github.com/protocolbuffers/protobuf/issues/1491
8 |
9 | ## Reference
10 |
11 | - https://github.com/codethecoffee/proto-cheatsheet
12 | - https://protobuf.dev/programming-guides/proto3/
--------------------------------------------------------------------------------
/protos/aos/runtime/apex/apex_api.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | // APIs for CLI and Web
4 | service ApexApi {
5 |
6 | rpc GetApexStatus(GetApexStatusRequest) returns (GetApexStatusResponse) {}
7 |
8 | rpc GetRunningAgents(GetRunningAgentsRequest) returns (GetRunningAgentsResponse) {}
9 | rpc GetRunningAgent(GetRunningAgentRequest) returns (GetRunningAgentResponse) {}
10 | rpc StartAgent(StartAgentRequest) returns (StartAgentResponse) {}
11 | rpc StopAgent(StopAgentRequest) returns (StopAgentResponse) {}
12 |
13 | //TODO:
14 | // GetApexStatusStream (one way stream)
15 | }
16 |
17 | //=========================================================
18 | // Agent Management
19 | //=========================================================
20 | message StartAgentRequest {
21 | bytes agent_id = 1;
22 | }
23 |
24 | message StartAgentResponse {
25 | }
26 |
27 | message StopAgentRequest {
28 | bytes agent_id = 1;
29 | }
30 |
31 | message StopAgentResponse {
32 | }
33 |
34 | message GetRunningAgentsRequest {}
35 |
36 | message GetRunningAgentsResponse {
37 | repeated AgentInfo agents = 10;
38 | }
39 |
40 | message GetRunningAgentRequest {
41 | bytes agent_id = 1;
42 | }
43 |
44 | message GetRunningAgentResponse {
45 | optional AgentInfo agent = 10;
46 | }
47 |
48 | message AgentInfo{
49 | bytes agent_id = 1;
50 | uint64 point = 2;
51 | string worker_id = 3;
52 | string worker_address = 4;
53 | map capabilities = 10;
54 | }
55 |
56 | message GetApexStatusRequest {}
57 |
58 | message GetApexStatusResponse {
59 | enum ApexStatus {
60 | UNKNOWN = 0;
61 | STARTING = 1;
62 | RUNNING = 2;
63 | STOPPING = 3;
64 | ERROR = 10;
65 | }
66 | ApexStatus status = 1;
67 | string store_address = 3;
68 | repeated WorkerInfo workers = 10;
69 | }
70 |
71 | message WorkerInfo{
72 | string worker_id = 1;
73 | string worker_address = 2;
74 | repeated bytes current_agents = 3;
75 | map capabilities = 10;
76 |
77 | }
78 |
79 |
--------------------------------------------------------------------------------
/protos/aos/runtime/apex/apex_workers.proto:
--------------------------------------------------------------------------------
syntax = "proto3";
//import "google/protobuf/empty.proto";
//import "aos/runtime/apex/test.proto";

// Internal service for worker nodes to communicate with the Apex node
service ApexWorkers {
    // Registers a worker to get ready for a streaming connection.
    rpc RegisterWorker(WorkerRegistrationRequest) returns (WorkerRegistrationResponse) {}
    // Connects a long running worker to the apex node via two-way streaming. Worker needs to register first.
    rpc ConnectWorker(stream WorkerToApexMessage) returns (stream ApexToWorkerMessage) {}
}

// TODO: the worker stream can be simplified into a single one-way apex stream on agent/worker updates
// workers can subscribe to it, but they look at the store to find actors to work on (using some sort of distributed lock to coordinate between workers)


message WorkerRegistrationRequest {
    string worker_id = 1;
    string worker_address = 2; //how to connect to the worker
}

message WorkerRegistrationResponse {
    string ticket = 1; //used to connect to the stream
}

message WorkerManifest {
    string worker_id = 1;
    // NOTE(review): map type parameters were missing (invalid proto3) — restored
    // as string->string; confirm value type against the generated code.
    map<string, string> capabilities = 10; //what capabilities can it satisfy
    repeated Agent current_agents = 20; //not allowed to have any workers in it on READY (aka connect)
    repeated bytes desired_agents = 21; //the agents that the worker wants to have (maybe because it had them previously, local caches, etc)
    //capacity, load, etc so that apex can decide where to send agents
}

message Agent {
    bytes agent_id = 1; //32 bytes, of actor_id type
    uint64 point = 2;
    // NOTE(review): map type parameters restored — string->string assumed; confirm.
    map<string, string> capabilities = 10; //what capabilities are requested by the agent
    //map workers = 20; //worker_id, worker_address -> for later, what workers are assigned to this agent, so that workers can coordinate
}

message AgentAssignment {
    bytes agent_id = 1; //required
    optional Agent agent = 2; //only needed when giving to worker
    //todo: reason for assignment, maybe also what actors
}


message ApexToWorkerMessage {
    enum MessageType {
        PING = 0;
        GIVE_AGENT = 10; // give to worker
        YANK_AGENT = 11; // take from worker
    }

    //always required
    MessageType type = 1;

    oneof payload {
        AgentAssignment assignment = 10; // GIVE_AGENT, YANK_AGENT
    }
}

message WorkerToApexMessage {
    enum MessageType {
        PING = 0;
        READY = 1; // start receiving messages from apex
        //MANIFEST_UPDATE = 2; // send a manifest update to apex (esp, when capacity changes)
        RETURN_AGENT = 11; // return agent to apex
    }

    //always required
    MessageType type = 1;
    string worker_id = 2;
    string ticket = 3;

    oneof payload {
        WorkerManifest manifest = 10; //READY event
        AgentAssignment assignment = 11; //RETURN_AGENT
    }
}
--------------------------------------------------------------------------------
/protos/aos/runtime/store/agent_store.proto:
--------------------------------------------------------------------------------
syntax = "proto3";
import "google/protobuf/empty.proto";

// Manage Agents
service AgentStore {
    rpc CreateAgent(CreateAgentRequest) returns (CreateAgentResponse) {}
    rpc DeleteAgent(DeleteAgentRequest) returns (DeleteAgentResponse) {}
    rpc GetAgent(GetAgentRequest) returns (GetAgentResponse) {}
    rpc GetAgents(GetAgentsRequest) returns (GetAgentsResponse) {}

    rpc SetVar(SetVarRequest) returns (google.protobuf.Empty) {}
    rpc GetVar(GetVarRequest) returns (GetVarResponse) {}
    rpc GetVars(GetVarsRequest) returns (GetVarsResponse) {}
    rpc DeleteVar(DeleteVarRequest) returns (google.protobuf.Empty) {}
}

//=========================================================
// Agent CRUD messages
//=========================================================

message CreateAgentRequest {
    optional uint64 point = 2; //if not provided, will be generated internally
}

message CreateAgentResponse {
    bytes agent_id = 1;
    uint64 point = 2;
}

message DeleteAgentRequest {
    oneof delete_by {
        bytes agent_id = 1;
        uint64 point = 2;
    }
}
message DeleteAgentResponse {}

message GetAgentRequest {
    oneof get_by {
        bytes agent_id = 1;
        uint64 point = 2;
    }
}

message GetAgentResponse {
    optional bytes agent_id = 1;
    optional uint64 point = 2;
    bool exists = 3;
}

message GetAgentsRequest {
    //if set, filters the agents to the ones that have the specified key-value pairs
    // NOTE(review): map type parameters were missing (invalid proto3) — restored
    // as string->string to match SetVar's key/value types; confirm.
    map<string, string> var_filters = 1;
}

message GetAgentsResponse {
    // NOTE(review): map type parameters were missing. proto3 map keys cannot be
    // bytes, so this is restored as point -> agent_id; confirm against the
    // server implementation.
    map<uint64, bytes> agents = 1;
}

//=========================================================
// Var(iables) CRUD Messages
//=========================================================

message SetVarRequest {
    bytes agent_id = 1;
    string key = 2;
    string value = 3;
}

message GetVarRequest {
    bytes agent_id = 1;
    string key = 2;
}

message GetVarResponse {
    bytes agent_id = 1;
    string key = 2;
    optional string value = 3;
}

message GetVarsRequest {
    bytes agent_id = 1;
    optional string key_prefix = 2;
}

message GetVarsResponse {
    bytes agent_id = 1;
    // NOTE(review): map type parameters restored; keys/values as set via SetVar.
    map<string, string> vars = 2;
}

message DeleteVarRequest {
    bytes agent_id = 1;
    string key = 2;
}
95 |
--------------------------------------------------------------------------------
/protos/aos/runtime/store/grit_store.proto:
--------------------------------------------------------------------------------
syntax = "proto3";
import "google/protobuf/empty.proto";

// Service methods for Grit object and reference store
service GritStore {

    rpc Store(StoreRequest) returns (google.protobuf.Empty) {}
    rpc Load(LoadRequest) returns (LoadResponse) {}

    rpc SetRef(SetRefRequest) returns (google.protobuf.Empty) {}
    rpc GetRef(GetRefRequest) returns (GetRefResponse) {}
    rpc GetRefs(GetRefsRequest) returns (GetRefsResponse) {}

}

enum ObjectType {
    BLOB = 0;
    TREE = 1;
    MESSAGE = 4;
    MAILBOX = 5;
    STEP = 10;
}

message StoreRequest {
    bytes agent_id = 1;
    optional bytes object_id = 3;
    bytes data = 10;
}
//store response is empty (see above)

message LoadRequest {
    bytes agent_id = 1;
    bytes object_id = 3;
}

message LoadResponse {
    bytes agent_id = 1;
    bytes object_id = 3;
    optional bytes data = 10;
}

message SetRefRequest {
    bytes agent_id = 1;
    string ref = 2;
    bytes object_id = 3;
}
//set ref response is empty (see above)
//maybe in the future, if there are race conditions on setting a ref,
// we could return the final object id that was set

message GetRefRequest {
    bytes agent_id = 1;
    string ref = 2;
}

message GetRefResponse {
    bytes agent_id = 1;
    string ref = 2;
    optional bytes object_id = 3;
}

message GetRefsRequest {
    bytes agent_id = 1;
    optional string ref_prefix = 2;
}

message GetRefsResponse {
    bytes agent_id = 1;
    // NOTE(review): map type parameters were missing (invalid proto3) — restored
    // as ref name -> object id, matching GetRefResponse; confirm.
    map<string, bytes> refs = 2;
}
72 |
73 |
--------------------------------------------------------------------------------
/protos/aos/runtime/worker/worker_api.proto:
--------------------------------------------------------------------------------
syntax = "proto3";

// APIs for CLI and Web
service WorkerApi {

    // Sends a message to the recipient actor, the sender is the root actor, which represents the agent.
    rpc InjectMessage(InjectMessageRequest) returns (InjectMessageResponse) {}
    rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {}
    rpc SubscribeToAgent(SubscriptionRequest) returns (stream SubscriptionMessage) {}
}


//=========================================================
// Agent or Actor Interactions
//=========================================================
message InjectMessageRequest {
    bytes agent_id = 1;
    bytes recipient_id = 2;

    oneof message {
        //externally crafted message, with a valid message structure (advanced use case)
        bytes message_id = 5;
        //message_id will be created by runtime by creating the required Grit objects
        MessageData message_data = 6;
    }
}

message MessageData {
    // NOTE(review): map type parameters were missing (invalid proto3) — restored
    // as string->string header key/values; confirm.
    map<string, string> headers = 3;
    bool is_signal = 4;
    bytes previous_id = 5;

    oneof content {
        //blob_id or tree_id (see Grit Message)
        bytes content_id = 10;
        //valid serialized Grit blob object
        bytes content_blob = 11;
    }
}

message InjectMessageResponse {
    bytes agent_id = 1;
    bytes message_id = 2;
}

message RunQueryRequest {
    bytes agent_id = 1;
    bytes actor_id = 2;
    string query_name = 4;
    optional bytes context = 5;
}

message RunQueryResponse {
    bytes agent_id = 1; //32 bytes, of actor_id type
    bytes actor_id = 2; //32 bytes, actor_id
    optional bytes result = 10; // can be a tree_id or a serialized Grit Blob
}

message SubscriptionRequest {
    bytes agent_id = 1;
    //TODO: add filters
}

message SubscriptionMessage {
    bytes agent_id = 1;
    bytes sender_id = 2;
    bytes message_id = 3;
    MessageData message_data = 4;
}
70 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
# Project metadata and dependencies, managed with Poetry.
[tool.poetry]
name = "agent-os-py"
version = "0.1.1"
description = "A library and runtime to build autonomous AI agents."
authors = []
license = "MIT"
readme = "README.md"
packages = [
    {include = "aos"},
]
repository = "https://github.com/lukebuehler/agent-os"

[tool.poetry.dependencies]
python = "^3.10"
python-dotenv = "^1.0.0"
pydantic = "^2.0.1"
aiofiles = "^23.1.0"
async-lru = "^2.0.2"
# NOTE(review): pytest-asyncio is a test plugin; it normally belongs in the
# test group below — confirm whether runtime code really needs it here.
pytest-asyncio = "^0.21.0"
tomlkit = "^0.11.8"
lmdb = "^1.4.1"
watchfiles = "^0.19.0"
starlette = "^0.37.2"
sse-starlette = "^1.6.1"
uvicorn = "^0.22.0"
click = "^8.1.5"
filetype = "^1.2.0"
grpcio = "^1.62.1"
py-multibase = "^1.0.3"
py-multicodec = "^0.2.1"
base58 = "^2.1.1"
cryptography = "^42.0.5"
# NOTE(review): grpcio-tools is usually only needed to generate code from
# .proto files; consider moving it to a dev group if it is not imported at runtime.
grpcio-tools = "^1.62.2"

[tool.poetry.group.test.dependencies]
pytest = "^8.1.1"
httpx = "^0.27.0"
httpx-sse = "^0.4.0"


[tool.poetry.group.dev.dependencies]
ruff = "^0.3.7"


[tool.poetry.group.agents]
# to install run poetry install --with agents
optional = true

[tool.poetry.group.agents.dependencies]
jinja2 = "^3.1.3"
openai = "^1.17.0"
pandas = "^2.2.2"
requests = "^2.31.0"
pillow = "^10.3.0"
scikit-learn = "^1.4.2"
transitions = "^0.9.0"
beautifulsoup4 = "^4.12.3"
mistune = "^3.0.2"
yfinance = "^0.2.37"
matplotlib = "^3.8.4"
openpyxl = "^3.1.2"
jsonschema = "^4.21.1"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.pytest.ini_options]
# Run async test functions without requiring explicit asyncio markers.
asyncio_mode = "auto"

[tool.poetry.scripts]
aos = "aos.cli.cli:cli"
gritserv = "aos.cluster.grit.grit_server:serve"
perf = "tests.perf.perf:main"
gen = "examples.coder.generator.gen_workbench:main"

[tool.ruff]
select = ["E", "F", "B"]
ignore = ["F403", "F405", "E501"]
line-length = 150

[tool.ruff.per-file-ignores]
"__init__.py" = ["F401", "E402"]
84 |
--------------------------------------------------------------------------------
/tests/cli/helpers_sync.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from aos.grit.stores.memory.memory_object_store import MemoryObjectStore
4 | from aos.grit import *
5 | from aos.wit import *
6 | from aos.cli import *
7 |
def get_random_actor_id() -> ActorId:
    """Derive a random ActorId from 20 random bytes, for test fixtures."""
    random_bytes = os.urandom(20)
    return get_object_id(random_bytes)
10 |
11 | def create_file(path, file_name, content: str|bytes|dict):
12 | #print("path type", path)
13 | if os.name == "nt" and "/" in str(path):
14 | path = path.replace("/", os.sep)
15 | os.makedirs(path, exist_ok=True)
16 | if(isinstance(content, dict)):
17 | content = json.dumps(content)
18 | if(isinstance(content, str)):
19 | content = content.encode('utf-8')
20 | with open(os.path.join(path, file_name), "wb") as f:
21 | f.write(content)
22 | return os.path.join(path, file_name)
--------------------------------------------------------------------------------
/tests/cli/test_sync_item_push_paths.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | from aos.grit import *
4 | from aos.wit import *
5 | import aos.cli.sync_item as sync_item
6 | import helpers_sync as helpers
7 |
async def test_sync_from_push_path__with_files(tmp_path):
    """Files are discovered recursively; files and dirs sort alphabetically."""
    root = os.path.relpath(tmp_path)
    for name, content in [
        ("file1.txt", "content1"),
        ("file2.html", "Congrats!"),
        ("file3.json", {"name": "John", "age": 30, "city": "New York"}),
    ]:
        helpers.create_file(root, name, content)
    helpers.create_file(f"{root}/code", "wit1.py", "python contents")
    helpers.create_file(f"{root}/code", "wit2.py", "python contents")

    items = sync_item.sync_from_push_path(f"{root}:/")
    assert len(items) == 5

    first = items[0]
    assert first.dir_path == str(root)
    assert first.core_path == "/"
    assert first.file_name == "file1.txt"
    assert first.item_name == "file1.txt"

    fourth = items[3]
    assert fourth.dir_path == os.path.join(root, "code")
    assert fourth.core_path == "/code"
    assert fourth.file_name == "wit1.py"
    assert fourth.item_name == "wit1.py"
29 |
async def test_sync_from_push_path__empty(tmp_path):
    """An empty directory yields no sync items, for every push-path form."""
    root = os.path.relpath(tmp_path)
    for push_path in (f"{root}:/", f"{root}:", f"{root}"):
        assert len(sync_item.sync_from_push_path(push_path)) == 0
43 |
async def test_sync_from_push_path__not_existing(tmp_path):
    """A push path pointing at a missing directory raises ValueError."""
    root = os.path.relpath(tmp_path)
    push_path = f"{root}/notexist:/"
    # Only the raising call sits inside pytest.raises; the old unused
    # `sync_items` binding (F841) is dropped.
    with pytest.raises(ValueError):
        sync_item.sync_from_push_path(push_path)
49 |
async def test_sync_from_push_path__invalid_core_path(tmp_path):
    """A core path that does not start with '/' raises ValueError."""
    root = os.path.relpath(tmp_path)
    push_path = f"{root}/notexist:blah"
    # Only the raising call sits inside pytest.raises; the old unused
    # `sync_items` binding (F841) is dropped.
    with pytest.raises(ValueError):
        sync_item.sync_from_push_path(push_path)
55 |
async def test_sync_from_push_path__with_ignore(tmp_path):
    """Entries matching the ignore list are filtered out of the result."""
    root = os.path.relpath(tmp_path)
    helpers.create_file(root, "file1.txt", "content1")
    helpers.create_file(os.path.join(root, "__pycache__"), "cache", "content1")
    helpers.create_file(os.path.join(root, ".grit"), "cache", "content1")

    items = sync_item.sync_from_push_path(f"{root}:/", ignore=["/__pycache__", ".grit"])
    assert len(items) == 1
    assert items[0].dir_path == str(root)
66 |
67 |
68 |
69 |
--------------------------------------------------------------------------------
/tests/cli/test_sync_item_push_values.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from aos.grit import *
3 | from aos.wit import *
4 | import aos.cli.sync_item as sync_item
5 | import helpers_sync as helpers
6 |
async def test_sync_sync_from_push_value__empty_or_invalid_fails(tmp_path):
    """Empty, trailing-slash, or None push values are rejected with ValueError."""
    # Table-driven; the old unused `sync_items` bindings (F841) are dropped and
    # only the raising call sits inside pytest.raises.
    for core_path, value in [("", ""), ("/", "ss"), ("/asa/", "ss"), (None, "")]:
        with pytest.raises(ValueError):
            sync_item.sync_from_push_value(core_path, value)
16 |
async def test_sync_sync_from_push_value__simple_string_value(tmp_path):
    """A '/name' push value becomes a file-less item rooted at '/'."""
    item = sync_item.sync_from_push_value("/test", "test_value")
    # `is None` instead of `== None` (PEP 8 / ruff E711).
    assert item.dir_path is None
    assert item.file_name is None
    assert item.core_path == "/"
    assert item.item_name == "test"
    assert item.item_value == "test_value"
24 |
25 |
--------------------------------------------------------------------------------
/tests/grit/stores/test_file_object_store.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aos.grit import *
3 | from aos.grit.stores.file import FileObjectStore
4 |
def get_random_object_id() -> ObjectId:
    """Derive an ObjectId from 20 random bytes, for test fixtures."""
    seed = os.urandom(20)
    return get_object_id(seed)
7 |
async def test_read_write(tmp_path):
    """Round-trip every Grit object kind through a FileObjectStore."""
    objectstore = FileObjectStore(str(tmp_path))

    # Store each kind and immediately verify it loads back equal.
    blob = Blob({'hi': 'there', 'foo': 'bar'}, os.urandom(1024))
    assert await objectstore.load(await objectstore.store(blob)) == blob

    tree = {name: get_random_object_id() for name in ('a', 'b', 'c')}
    assert await objectstore.load(await objectstore.store(tree)) == tree

    message_log = Message(get_random_object_id(), None, get_random_object_id())
    assert await objectstore.load(await objectstore.store(message_log)) == message_log

    mailbox = {get_random_object_id(): get_random_object_id() for _ in range(3)}
    assert await objectstore.load(await objectstore.store(mailbox)) == mailbox

    step = Step(*(get_random_object_id() for _ in range(5)))
    assert await objectstore.load(await objectstore.store(step)) == step
--------------------------------------------------------------------------------
/tests/grit/stores/test_file_references.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aos.grit import *
3 | from aos.grit.stores.file import FileReferences
4 |
def get_random_object_id() -> ObjectId:
    """Derive an ObjectId from 20 random bytes, for test fixtures."""
    seed = os.urandom(20)
    return get_object_id(seed)
7 |
async def test_read_write(tmp_path):
    """Set several refs and confirm each reads back; get_all sees all of them."""
    references = FileReferences(str(tmp_path))

    expected = {}
    for name in ('tree', 'message_log', 'mailbox', 'step'):
        object_id = get_random_object_id()
        await references.set(name, object_id)
        expected[name] = object_id

    for name, object_id in expected.items():
        assert await references.get(name) == object_id

    assert len(await references.get_all()) == 4
37 |
async def test_read_after_close(tmp_path):
    """Refs survive dropping the FileReferences instance (they live on disk)."""
    expected = {'tree': get_random_object_id(), 'a/b/c': get_random_object_id()}

    references = FileReferences(tmp_path)
    for name, object_id in expected.items():
        await references.set(name, object_id)
    del references

    # A fresh instance over the same directory must see the same refs.
    reopened = FileReferences(tmp_path)
    for name, object_id in expected.items():
        assert await reopened.get(name) == object_id
    assert len(await reopened.get_all()) == 2
--------------------------------------------------------------------------------
/tests/grit/stores/test_lmdb_object_store.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aos.grit import *
3 | from aos.grit.stores.lmdb import SharedEnvironment, LmdbObjectStore
4 |
def get_random_object_id() -> ObjectId:
    """Derive an ObjectId from 20 random bytes, for test fixtures."""
    seed = os.urandom(20)
    return get_object_id(seed)
7 |
async def test_read_write(tmp_path):
    """Round-trip every Grit object kind through an LMDB-backed object store."""
    shared_env = SharedEnvironment(str(tmp_path))
    objectstore = LmdbObjectStore(shared_env)

    # Store each kind and immediately verify it loads back equal.
    blob = Blob({'hi': 'there', 'foo': 'bar'}, os.urandom(1024))
    assert await objectstore.load(await objectstore.store(blob)) == blob

    tree = {name: get_random_object_id() for name in ('a', 'b', 'c')}
    assert await objectstore.load(await objectstore.store(tree)) == tree

    message_log = Message(get_random_object_id(), None, get_random_object_id())
    assert await objectstore.load(await objectstore.store(message_log)) == message_log

    mailbox = {get_random_object_id(): get_random_object_id() for _ in range(3)}
    assert await objectstore.load(await objectstore.store(mailbox)) == mailbox

    step = Step(*(get_random_object_id() for _ in range(5)))
    assert await objectstore.load(await objectstore.store(step)) == step
--------------------------------------------------------------------------------
/tests/grit/stores/test_lmdb_references.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aos.grit import *
3 | from aos.grit.stores.lmdb import SharedEnvironment, LmdbReferences
4 |
def get_random_object_id() -> ObjectId:
    """Derive an ObjectId from 20 random bytes, for test fixtures."""
    seed = os.urandom(20)
    return get_object_id(seed)
7 |
async def test_read_write(tmp_path):
    """Set refs through LmdbReferences and read each back; get_all sees all four."""
    shared_env = SharedEnvironment(str(tmp_path))
    references = LmdbReferences(shared_env)

    expected = {}
    for name in ('tree', 'message_log', 'mailbox', 'step'):
        object_id = get_random_object_id()
        await references.set(name, object_id)
        expected[name] = object_id

    for name, object_id in expected.items():
        assert await references.get(name) == object_id

    assert len(await references.get_all()) == 4
39 |
async def test_read_after_close(tmp_path):
    """A ref written through one LMDB env is readable after reopening the env."""
    shared_env = SharedEnvironment(str(tmp_path))
    references = LmdbReferences(shared_env)
    tree_id = get_random_object_id()
    await references.set('tree', tree_id)

    # Close and drop everything so the reopen below starts from disk state.
    shared_env.get_env().close()
    del shared_env
    del references

    reopened_env = SharedEnvironment(str(tmp_path))
    reopened_refs = LmdbReferences(reopened_env)
    assert await reopened_refs.get('tree') == tree_id
--------------------------------------------------------------------------------
/tests/perf/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smartcomputer-ai/agent-os/74397b5b66dc12e78317e7d673ad92ea9251e975/tests/perf/__init__.py
--------------------------------------------------------------------------------
/tests/perf/perf.py:
--------------------------------------------------------------------------------
import os
import sys
import time
import asyncio
import tempfile
# NOTE(review): was `from grit.stores.lmdb import ...`, which is inconsistent
# with the rest of the repository (the package lives at aos/grit/stores/lmdb
# and every other test imports `aos.grit.stores.lmdb`).
from aos.grit.stores.lmdb import SharedEnvironment, LmdbReferences, LmdbObjectStore
from .perf_grid import perf_grid_run
8 |
async def amain():
    """Run the grid perf benchmark against an LMDB-backed store in a temp dir."""
    with tempfile.TemporaryDirectory() as temp_dir:
        print(f"Temp dir is {temp_dir}")
        shared_env = SharedEnvironment(temp_dir, writemap=True)
        store = LmdbObjectStore(shared_env)
        refs = LmdbReferences(shared_env)
        await perf_grid_run(store, refs)

        # Report how much data the run left behind on disk.
        file_path = temp_dir
        files = os.listdir(file_path)
        total_bytes = sum(os.path.getsize(os.path.join(file_path, f)) for f in files)
        file_bytes = total_bytes / 1024 / 1024
        print(f"Temp dir {file_path} has {len(files)} files, and is {file_bytes:0.2f} MB")
26 |
def main():
    """Entry point: time the async benchmark run and exit with status 0."""
    started = time.perf_counter()
    asyncio.run(amain())
    elapsed = time.perf_counter() - started
    print(f'Total time elapsed: {elapsed:0.2f} seconds')
    sys.exit(0)
34 |
35 | if __name__ == "__main__":
36 | main()
--------------------------------------------------------------------------------
/tests/runtime/core/helpers_runtime.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aos.grit import *
3 | from aos.wit import *
4 |
def get_random_actor_id() -> ActorId:
    """Derive a random ActorId from 20 random bytes, for test fixtures."""
    random_bytes = os.urandom(20)
    return get_object_id(random_bytes)
7 |
async def create_genesis_message(store:ObjectStore, sender_id:ActorId, wit_name:str) -> MailboxUpdate:
    '''Creates a genesis message and returns a MailboxUpdate.'''
    # Passing genesis_ref makes the wit itself handle the genesis message rather
    # than the actor_executor's default genesis handler — most tests want to
    # handle the genesis message themselves.
    core: TreeObject = Core.from_external_wit_ref(wit_name, genesis_ref=wit_name)
    core.maket('data').makeb('args').set_as_json({'hello': 'world'})
    message = await OutboxMessage.from_genesis(store, core)
    message_id = await message.persist(store)
    return (sender_id, message.recipient_id, message_id)
17 |
async def create_new_message(store:ObjectStore, sender_id:ActorId, recipient_id:ActorId, previous_message_id:MessageId|None, content:str|BlobObject|TreeObject) -> MailboxUpdate:
    '''Creates a new message and returns a MailboxUpdate.'''
    # Plain strings are wrapped in a BlobObject before persisting.
    if isinstance(content, str):
        content = BlobObject.from_str(content)
    content_id = await content.persist(store)
    new_message = Message(previous_message_id, None, content_id)
    new_message_id = await store.store(new_message)
    return (sender_id, recipient_id, new_message_id)
26 |
async def create_actor(store:ObjectStore, refs:References, sender_id:ActorId, wit_name:str):
    '''Creates an actor from a genesis message, records its first step, and returns the step id.'''
    sender_id, new_actor_id, gen_message_id = await create_genesis_message(store, sender_id, wit_name)
    gen_inbox_id = await store.store({sender_id: gen_message_id})
    # For the very first step the core_id is the same as the actor_id.
    first_step = Step(None, new_actor_id, gen_inbox_id, None, new_actor_id)
    first_step_id = await store.store(first_step)
    await refs.set(ref_step_head(new_actor_id), first_step_id)
    return first_step_id
35 |
--------------------------------------------------------------------------------
/tests/runtime/core/test_resolvers_core.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import importlib
4 | from aos.wit import *
5 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
6 | from aos.wit.data_model import *
7 | from aos.runtime.core import *
8 | import helpers_runtime as helpers
9 |
# Python module source that the test below writes into a core as a code file.
helper_py = """
def help():
    return "helper"
"""

# main.py source that imports its sibling module with an absolute import
# ("import helper") — the behavior the resolver test exercises.
main_py_absolute = """
import helper
def main():
    print(helper.help())
    return helper.help()
"""
21 |
async def test_resolve_from_core__with_absolute_module_import():
    """A wit ref like '/code:main:main' resolves to a callable from core code."""
    store = MemoryObjectStore()
    core = Core(store, {}, None)
    code_dir = core.maket("code")
    code_dir.makeb("main.py").set_as_str(main_py_absolute)
    code_dir.makeb("helper.py").set_as_str(helper_py)
    core.makeb("wit").set_as_str("/code:main:main")
    core_id = await core.persist()

    resolved = await CoreResolver(store).resolve(core_id, 'wit', True)
    assert resolved is not None
    assert resolved.__name__ == 'main'
    assert resolved() == 'helper'
36 |
37 |
--------------------------------------------------------------------------------
/tests/runtime/core/test_runtime.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
4 | from aos.grit import *
5 | from aos.wit import *
6 | from aos.runtime.core import *
7 | import helpers_runtime as helpers
8 |
async def test_run_empty():
    """Starting and stopping an empty runtime creates the root (agent) actor."""
    store = MemoryObjectStore()
    refs = MemoryReferences()
    runtime = Runtime(store, refs, point=1000)

    run_task = asyncio.create_task(runtime.start())
    await asyncio.sleep(0.1)
    runtime.stop()
    await run_task

    # The runtime registers a root actor that represents the agent itself.
    agent_id = await refs.get(ref_root_actor())
    assert agent_id is not None
    agent_core = await store.load(agent_id)
    assert agent_core is not None
    assert "point" in agent_core
    point_blob = await BlobObject.from_blob_id(store, agent_core['point'])
    assert bytes_to_point(point_blob.get_as_bytes()) == 1000

    # A first step was recorded for the agent actor.
    assert await refs.get(ref_step_head(agent_id)) is not None
28 |
29 |
30 |
--------------------------------------------------------------------------------
/tests/runtime/core/test_runtime_msgs_fanout.py:
--------------------------------------------------------------------------------
1 | import time
2 | from aos.wit import *
3 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
4 | from aos.wit.data_model import *
5 | from aos.runtime.core import *
6 | import helpers_runtime as helpers
7 |
8 | # A broader end-to-end test with fanout pattern:
9 | # 1) one actor gets created (wit_a)
10 | # 2) and then wit_a creates ~100 more actors (wit_b), and they all send messages back to the first wit
11 |
async def test_msgs__fanout():
    """End-to-end fanout test: wit_a spawns 100 wit_b actors; each replies back.

    Expects 101 distinct actors (wit_a + 100 wit_b) and 101 distinct senders
    recorded (the agent's genesis for wit_a plus one reply per wit_b).
    """
    # shared state mutated from inside the wit closures via `nonlocal`
    actors = set()
    messages_from_senders = {}
    roundtrip_times = set()

    wit_a = Wit()
    @wit_a.run_wit
    async def wit_a_func(inbox:Inbox, outbox:Outbox, core:Core, **kwargs) -> None:
        #print('wit_a')
        object_store:ObjectStore = kwargs['object_store']
        actor_id:ActorId = kwargs['actor_id']
        nonlocal actors
        actors.add(actor_id)

        inbox_messages = await inbox.read_new()
        for message in inbox_messages:
            #if genesis message (a genesis message's content id equals the new actor id)
            if(message.content_id == actor_id):
                # fan out: create 100 wit_b actors, each with its own number in the core args
                for i in range(100):
                    actor_core_b_n = Core.from_external_wit_ref('wit_b', genesis_ref='wit_b')
                    actor_core_b_n.maket('data').makeb('args').set_as_json({'number': i})
                    outbox.add(await OutboxMessage.from_genesis(object_store, actor_core_b_n))
            else:
                # a reply from one of the wit_b actors
                nonlocal messages_from_senders
                messages_from_senders.setdefault(message.sender_id, 0)
                messages_from_senders[message.sender_id] += 1
                nonlocal roundtrip_times
                roundtrip_times.add(time.time())

    wit_b = Wit()
    @wit_b.run_wit
    async def wit_b_func(inbox:Inbox, outbox:Outbox, core:Core, **kwargs) -> None:
        #print('wit_b')
        actor_id:ActorId = kwargs['actor_id']
        nonlocal actors
        actors.add(actor_id)

        inbox_messages = await inbox.read_new()
        for message in inbox_messages:
            #if genesis message
            if(message.content_id == actor_id):
                #await asyncio.sleep(0.01)
                nonlocal messages_from_senders
                messages_from_senders.setdefault(message.sender_id, 0)
                messages_from_senders[message.sender_id] += 1
                #send a message back
                outbox.add(OutboxMessage.from_reply(message, "hello from wit_b"))

    store = MemoryObjectStore()
    refs = MemoryReferences()
    resolver = ExternalResolver(store)
    resolver.register('wit_a', wit_a)
    resolver.register('wit_b', wit_b)
    runtime = Runtime(store, refs, resolver=resolver)

    running_task = asyncio.create_task(runtime.start())
    await asyncio.sleep(0.05)
    start_time = time.time()
    # kick everything off by creating wit_a via a genesis mailbox update
    wit_a_gen_msg = await helpers.create_genesis_message(store, runtime.agent_id, 'wit_a')
    await runtime.inject_mailbox_update(wit_a_gen_msg)
    # give the runtime time to process the whole fanout before stopping
    await asyncio.sleep(0.2)
    runtime.stop()
    await asyncio.wait_for(running_task, timeout=1)

    #get the max time in roundtrip_times
    max_time = max(roundtrip_times)
    print(f'max roundtrip time: {max_time - start_time}')
    min_time = min(roundtrip_times)
    print(f'min roundtrip time: {min_time - start_time}')

    # print(actors)
    # print(messages_from_senders)
    assert len(actors) == 101
    assert len(messages_from_senders) == 101
86 |
87 |
88 |
89 |
90 |
91 |
--------------------------------------------------------------------------------
/tests/runtime/core/test_runtime_msgs_pending.py:
--------------------------------------------------------------------------------
1 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
2 | from aos.wit import *
3 | from aos.runtime.core import *
4 | import helpers_runtime as helpers
5 |
6 | # A broader end-to-end test that makes sure the runtime applies pending messages:
7 | # 1. Key: Do not run the runtime yet
8 | # 2. Create an actor manually (wit_a)
9 | # 3. Manually (again without the runtime) create an outbox for actor a that
10 | # creates a new actor (wit_b) and sends it another message
11 | # 4. Run the runtime
12 | # 5. Make sure the message was received by actor b (wit_b)
13 |
async def test_msgs__runtime_pending():
    """Verify the runtime delivers messages that were persisted *before* it started.

    Steps and an outbox for wit_a are written to the store manually; once the
    runtime starts it must notice the pending outbox messages and deliver the
    genesis + "Howdy" messages to wit_b.
    """
    arrived_messages = []

    wit_a = Wit()
    @wit_a.run_wit
    async def wit_a_func(inbox:Inbox, outbox:Outbox, core:Core, **kwargs) -> None:
        print('wit_a')
        await inbox.read_new()

    wit_b = Wit()
    @wit_b.run_wit
    async def wit_b_func(inbox:Inbox, outbox:Outbox, core:Core, **kwargs) -> None:
        print('wit_b')
        object_store:ObjectStore = kwargs['object_store']
        actor_id:ActorId = kwargs['actor_id']
        inbox_messages = await inbox.read_new()
        #gen messages are handled individually, so there should only ever be one message at a time
        assert len(inbox_messages) == 1

        # a genesis message's content id equals the actor's own id
        if(inbox_messages[0].content_id == actor_id):
            print('genesis message arrived')
            arrived_messages.append("genesis")
        else:
            print('other message arrived')
            arrived_messages.append("other")


    store = MemoryObjectStore()
    refs = MemoryReferences()
    resolver = ExternalResolver(store)
    resolver.register('wit_a', wit_a)
    resolver.register('wit_b', wit_b)
    runtime = Runtime(store, refs, resolver=resolver)

    #create wit_a (manually, the runtime is not running yet)
    wit_a_gen_step_id = await helpers.create_actor(store, refs, runtime.agent_id, 'wit_a')
    wit_a_gen_step:Step = await store.load(wit_a_gen_step_id)
    wit_a_actor_id = wit_a_gen_step.actor

    #send two messages to wit_b, but without ever running wit_b--just send messages to it
    # to do so, manually update the outbox *of wit a* with two messages (gen & howdy) and create a new step that incorporates that outbox
    outbox = Outbox({})
    b_gen_messge = await OutboxMessage.from_genesis(store, Core.from_external_wit_ref('wit_b'))
    wit_b_actor_id = b_gen_messge.content #will be the actor id of wit_b
    outbox.add(b_gen_messge)
    outbox.add(OutboxMessage.from_new(wit_b_actor_id, "Howdy"))
    outbox_id = await outbox.persist(store)
    #inbox and core of wit_a do not change, only the outbox
    wit_a_second_step = Step(wit_a_gen_step_id, wit_a_actor_id, wit_a_gen_step.inbox, outbox_id, wit_a_gen_step.core)
    wit_a_second_step_id = await store.store(wit_a_second_step)
    await refs.set(ref_step_head(wit_a_actor_id), wit_a_second_step_id)

    #now, start the runtime
    # wit_b has never been executed so far (ie no step has been run for it), there are only outbox messages in wit_a for wit_b
    # the runtime should pick up the two messages (gen & howdy) and send them to wit_b
    running_task = asyncio.create_task(runtime.start())
    await asyncio.sleep(0.1)
    runtime.stop()
    await running_task

    assert len(arrived_messages) == 2
    assert "genesis" in arrived_messages
    assert "other" in arrived_messages
77 |
78 |
--------------------------------------------------------------------------------
/tests/runtime/core/test_runtime_msgs_request_response.py:
--------------------------------------------------------------------------------
1 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
2 | from aos.wit import *
3 | from aos.runtime.core import *
4 | import helpers_runtime as helpers
5 |
6 | # test wit a communicating with wit b via the "request-response" helper
7 |
async def test_msgs__single_wit():
    """wit_a talks to wit_b via the request-response helper and gets "yo" back.

    wit_a receives a "start" message containing wit_b's actor id (as hex),
    sends it a "hi" signal, and awaits a reply with message-type "response".
    """
    arrived_messages = []

    wit_a = Wit()
    @wit_a.message("start")
    async def on_a_message(actor_b:str, ctx:MessageContext) -> None:
        print("on_a_message: start")
        # the message content is wit_b's actor id in hex
        actor_b_id = to_object_id(actor_b)
        # send "hi" as a signal and wait (up to 0.1s) for a message typed 'response'
        response = await ctx.request_response.run(OutboxMessage.from_new(actor_b_id, "hi", is_signal=True, mt="hi"), ['response'], 0.1)
        response_str = (await response.get_content()).get_as_str()
        arrived_messages.append(response_str)

    wit_b = Wit()
    @wit_b.message("hi")
    async def on_b_message(message:InboxMessage, ctx:MessageContext) -> None:
        print("on_b_message: request-response")
        ctx.outbox.add_reply_msg(message, "yo", mt="response")

    store = MemoryObjectStore()
    refs = MemoryReferences()
    resolver = ExternalResolver(store)
    resolver.register('wit_a', wit_a)
    resolver.register('wit_b', wit_b)

    runtime = Runtime(store, refs, resolver=resolver)
    running_task = asyncio.create_task(runtime.start())
    #genesis
    gen_a_message = await helpers.create_genesis_message(store, runtime.agent_id, 'wit_a')
    await runtime.inject_mailbox_update(gen_a_message)
    gen_b_message = await helpers.create_genesis_message(store, runtime.agent_id, 'wit_b')
    await runtime.inject_mailbox_update(gen_b_message)
    #since the genesis message is injected as a mailbox update, it is treated as a signal, and we need to wait for it to be processed
    await asyncio.sleep(0.3)
    #say hi: send wit_b's actor id (hex) to wit_a as the "start" message
    hi_message = OutboxMessage.from_new(gen_a_message[1], gen_b_message[1].hex(), mt="start")
    await runtime.inject_mailbox_update(await hi_message.persist_to_mailbox_update(store, runtime.agent_id))
    await asyncio.sleep(0.1)
    #stop
    runtime.stop()
    await asyncio.wait_for(running_task, timeout=1)

    assert arrived_messages == ["yo"]
50 |
--------------------------------------------------------------------------------
/tests/runtime/core/test_runtime_msgs_single.py:
--------------------------------------------------------------------------------
1 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
2 | from aos.wit import *
3 | from aos.runtime.core import *
4 | import helpers_runtime as helpers
5 |
6 | # test the wit function in conjunction with the runtime
7 |
async def test_msgs__single_wit():
    """Single-wit end-to-end: the genesis handler and a typed "hi" handler both fire, in order."""
    arrived_messages = []

    wit_a = Wit()
    @wit_a.genesis_message
    async def on_genesis_message(message:InboxMessage, actor_id) -> None:
        print(f"on_genesis_message: I am {actor_id}")
        arrived_messages.append("genesis")

    @wit_a.message("hi")
    async def on_message(message:InboxMessage, actor_id) -> None:
        print(f"on_message: I am {actor_id}")
        arrived_messages.append("hi")

    store = MemoryObjectStore()
    refs = MemoryReferences()
    resolver = ExternalResolver(store)
    resolver.register('wit_a', wit_a)

    runtime = Runtime(store, refs, resolver=resolver)
    running_task = asyncio.create_task(runtime.start())
    #genesis
    gen_message = await helpers.create_genesis_message(store, runtime.agent_id, 'wit_a')
    await runtime.inject_mailbox_update(gen_message)
    #since the genesis message is injected as a mailbox update, it is treated as a signal, and we need to wait for it to be processed
    await asyncio.sleep(0.1)
    #say hi (gen_message[1] is the newly created actor's id)
    hi_message = OutboxMessage.from_new(gen_message[1], "hi from outside", mt="hi")
    await runtime.inject_mailbox_update(await hi_message.persist_to_mailbox_update(store, runtime.agent_id))
    await asyncio.sleep(0.1)
    #stop
    runtime.stop()
    await asyncio.wait_for(running_task, timeout=1)

    assert arrived_messages == ["genesis", "hi"]
43 |
--------------------------------------------------------------------------------
/tests/runtime/core/test_runtime_wit_sync.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | from aos.wit import *
4 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
5 | from aos.grit.tree_helpers import *
6 | from aos.wit.data_model import *
7 | from aos.runtime.core import *
8 |
def wit_sync(last_step_id:StepId, new_inbox:Mailbox, **kwargs) -> StepId:
    """A synchronous (non-async) wit function used to test sync-wit support.

    On the genesis call (`last_step_id is None`) it persists the inbox and a
    first step, and returns the new step id.
    NOTE(review): for any non-genesis call this falls through and implicitly
    returns None — confirm whether the runtime treats that as "no new step".
    """
    print("wit_sync, called")
    store:ObjectStore = kwargs['store']
    if last_step_id is None:
        print("wit_sync: genesis")
        inbox_id = store.store_sync(new_inbox)
        # genesis step: no previous step; the core is the actor id itself here
        step = Step(None, kwargs['actor_id'], inbox_id, None, kwargs['actor_id'])
        step_id = store.store_sync(step)
        return step_id
18 |
19 | # utils
async def setup_runtime():
    """Create an in-memory runtime with `wit_sync` registered, start it, and return (runtime, task)."""
    object_store = MemoryObjectStore()
    references = MemoryReferences()
    ext_resolver = ExternalResolver(object_store)
    ext_resolver.register('wit_sync', wit_sync)
    rt = Runtime(object_store, references, resolver=ext_resolver)
    start_task = asyncio.create_task(rt.start())
    # brief pause so the runtime's start-up work can run
    await asyncio.sleep(0.05)
    return rt, start_task
29 |
async def send_genesis_message(runtime:Runtime, wit_ref):
    """Build a genesis core referencing *wit_ref* and inject it into the runtime."""
    core = Core(runtime.store, {}, None)
    # both the wit and its genesis handler point at the same external ref
    core.makeb("wit").set_as_str(f"external:{wit_ref}")
    core.makeb("wit_genesis").set_as_str(f"external:{wit_ref}")
    message = await OutboxMessage.from_genesis(runtime.store, core)
    await runtime.inject_message(message)
    # give the runtime a moment to pick the message up
    await asyncio.sleep(0.1)
37 |
38 | # tests
async def test_wit_sync_genesis():
    """A genesis message for the sync wit should result in exactly one actor."""
    runtime, start_task = await setup_runtime()
    await send_genesis_message(runtime, 'wit_sync')
    await asyncio.sleep(0.1)
    runtime.stop()
    await start_task
    actor_list = runtime.get_actors()
    assert len(actor_list) == 1
--------------------------------------------------------------------------------
/tests/runtime/store/test_grit_store.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | from aos.grit import *
4 | from aos.runtime.store import grit_store_pb2, grit_store_pb2_grpc
5 | from aos.runtime.store import agent_store_pb2, agent_store_pb2_grpc
6 | from aos.runtime.store.store_client import StoreClient
7 | from aos.runtime.store.agent_object_store import AgentObjectStore
8 | from aos.runtime.store.store_server import start_server
9 |
async def test_read_write(tmp_path):
    """Round-trip a blob through the grpc store server via AgentObjectStore."""
    # NOTE(review): the server task is never stopped/cancelled at the end of
    # the test — relies on pytest/event-loop teardown to clean it up.
    server_task = asyncio.create_task(start_server(str(tmp_path)))

    client = StoreClient()
    await client.wait_for_async_channel_ready()

    #create agent
    agent_stub = client.get_agent_store_stub_async()
    agent_response:agent_store_pb2.CreateAgentResponse = await agent_stub.CreateAgent(agent_store_pb2.CreateAgentRequest())
    agent_id = agent_response.agent_id

    #save object
    object_store = AgentObjectStore(client, agent_id)

    # a blob with headers and 1 KiB of random payload
    blob = Blob({'hi': 'there', 'foo': 'bar'}, os.urandom(1024))

    blob_id = await object_store.store(blob)

    # load it back and verify both payload and headers survived the round trip
    blob2 = await object_store.load(blob_id)
    assert blob.data == blob2.data
    assert blob.headers == blob2.headers
31 |
--------------------------------------------------------------------------------
/tests/runtime/worker/test_worker.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | from aos.grit import *
4 | from aos.runtime.store import grit_store_pb2, grit_store_pb2_grpc
5 | from aos.runtime.store import agent_store_pb2, agent_store_pb2_grpc
6 | from aos.runtime.store.store_client import StoreClient
7 | from aos.runtime.store.agent_object_store import AgentObjectStore
8 | from aos.runtime.store.store_server import start_server as start_store_server
9 | from aos.runtime.apex import apex_api_pb2, apex_api_pb2_grpc
10 | from aos.runtime.apex import apex_workers_pb2, apex_workers_pb2_grpc
11 | from aos.runtime.apex.apex_client import ApexClient
12 | from aos.runtime.apex.apex_server import start_server as start_apex_server
13 | from aos.runtime.worker.worker_server import start_server as start_worker_server
14 |
15 | import logging
16 | logging.basicConfig(level=logging.INFO)
17 |
18 | #run with:
19 | # poetry run pytest tests/runtime/worker/ --log-cli-level=10 -s
20 |
async def test_worker(tmp_path):
    """Smoke test: start store, apex, and worker servers, then start/stop an agent.

    No assertions yet — the test passes if the start/stop round trip completes
    without raising.
    """
    # NOTE(review): the three server tasks are never awaited/cancelled;
    # cleanup is left to the event loop teardown.
    store_server_task = asyncio.create_task(start_store_server(str(tmp_path)))
    apex_server_task = asyncio.create_task(start_apex_server())
    worker_task = asyncio.create_task(start_worker_server())

    store_client = StoreClient()
    await store_client.wait_for_async_channel_ready()
    apex_client = ApexClient()
    await apex_client.wait_for_async_channel_ready()

    #create agent
    agent_stub = store_client.get_agent_store_stub_async()
    agent_response:agent_store_pb2.CreateAgentResponse = await agent_stub.CreateAgent(agent_store_pb2.CreateAgentRequest())
    agent_id = agent_response.agent_id
    print("test: agent_id", agent_id.hex())

    #start agent
    apex_api_stub = apex_client.get_apex_api_stub_async()
    await apex_api_stub.StartAgent(apex_api_pb2.StartAgentRequest(agent_id=agent_id))

    await asyncio.sleep(0.1)

    #stop agent
    await apex_api_stub.StopAgent(apex_api_pb2.StopAgentRequest(agent_id=agent_id))

    #TODO: push a wit to the agent
    # using the inject message api (doesn't exist yet)

    await asyncio.sleep(0.2)
50 |
--------------------------------------------------------------------------------
/tests/web/helpers_web.py:
--------------------------------------------------------------------------------
1 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
2 | from aos.wit import *
3 | from aos.runtime.web import *
4 | from aos.runtime.core import *
5 |
def setup_runtime() -> Runtime:
    """Build (but do not start) a runtime backed by in-memory grit stores."""
    object_store = MemoryObjectStore()
    references = MemoryReferences()
    ext_resolver = ExternalResolver(object_store)
    rt = Runtime(
        store=object_store,
        references=references,
        point=0,
        resolver=ext_resolver)
    return rt
16 |
def get_grit_url_prefix(runtime:Runtime) -> str:
    """Return the grit-API URL prefix for the runtime's agent."""
    agent_hex = runtime.agent_id.hex()
    return f"/ag/{agent_hex}/grit"
19 |
def get_wit_url_prefix(runtime:Runtime) -> str:
    """Return the wit-API URL prefix for the runtime's agent."""
    agent_hex = runtime.agent_id.hex()
    return f"/ag/{agent_hex}/wit"
22 |
async def create_object_from_content(runtime:Runtime, content:bytes|str|dict) -> ObjectId:
    """Wrap *content* in a blob and persist it in the runtime's object store."""
    return await BlobObject.from_content(content).persist(runtime.store)
26 |
async def create_object(runtime:Runtime, object:Object) -> ObjectId:
    """Persist an already-constructed grit object in the runtime's store."""
    store = runtime.store
    return await store.store(object)
29 |
async def create_genesis_message(store:ObjectStore, sender_id:ActorId, wit_name:str, query_ref:str=None) -> MailboxUpdate:
    '''Creates a genesis message and returns a MailboxUpdate'''
    if wit_name is None:
        raise Exception('wit_name must not be None')
    # core with an external wit reference plus some nested dummy content
    core:TreeObject = Core.from_external_wit_ref(wit_name, query_ref)
    core.maket("first").maket("second").makeb("third").set_as_str("made it")
    message = await OutboxMessage.from_genesis(store, core)
    message_id = await message.persist(store)
    return (sender_id, message.recipient_id, message_id)
39 |
async def create_and_send_genesis_message(runtime:Runtime, wit_ref:str, query_ref:str=None) -> tuple[ActorId, MessageId]:
    """Inject a genesis message as a mailbox update so the new actor id is known up front."""
    update = await create_genesis_message(runtime.store, runtime.agent_id, wit_ref, query_ref)
    await runtime.inject_mailbox_update(update)
    _, new_actor_id, gen_message_id = update
    return (new_actor_id, gen_message_id)
45 |
async def create_and_send_new_message(runtime:Runtime, recipient_id:ActorId, content:any) -> MessageId:
    """Build a plain message for *recipient_id* and inject it into the runtime."""
    outgoing = OutboxMessage.from_new(recipient_id, content)
    return await runtime.inject_message(outgoing)
--------------------------------------------------------------------------------
/tests/web/test_web_server.py:
--------------------------------------------------------------------------------
1 | from starlette.testclient import TestClient
2 | from aos.wit import *
3 | from aos.runtime.web import *
4 | from aos.runtime.core import *
5 | import helpers_web as helpers
6 |
async def test_runt_web_server_empty():
    """GET / returns the plain-text API banner."""
    rt = helpers.setup_runtime()
    rt_task = asyncio.create_task(rt.start())
    http = TestClient(WebServer(rt).app())

    resp = http.get("/")
    rt.stop()
    await rt_task

    assert resp.status_code == 200
    assert resp.text == "Wit API"
19 |
async def test_get_agents():
    """GET /ag returns the agents hosted by the runtime as JSON."""
    rt = helpers.setup_runtime()
    rt_task = asyncio.create_task(rt.start())
    http = TestClient(WebServer(rt).app())

    resp = http.get("/ag")
    rt.stop()
    await rt_task

    assert resp.status_code == 200
    assert resp.headers['content-type'] == 'application/json'
    # the runtime hosts a single agent, so the listing contains exactly its id
    assert resp.json() == {'test': rt.agent_id.hex()}
34 |
35 |
--------------------------------------------------------------------------------
/tests/web/test_web_server_grit.py:
--------------------------------------------------------------------------------
1 | import os
2 | from starlette.testclient import TestClient
3 | from aos.wit import *
4 | from aos.runtime.web import *
5 | from aos.runtime import *
6 | import helpers_web as helpers
7 |
async def test_grit_get_refs_and_ref():
    """The /refs endpoint lists all references; /refs/<name> returns a single one."""
    rt = helpers.setup_runtime()
    url_prefix = helpers.get_grit_url_prefix(rt)

    rt_task = asyncio.create_task(rt.start())
    http = TestClient(WebServer(rt).app())

    # with nothing stored yet, the listing is an empty JSON object
    resp = http.get(url_prefix + "/refs")
    assert resp.status_code == 200
    assert resp.headers['content-type'] == 'application/json'
    assert resp.json() == {}

    # store two blobs and point refs at them
    blob_1_id = await helpers.create_object_from_content(rt, "blob_1")
    blob_2_id = await helpers.create_object_from_content(rt, "blob_2")
    await rt.references.set("ref_1", blob_1_id)
    await rt.references.set("ref_2", blob_2_id)

    # both refs should now appear in the listing, as hex ids
    resp = http.get(url_prefix + "/refs")
    assert resp.status_code == 200
    body = resp.json()
    assert len(body) == 2
    assert body['ref_1'] == blob_1_id.hex()
    assert body['ref_2'] == blob_2_id.hex()

    # the single-ref endpoint returns just the requested entry
    resp = http.get(url_prefix + "/refs/ref_1")
    body = resp.json()
    assert resp.status_code == 200
    assert len(body) == 1
    assert body['ref_1'] == blob_1_id.hex()

    rt.stop()
    await rt_task
43 |
async def test_grit_get_objects():
    """The /objects endpoint serves blobs with a content type matching how they were stored."""
    rt = helpers.setup_runtime()
    url_prefix = helpers.get_grit_url_prefix(rt)

    rt_task = asyncio.create_task(rt.start())
    http = TestClient(WebServer(rt).app())

    # an invalid id is rejected outright
    resp = http.get(url_prefix + "/objects/abc")
    assert resp.status_code == 400
    # a well-formed but unknown id yields not-found
    resp = http.get(url_prefix + "/objects/" + to_object_id_str(get_object_id(os.urandom(20))))
    assert resp.status_code == 404

    # store one blob per content flavor: str, bytes, and dict
    blob_1_id = await helpers.create_object_from_content(rt, "blob_1")
    blob_2_id = await helpers.create_object_from_content(rt, b"blob_2")
    blob_3_id = await helpers.create_object_from_content(rt, {"key1": "value1"})

    # str content comes back as text
    resp = http.get(url_prefix + "/objects/" + to_object_id_str(blob_1_id))
    assert resp.status_code == 200
    assert resp.headers['content-type'] == 'text/plain; charset=utf-8'
    assert resp.text == "blob_1"

    # bytes content comes back as an octet stream
    resp = http.get(url_prefix + "/objects/" + to_object_id_str(blob_2_id))
    assert resp.status_code == 200
    assert resp.headers['content-type'] == 'application/octet-stream'
    assert resp.content == b"blob_2"

    # dict content comes back as JSON
    resp = http.get(url_prefix + "/objects/" + to_object_id_str(blob_3_id))
    assert resp.status_code == 200
    assert resp.headers['content-type'] == 'application/json'
    assert resp.json() == {"key1": "value1"}

    rt.stop()
    await rt_task
81 |
--------------------------------------------------------------------------------
/tests/web/test_web_server_sse.py:
--------------------------------------------------------------------------------
1 | import httpx
2 | from httpx_sse import aconnect_sse
3 | from starlette.testclient import TestClient
4 | from aos.wit import *
5 | from aos.runtime.web import *
6 | from aos.runtime.core import *
7 | import helpers_web as helpers
8 |
9 | #===================================================================================================
10 | # Wits
11 | #===================================================================================================
wit_a = Wit()
@wit_a.run_wit
async def wit_a_func(inbox:Inbox, outbox:Outbox, core:Core, **kwargs) -> None:
    """Echo wit: replies "hi back" to every non-genesis message."""
    actor_id:ActorId = kwargs['actor_id']
    for msg in await inbox.read_new():
        # a genesis message's content id equals the actor's own id
        if msg.content_id == actor_id:
            print("wit_a: got genesis message")
        else:
            print("wit_a: got a message")
            outbox.add(OutboxMessage.from_reply(msg, "hi back"))
25 |
26 | #===================================================================================================
27 | # Tests
28 | #===================================================================================================
async def test_sse():
    """A message posted to an actor's inbox produces exactly one SSE event (the reply)."""
    runtime = helpers.setup_runtime()
    runtime.resolver.register('wit_a', wit_a)
    url_prefix = helpers.get_wit_url_prefix(runtime)

    runtime_task = asyncio.create_task(runtime.start())
    client = TestClient(WebServer(runtime).app())
    await asyncio.sleep(0.05) #give the runtime time to start up

    # collect every SSE event seen until the stream closes
    sse_events = []
    async def listen_to_messages():
        # a second ASGI client is used for the long-lived SSE stream
        async with httpx.AsyncClient(transport=httpx.ASGITransport(app=WebServer(runtime).app()), base_url="http://localhost:5000") as client:
            async with aconnect_sse(client, method="GET", url=f"{url_prefix}/messages-sse?content=true") as event_source:
                async for sse in event_source.aiter_sse():
                    print(f"SSE event (id: {sse.id}, event: {sse.event}): {sse.data}")
                    sse_events.append(sse.json())
        print("SSE connection closed")

    listen_task = asyncio.create_task(listen_to_messages())

    #create an actor for wit_a
    wit_a_actor_id, wit_a_gen_message_id = await helpers.create_and_send_genesis_message(runtime, 'wit_a')
    wit_a_actor_id_str = to_object_id_str(wit_a_actor_id)
    await asyncio.sleep(0.05) #give the runtime time to create the actor

    #send a message via POST api to the actor
    response = client.post(url_prefix+"/actors/"+wit_a_actor_id_str+"/inbox", json={"content":"hi"})
    assert response.status_code == 201
    assert response.headers['content-type'] == 'text/plain; charset=utf-8'
    new_message_id_str = response.text
    await asyncio.sleep(0.05) #give the runtime time to process the message

    # stopping the runtime ends the SSE stream, letting listen_task finish
    runtime.stop()
    await runtime_task
    await listen_task

    #there should be one event, for the reply from the wit
    assert len(sse_events) == 1
    assert sse_events[0]['content'] == "hi back"
68 |
69 |
70 |
71 |
72 |
--------------------------------------------------------------------------------
/tests/wit/helpers_wit.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from aos.grit import *
4 | from aos.wit import *
5 |
def get_random_actor_id() -> ActorId:
    """Return an actor id derived from 20 random bytes."""
    random_bytes = os.urandom(20)
    return get_object_id(random_bytes)
8 |
async def create_genesis_message(store:ObjectStore, sender_id:ActorId, wit_ref:str, query_ref:str=None) -> MailboxUpdate:
    '''Creates a genesis message and returns a MailboxUpdate'''
    # core with an external wit reference plus dummy args
    core:TreeObject = Core.from_external_wit_ref(wit_ref, query_ref)
    core.maket('data').makeb('args').set_as_json({'hello': 'world'})
    message = await OutboxMessage.from_genesis(store, core)
    message_id = await message.persist(store)
    return (sender_id, message.recipient_id, message_id)
16 |
async def create_new_message(store:ObjectStore, sender_id:ActorId, recipient_id:ActorId, previous_message_id:MessageId|None, content:str|BlobObject|TreeObject, mt:str=None) -> MailboxUpdate:
    '''Creates a new message and returns a MailboxUpdate'''
    # plain strings are wrapped in a blob first
    if isinstance(content, str):
        content = BlobObject.from_str(content)
    content_id = await content.persist(store)
    # the optional message-type header
    headers = {'mt': mt} if mt is not None else None
    message_id = await store.store(Message(previous_message_id, headers, content_id))
    return (sender_id, recipient_id, message_id)
28 |
async def setup_wit_with_dependencies(store:ObjectStore, wit_ref:str) -> tuple[dict, StepId|None, Mailbox]:
    """Create a genesis message plus the kwargs a wit function expects.

    Returns (kwargs, last_step_id, mailbox); the step id is None because this
    is the genesis step.
    """
    sender_id, recipient_id, gen_message_id = await create_genesis_message(store, get_random_actor_id(), wit_ref)
    wit_kwargs = {
        'agent_id': get_random_actor_id(),
        'actor_id': recipient_id,
        'object_store': store,
    }
    return (wit_kwargs, None, {sender_id: gen_message_id})
38 |
async def setup_wit_prototype_with_dependencies(store:ObjectStore, wit_ref:str) -> tuple[dict, StepId|None, Mailbox]:
    """Create a genesis message plus the kwargs a prototype wit expects.

    NOTE: currently identical to setup_wit_with_dependencies; kept separate so
    the two setups can diverge. Step id is None because this is genesis.
    """
    sender_id, recipient_id, gen_message_id = await create_genesis_message(store, get_random_actor_id(), wit_ref)
    wit_kwargs = {
        'agent_id': get_random_actor_id(),
        'actor_id': recipient_id,
        'object_store': store,
    }
    return (wit_kwargs, None, {sender_id: gen_message_id})
48 |
async def setup_query_with_dependencies(store:ObjectStore, wit_function, wit_ref:str, query_ref:str) -> tuple[dict, StepId]:
    """Run *wit_function* once on a genesis message so an initial step exists to query."""
    sender_id, recipient_id, gen_message_id = await create_genesis_message(store, get_random_actor_id(), wit_ref, query_ref)
    wit_kwargs = {
        'agent_id': get_random_actor_id(),
        'actor_id': recipient_id,
        'object_store': store,
    }
    mailbox = {sender_id: gen_message_id}
    # genesis invocation: there is no previous step id
    new_step_id = await wit_function(None, mailbox, **wit_kwargs)
    return (wit_kwargs, new_step_id)
--------------------------------------------------------------------------------
/tests/wit/test_data_model_blob.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aos.grit import *
3 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
4 | from aos.wit.data_model import *
5 |
async def test_blob__get_set_persist__bytes():
    """Round-trip raw bytes through a BlobObject and the object store."""
    store = MemoryObjectStore()
    blob = BlobObject(None, None)
    payload = b'hello world'
    blob.set_as_bytes(payload)
    # in-memory round trip
    assert blob.get_as_bytes() == payload

    # persisted round trip
    blob_id = await blob.persist(store)
    loaded = await store.load(blob_id)
    assert loaded is not None
    assert loaded.data == payload
18 |
async def test_blob__get_set_persist__str():
    """Round-trip a string through a BlobObject and the object store."""
    store = MemoryObjectStore()
    blob = BlobObject(None, None)
    payload = 'hello world'
    blob.set_as_str(payload)
    # in-memory round trip
    assert blob.get_as_str() == payload

    # persisted round trip: the string is stored utf-8 encoded
    blob_id = await blob.persist(store)
    loaded = await store.load(blob_id)
    assert loaded is not None
    assert loaded.data == bytes(payload, 'utf-8')
31 |
async def test_blob__get_set_persist__dict():
    """Round-trip a JSON-serializable dict through a BlobObject and the object store.

    Fix: the original only asserted the loaded object was not None; it never
    verified the persisted content actually round-trips, unlike the sibling
    bytes/str tests. We now reload via BlobObject and compare the decoded JSON.
    """
    store = MemoryObjectStore()
    blob = BlobObject(None, None)
    test_data = {
        'hello': 'world',
        'but': [1,2,3,5],
        'also': {"this": "that"}
    }
    blob.set_as_json(test_data)
    # in-memory round trip
    test_data_2 = blob.get_as_json()
    assert test_data == test_data_2

    object_id = await blob.persist(store)
    # reload through BlobObject so the JSON can be decoded and compared,
    # mirroring the round-trip checks in the bytes/str sibling tests
    blob_2 = await BlobObject.from_blob_id(store, object_id)
    assert blob_2 is not None
    assert blob_2.get_as_json() == test_data
--------------------------------------------------------------------------------
/tests/wit/test_data_model_inbox.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aos.grit import *
3 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
4 | from aos.wit.data_model import *
5 |
def get_random_actor_id() -> ActorId:
    """Generate an actor id from 20 bytes of randomness."""
    return get_object_id(bytes(os.urandom(20)))
8 |
async def create_messages(store:ObjectStore, sender:ActorId, recipient:ActorId, count:int) -> list[MessageId]:
    """Store a linked chain of *count* messages and return their ids, oldest first.

    NOTE(review): sender/recipient are accepted for symmetry but are not
    encoded into the stored messages here.
    """
    ids: list[MessageId] = []
    prev_id = None
    for i in range(count):
        content_id = await store.store(Blob(None, bytes(f"message {i+1}", 'utf-8')))
        # each message links back to the previous one, forming the chain
        prev_id = await store.store(Message(prev_id, None, content_id))
        ids.append(prev_id)
    return ids
18 |
async def create_new_inbox(store:ObjectStore, actor:ActorId, senders:list[ActorId]) -> Mailbox:
    """Build a mailbox with a 5-message chain per sender.

    Each mailbox entry points at the newest message, i.e. the head of that
    sender's linked list.
    """
    inbox = Mailbox()
    for sender in senders:
        chain = await create_messages(store, sender, actor, 5)
        inbox[sender] = chain[-1]
    return inbox
25 |
async def test_inbox_read_all():
    """Read an inbox in two passes (1 message per sender, then the rest) with a persist in between."""
    store = MemoryObjectStore()

    actor_id = get_random_actor_id()
    senders_ids = [get_random_actor_id(), get_random_actor_id(), get_random_actor_id()]

    # 3 senders x 5 messages each
    new_inbox = await create_new_inbox(store, actor_id, senders_ids)
    inbox = Inbox(store, None, new_inbox)

    # read at most 1 message per sender
    msgs = await inbox.read_new(1)
    #there are 3 senders, and we read one of each
    assert len(msgs) == 3
    for msg in msgs:
        msg_content = (await msg.get_content()).get_as_str()
        assert msg_content == "message 1"

    #persist the inbox, having read only one message from each sender
    read_inbox_id = await inbox.persist(store)

    #create a new inbox from the persisted id, and read all remaining messages
    #we should now get the second set of messages
    inbox = await Inbox.from_inbox_id(store, read_inbox_id, new_inbox)
    msgs = await inbox.read_new()
    assert len(msgs) == 4*3 # 4 messages remain from 3 senders
    for msg in msgs:
        msg_content = (await msg.get_content()).get_as_str()
        assert msg_content in ["message 2", "message 3", "message 4", "message 5"]
        assert msg_content != "message 1"

    #persist the inbox, having read all messages
    # and check that the persisted final inbox matches the in-memory view of "new_inbox"
    read_inbox_id = await inbox.persist(store)
    read_inbox = await store.load(read_inbox_id)
    assert len(read_inbox) == 3
    assert len(read_inbox) == len(new_inbox)
    assert read_inbox == new_inbox
62 |
63 |
--------------------------------------------------------------------------------
/tests/wit/test_data_model_outbox.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aos.grit import *
3 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
4 | from aos.wit.data_model import *
5 |
def get_random_actor_id() -> ActorId:
    """Return a random ActorId derived from 20 bytes of OS entropy."""
    entropy = os.urandom(20)
    return get_object_id(entropy)
8 |
async def get_message_content(store:ObjectStore, message_id:MessageId) -> str:
    """Load the message identified by `message_id` and return its content blob as a string."""
    message = await store.load(message_id)
    blob = await BlobObject.from_blob_id(store, message.content)
    return blob.get_as_str()
13 |
async def test_outbox_from_new():
    """A freshly-built Outbox persists one pending message per recipient."""
    store = MemoryObjectStore()

    actor_id = get_random_actor_id()
    recipient_ids = [get_random_actor_id() for _ in range(3)]

    # Queue one message for every recipient and persist the outbox.
    outbox = Outbox(None)
    for recipient_id in recipient_ids:
        outbox.add(OutboxMessage.from_new(recipient_id, "message 1"))
    outbox_id = await outbox.persist(store)

    # The persisted mailbox has exactly one entry per recipient,
    # each pointing at the message that was queued.
    outbox_mailbox = await store.load(outbox_id)
    assert len(outbox_mailbox) == 3
    for recipient_id in recipient_ids:
        assert recipient_id in outbox_mailbox
        content = await get_message_content(store, outbox_mailbox[recipient_id])
        assert content == "message 1"
35 |
36 |
async def test_outbox_from_previous():
    """An Outbox re-hydrated from a persisted id keeps each recipient's newest message."""
    store = MemoryObjectStore()
    recipient_ids = [get_random_actor_id() for _ in range(3)]

    # Seed an outbox with 2 messages for each recipient.
    outbox = Outbox(None)
    for recipient_id in recipient_ids:
        outbox.add(OutboxMessage.from_new(recipient_id, "message 1"))
        outbox.add(OutboxMessage.from_new(recipient_id, "message 2"))
    first_outbox_id = await outbox.persist(store)

    # Re-hydrate a new outbox from the previous one and append a third
    # message for only the first two recipients.
    outbox = await Outbox.from_outbox_id(store, first_outbox_id)
    outbox.add(OutboxMessage.from_new(recipient_ids[0], "message 3"))
    outbox.add(OutboxMessage.from_new(recipient_ids[1], "message 3"))
    second_outbox_id = await outbox.persist(store)

    outbox_mailbox = await store.load(second_outbox_id)
    assert len(outbox_mailbox) == 3
    for recipient_id in recipient_ids:
        assert recipient_id in outbox_mailbox

    assert (await get_message_content(store, outbox_mailbox[recipient_ids[0]])) == "message 3"
    assert (await get_message_content(store, outbox_mailbox[recipient_ids[1]])) == "message 3"
    # No third message was added for this recipient, so its head stays at "message 2".
    assert (await get_message_content(store, outbox_mailbox[recipient_ids[2]])) == "message 2"
67 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/tests/wit/test_wit_state.py:
--------------------------------------------------------------------------------
1 | from aos.grit.stores.memory import MemoryObjectStore, MemoryReferences
2 | from aos.wit import *
3 | from aos.runtime import *
4 | import helpers_wit as helpers
5 |
class MoreData:
    """Simple nested object used to verify that WitState persists sub-objects."""
    # Detail string; None until assigned by the test.
    my_details:str = None
8 |
class MyState(WitState):
    """WitState subclass whose attributes are persisted to and loaded from a core."""
    str1:str = None
    int1:int = None  # annotation fixed from `str`: the test assigns and asserts the int 100
    dict1:dict = None
    list1:list = None
    subobj1:MoreData = None  # nested object round-tripped through the core
15 |
async def test_automatic_property_persistence_in_core():
    """All MyState fields survive a persist/load round-trip through a core."""
    store = MemoryObjectStore()
    core = Core(store, {}, None)

    state = MyState()
    # Nothing has been set or loaded yet.
    assert state.str1 is None
    assert state.subobj1 is None
    await state._load_from_core(core)
    # Loading from an empty core leaves everything unset.
    assert state.str1 is None
    assert state.subobj1 is None

    # Populate every field, including a nested object.
    state.str1 = "str1"
    state.int1 = 100
    state.dict1 = {"a":1, "b":2}
    state.list1 = [1,2,3]
    state.subobj1 = MoreData()
    state.subobj1.my_details = "my details"
    # Persist and confirm all five fields landed under the "state" node.
    await state._persist_to_core(core)
    state_data = await core.gett("state")
    assert len(state_data) == 5

    # A brand-new state object starts out empty again...
    state = MyState()
    assert state.str1 is None
    assert state.subobj1 is None
    # ...until it is loaded from the core, which restores every field.
    await state._load_from_core(core)
    assert state.str1 is not None
    assert state.str1 == "str1"
    assert state.int1 == 100
    assert state.dict1 == {"a":1, "b":2}
    assert state.list1 == [1,2,3]
    assert state.subobj1 is not None
    assert state.subobj1.my_details == "my details"
55 |
--------------------------------------------------------------------------------