├── examples ├── .python-version ├── requirements.txt ├── example.just ├── README.md ├── main.py ├── cli_cluster.py └── ig_post_planner.py ├── offchain ├── tools │ ├── tests │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── test_default_api.py │ │ ├── test_ollama.py │ │ ├── test_tool.py │ │ └── test_agent.py │ ├── src │ │ └── nexus_tools │ │ │ ├── __init__.py │ │ │ └── server │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── extra_models.py │ │ │ ├── model.py │ │ │ ├── agents.py │ │ │ ├── error.py │ │ │ ├── completion.py │ │ │ └── prompt.py │ │ │ ├── tools │ │ │ └── __init__.py │ │ │ ├── controllers │ │ │ ├── __init__.py │ │ │ └── inference.py │ │ │ ├── security_api.py │ │ │ └── crew │ │ │ ├── talus_chat_ollama.py │ │ │ └── talus_ollama.py │ ├── .flake8 │ ├── Modelfile │ ├── pyproject.toml │ ├── README.md │ ├── .gitignore │ └── LICENSE ├── events │ ├── src │ │ └── nexus_events │ │ │ ├── __init__.py │ │ │ └── offchain.py │ ├── pyproject.toml │ ├── README.md │ ├── .gitignore │ └── LICENSE ├── README.md └── LICENSE ├── nexus_sdk ├── .python-version ├── requirements.txt ├── README.md ├── pyproject.toml ├── setup.py └── src │ └── nexus_sdk │ ├── __init__.py │ ├── node.py │ ├── model.py │ ├── utils.py │ └── cluster.py ├── docker ├── sui │ ├── genesis │ │ ├── .dockerignore │ │ ├── requirements.txt │ │ ├── static │ │ │ ├── sui.keystore │ │ │ ├── client.yaml │ │ │ └── fullnode.yaml │ │ ├── overlays │ │ │ └── common.yaml │ │ ├── Dockerfile │ │ ├── compose-validators.yaml │ │ └── generate.py │ ├── .gitignore │ ├── Dockerfile │ ├── bin │ │ └── publish_package.sh │ └── compose.yaml ├── docker-compose-nollama.yaml ├── docker-compose.yaml ├── .env ├── ollama │ ├── Dockerfile │ └── compose.yaml ├── nexus │ ├── Dockerfile │ ├── bin │ │ ├── setup_venv.sh │ │ ├── start_events.py │ │ └── bootstrap_model.py │ └── compose.yaml ├── README.md └── containers.just ├── .dockerignore ├── .gitattributes ├── e2e_tests ├── .gitignore ├── src │ ├── prelude.rs │ ├── 
ollama_mock.rs │ ├── prompt.rs │ └── completion.rs ├── Cargo.toml ├── README.md └── oneclick-test.sh ├── onchain ├── Suibase.toml ├── Move.toml ├── sources │ ├── consts.move │ ├── tool.move │ ├── tests │ │ ├── node_tests.move │ │ ├── prompt_tests.move │ │ └── cluster_tests.move │ ├── node.move │ ├── prompt.move │ └── task.move ├── LICENSE └── README.md ├── .editorconfig ├── .github ├── workflows │ ├── python.yml │ └── talus-agentic-framework.yml └── actions │ └── fetch-sui-cli │ └── action.yml ├── rustfmt.toml ├── TROUBLESHOOTING.md ├── justfile ├── .gitignore ├── LICENSE └── README.md /examples/.python-version: -------------------------------------------------------------------------------- 1 | 3.10 2 | -------------------------------------------------------------------------------- /offchain/tools/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /nexus_sdk/.python-version: -------------------------------------------------------------------------------- 1 | 3.10 2 | -------------------------------------------------------------------------------- /docker/sui/genesis/.dockerignore: -------------------------------------------------------------------------------- 1 | ./files/* -------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /offchain/events/src/nexus_events/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker/sui/.gitignore: -------------------------------------------------------------------------------- 1 | genesis/.venv 2 | genesis/files 
-------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/server/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/server/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/server/tools/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | ./offchain/events/build 2 | ./offchain/tools/build -------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/server/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker/sui/genesis/requirements.txt: -------------------------------------------------------------------------------- 1 | hiyapyco>=0.5.1 2 | setuptools 3 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Force LF line endings for all files 2 | * text=auto eol=lf -------------------------------------------------------------------------------- /docker/docker-compose-nollama.yaml: -------------------------------------------------------------------------------- 1 | include: 2 | - ./sui/compose.yaml 3 | - ./nexus/compose.yaml 4 | -------------------------------------------------------------------------------- 
/docker/sui/genesis/static/sui.keystore: -------------------------------------------------------------------------------- 1 | [ 2 | "ACWkfiQ6x7FxJ74IZAgQYRl67TTtLLDlP2RhJjPI6tTQ" 3 | ] 4 | -------------------------------------------------------------------------------- /nexus_sdk/requirements.txt: -------------------------------------------------------------------------------- 1 | # Last version before JSON RPC is marked as deprecated 2 | pysui==0.52.0 3 | -------------------------------------------------------------------------------- /e2e_tests/.gitignore: -------------------------------------------------------------------------------- 1 | # ! Don't ignore Cargo.lock so that we can cache the dependencies on CI 2 | 3 | target 4 | -------------------------------------------------------------------------------- /docker/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | include: 2 | - ./sui/compose.yaml 3 | - ./ollama/compose.yaml 4 | - ./nexus/compose.yaml 5 | -------------------------------------------------------------------------------- /offchain/tools/.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 200 3 | exclude = .git,__pycache__,__init__.py,.mypy_cache,.pytest_cache,.venv 4 | -------------------------------------------------------------------------------- /offchain/README.md: -------------------------------------------------------------------------------- 1 | # Nexus offchain components 2 | 3 | See [`events`][events] and [`tools`][tools]. 
# Get a decent python shell for exploring the SDK and everything
SYSTEM "You are a helpful AI assistant."
pub(crate) type Result<T, E = anyhow::Error> = std::result::Result<T, E>;
const StatusIdle: vector<u8> = b"IDLE";
const StatusRunning: vector<u8> = b"RUNNING";
    public fun status_running(): String { utf8(StatusRunning) }

    const StatusSuccess: vector<u8> = b"SUCCESS";
    public fun status_success(): String { utf8(StatusSuccess) }
[project] 9 | name = "nexus_events" 10 | version = "0.1.0" 11 | description = "Nexus offchain event handling" 12 | authors = [ 13 | { name="Talus", email="hi@talus.network" } 14 | ] 15 | dependencies = [ 16 | "python-dotenv", 17 | "pysui==0.52.0", 18 | "asyncio", 19 | "aiohttp", 20 | "pathlib", 21 | "pynacl", 22 | "psutil", 23 | "unidecode" 24 | ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /nexus_sdk/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="nexus_sdk", 5 | version="0.1.2", 6 | packages=find_packages(where="src"), 7 | package_dir={"": "src"}, 8 | install_requires=["pysui==0.52.0", "setuptools"], 9 | description="Talus Nexus SDK", 10 | long_description=open("README.md").read(), 11 | long_description_content_type="text/markdown", 12 | classifiers=[ 13 | "Development Status :: 3 - Alpha", 14 | "Intended Audience :: Developers", 15 | "Programming Language :: Python :: 3.10", 16 | ], 17 | ) 18 | -------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/server/security_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | from typing import List 4 | 5 | from fastapi import Depends, Security 6 | from fastapi.openapi.models import OAuthFlowImplicit, OAuthFlows 7 | from fastapi.security import ( 8 | HTTPAuthorizationCredentials, 9 | HTTPBasic, 10 | HTTPBasicCredentials, 11 | HTTPBearer, 12 | OAuth2, 13 | OAuth2AuthorizationCodeBearer, 14 | OAuth2PasswordBearer, 15 | SecurityScopes, 16 | ) 17 | from fastapi.security.api_key import APIKeyCookie, APIKeyHeader, APIKeyQuery 18 | 19 | from openapi_server.models.extra_models import TokenModel 20 | -------------------------------------------------------------------------------- /docker/nexus/Dockerfile: 
-------------------------------------------------------------------------------- 1 | #syntax=docker/dockerfile:1 2 | 3 | FROM python:3.10-slim AS builder 4 | 5 | ARG INSTALL_RUST=false 6 | 7 | ENV INSTALL_RUST=${INSTALL_RUST} 8 | 9 | WORKDIR /app 10 | 11 | RUN ls -lta 12 | 13 | COPY . . 14 | 15 | COPY --from=nexus bin/setup_venv.sh /usr/local/bin/setup_venv.sh 16 | 17 | RUN chmod +x /usr/local/bin/setup_venv.sh 18 | 19 | RUN /usr/local/bin/setup_venv.sh 20 | 21 | FROM python:3.10-slim AS runtime 22 | 23 | WORKDIR /app 24 | 25 | COPY --from=builder /app /app 26 | 27 | EXPOSE 8080 28 | 29 | CMD ["bash", "-c", "source .venv/bin/activate && uvicorn src.nexus_tools.server.main:app --host 0.0.0.0 --port 8080"] 30 | -------------------------------------------------------------------------------- /nexus_sdk/src/nexus_sdk/__init__.py: -------------------------------------------------------------------------------- 1 | from .node import create_node 2 | from .model import create_model 3 | from .utils import get_sui_client 4 | from .utils import get_sui_client_with_airdrop 5 | from .cluster import ( 6 | create_cluster, 7 | create_agent_for_cluster, 8 | create_task, 9 | execute_cluster, 10 | get_cluster_execution_response, 11 | ) 12 | 13 | __all__ = [ 14 | "create_agent_for_cluster", 15 | "create_cluster", 16 | "create_model", 17 | "create_node", 18 | "create_task", 19 | "execute_cluster", 20 | "get_cluster_execution_response", 21 | "get_sui_client", 22 | "get_sui_client_with_airdrop", 23 | ] 24 | -------------------------------------------------------------------------------- /docker/ollama/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | ollama: 3 | image: talusnetwork/ollama:${LLAMA_MODEL_VERSION_TAG} 4 | container_name: ollama 5 | deploy: 6 | resources: 7 | reservations: 8 | devices: 9 | - driver: ${OLLAMA_DEVICE_DRIVER} 10 | count: ${OLLAMA_DEVICE_COUNT} 11 | capabilities: ["${OLLAMA_DEVICE_CAPABILITIES}"] 12 
| build: 13 | context: "." 14 | args: 15 | LLAMA_MODEL_VERSION: ${LLAMA_MODEL_VERSION} 16 | ports: 17 | - "11434:11434" 18 | restart: unless-stopped 19 | depends_on: 20 | build-suitools: 21 | condition: service_completed_successfully 22 | -------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/server/models/agents.py: -------------------------------------------------------------------------------- 1 | # models.py 2 | from pydantic import BaseModel, Field 3 | from typing import List 4 | 5 | 6 | class ToolModel(BaseModel): 7 | name: str 8 | description: str 9 | 10 | 11 | class AgentModel(BaseModel): 12 | role: str 13 | goal: str 14 | backstory: str 15 | tools: List[ToolModel] 16 | 17 | 18 | class TaskModel(BaseModel): 19 | description: str 20 | expected_output: str 21 | agent_role: str 22 | 23 | 24 | class CreateAgentRequest(BaseModel): 25 | company_description: str 26 | company_domain: str 27 | hiring_needs: str 28 | specific_benefits: str 29 | agents: List[AgentModel] 30 | tasks: List[TaskModel] 31 | -------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/server/models/error.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | from __future__ import annotations 4 | from typing import Optional 5 | from pydantic import BaseModel, Field 6 | 7 | 8 | class Error(BaseModel): 9 | """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). 10 | 11 | Do not edit the class manually. 12 | 13 | Error - a model defined in OpenAPI 14 | 15 | message: The message of this Error [Optional]. 16 | code: The code of this Error [Optional]. 
17 | """ 18 | 19 | message: Optional[str] = Field(alias="message", default=None) 20 | code: Optional[int] = Field(alias="code", default=None) 21 | 22 | 23 | Error.update_forward_refs() 24 | -------------------------------------------------------------------------------- /docker/nexus/bin/setup_venv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "$INSTALL_RUST" = "true" ]; then 4 | apt-get update 5 | apt-get install -y --no-install-recommends curl build-essential 6 | curl https://sh.rustup.rs -sSf | sh -s -- -y 7 | . $HOME/.cargo/env 8 | rustup update 9 | rustup default stable 10 | fi 11 | 12 | pip install uv 13 | uv venv -p "$PYTHON_VERSION" 14 | export OSTYPE=${OSTYPE:-linux-gnu} 15 | . .venv/bin/activate 16 | 17 | if [ "$INSTALL_RUST" = "true" ]; then 18 | . $HOME/.cargo/env 19 | fi 20 | 21 | if [ -f "pyproject.toml" ]; then 22 | uv pip install . 23 | else 24 | for dir in */; do 25 | if [ -f "$dir/pyproject.toml" ]; then 26 | uv pip install "$dir" 27 | fi 28 | done 29 | fi 30 | -------------------------------------------------------------------------------- /nexus_sdk/src/nexus_sdk/node.py: -------------------------------------------------------------------------------- 1 | from pysui.sui.sui_txn.sync_transaction import SuiTransaction 2 | from pysui.sui.sui_types.scalars import SuiU64 3 | 4 | 5 | # Creates a new node owned object. 6 | # Returns the node ID. 
7 | def create_node(client, package_id, name, node_type, gpu_memory): 8 | txn = SuiTransaction(client=client) 9 | 10 | result = txn.move_call( 11 | target=f"{package_id}::node::create", 12 | arguments=[name, node_type, SuiU64(gpu_memory), "c", []], 13 | ) 14 | result = txn.execute(gas_budget=10000000) 15 | 16 | if result.is_ok() or result._data.succeeded: 17 | node_id = result._data.effects.created[0].reference.object_id 18 | return node_id 19 | else: 20 | print(f"Failed to create node: {result.result_string}") 21 | return None 22 | -------------------------------------------------------------------------------- /.github/actions/fetch-sui-cli/action.yml: -------------------------------------------------------------------------------- 1 | name: "Setup Sui CLI" 2 | description: "Downloads and sets up the Sui CLI" 3 | inputs: 4 | sui_ref: 5 | description: "Sui version to download from the Sui's Github release page" 6 | required: true 7 | runs: 8 | using: "composite" 9 | steps: 10 | - run: wget "https://github.com/MystenLabs/sui/releases/download/${{ inputs.sui_ref }}/sui-${{ inputs.sui_ref }}-ubuntu-x86_64.tgz" 11 | shell: bash 12 | - run: tar -xvf "sui-${{ inputs.sui_ref }}-ubuntu-x86_64.tgz" 13 | shell: bash 14 | - run: mkdir -p /home/runner/.local/bin 15 | shell: bash 16 | - run: mv sui /home/runner/.local/bin/sui 17 | shell: bash 18 | - run: sudo chmod +x /home/runner/.local/bin/sui 19 | shell: bash 20 | - run: sui --version 21 | shell: bash 22 | -------------------------------------------------------------------------------- /TROUBLESHOOTING.md: -------------------------------------------------------------------------------- 1 | # "Object is not available for consumption" 2 | 3 | You may encounter this error when working with Sui localnet. 4 | In a nutshell, this means that some previous operation using this object, usually a `Coin`, has not finished correctly. 5 | Typically this happens when you turn off the localnet and start it again. 
Most likely you are using a very different version of localnet with Suibase.
/opt/sui/db/consensus_db 11 | narwhal-config: 12 | max_header_delay: 2000ms 13 | expensive-safety-check-config: 14 | enable-epoch-sui-conservation-check: false 15 | enable-deep-per-tx-sui-conservation-check: false 16 | force-disable-epoch-sui-conservation-check: false 17 | enable-state-consistency-check: false 18 | force-disable-state-consistency-check: false 19 | enable-move-vm-paranoid-checks: false 20 | epoch_duration_ms: 120000 21 | -------------------------------------------------------------------------------- /e2e_tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "e2e_tests" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [[bin]] 7 | name = "e2e_tests_bin" 8 | path = "src/main.rs" 9 | 10 | [dependencies] 11 | anyhow = "1.0" 12 | axum = { version = "0.7", default-features = false, features = [ 13 | "tokio", 14 | "http2", 15 | ] } 16 | dotenvy = "0.15" 17 | env_logger = "0.11" 18 | futures-util = "0.3" 19 | log = "0.4" 20 | rand = "0.8" 21 | reqwest = { version = "0.12", features = ["json"] } 22 | serde = { version = "1.0", features = ["derive"] } 23 | serde_json = "1.0" 24 | tokio = { version = "1", features = ["full"] } 25 | 26 | [dependencies.sui_sdk] 27 | git = "https://github.com/mystenlabs/sui" 28 | tag = "testnet-v1.26.1" 29 | package = "sui-sdk" 30 | 31 | [dependencies.sui_keys] 32 | git = "https://github.com/mystenlabs/sui" 33 | tag = "testnet-v1.26.1" 34 | package = "sui-keys" 35 | -------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/server/models/completion.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | from __future__ import annotations 4 | from datetime import datetime 5 | from typing import List, Optional 6 | from pydantic import BaseModel, Field 7 | 8 | 9 | class ToolCall(BaseModel): 10 | name: str 11 | args: dict 12 | 13 | 14 | class 
Completion(BaseModel): 15 | """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). 16 | 17 | Do not edit the class manually. 18 | 19 | Completion - a model defined in OpenAPI 20 | 21 | completion: The completion of this Completion. 22 | timestamp: The timestamp of this Completion. 23 | """ 24 | 25 | completion: str = Field(alias="completion") 26 | timestamp: datetime = Field(alias="timestamp") 27 | tool_calls: Optional[List[ToolCall]] = None 28 | 29 | 30 | Completion.update_forward_refs() 31 | -------------------------------------------------------------------------------- /offchain/tools/tests/test_default_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | from fastapi.testclient import TestClient 4 | 5 | 6 | from openapi_server.models.completion import Completion 7 | from openapi_server.models.error import Error 8 | from openapi_server.models.prompt import Prompt 9 | 10 | 11 | def test_predict_post(client: TestClient): 12 | """Test case for predict_post 13 | 14 | Get a completion response from the AI model based on the provided prompt and parameters. 
Local infra consists of the following services
class Inference:
    """Thin wrapper around the Ollama chat API."""

    @staticmethod
    def prompt(prompt, model, max_tokens=1000, temperature=1.0):
        """Send a single user message to `model` and return the raw chat response.

        The Ollama server location is taken from the OLLAMA_HOST environment
        variable, defaulting to a local instance.
        """
        host = os.getenv("OLLAMA_HOST", "http://localhost:11434")

        # Client bound to the configured host rather than the library default.
        ollama_client = Client(host=host)

        generation_options = {
            "temperature": temperature,
            "num_predict": max_tokens,
        }
        user_message = {
            "role": "user",
            "content": prompt,
        }

        return ollama_client.chat(
            model=model,
            options=generation_options,
            messages=[user_message],
        )
11 | 12 | # Commands for running examples 13 | mod example 'examples/example.just' 14 | 15 | # Build, Start, Stop, or Clean Up docker containers 16 | mod containers 'docker/containers.just' 17 | 18 | # Builds and starts the entire environment 19 | infra-up: version-check 20 | @print("Building and starting the entire environment..."); __import__('os').system("just containers build"); __import__('os').system("just containers start") 21 | 22 | # Shuts down and cleans up the environment 23 | infra-down: version-check 24 | @print("Stopping and cleaning up the entire environment..."); __import__('os').system("just containers stop"); __import__('os').system("just containers clean") 25 | -------------------------------------------------------------------------------- /docker/sui/genesis/Dockerfile: -------------------------------------------------------------------------------- 1 | # Stage 1: Build sui tools and dependencies 2 | ARG SUI_TAG=testnet-v1.29.1 3 | FROM talusnetwork/sui-tools:${SUI_TAG} AS sui-builder 4 | 5 | FROM python:3.9-slim-bullseye 6 | 7 | ENV PYTHONUNBUFFERED=1 8 | ENV TARGET_DIR=/opt/sui/genesis/files 9 | 10 | RUN apt-get update && apt-get install -y libpq5 libpq-dev ca-certificates libc6 11 | 12 | WORKDIR /opt/sui/genesis 13 | 14 | # Copy necessary sui binary and libraries from the sui-builder stage 15 | COPY --from=sui-builder /usr/local/bin/sui /usr/local/bin/sui 16 | 17 | # Copy the necessary files for genesis creation 18 | COPY overlays overlays 19 | COPY static static 20 | COPY compose-validators.yaml compose-validators.yaml 21 | COPY requirements.txt . 22 | COPY generate.py . 
module talus::tool {
    //! A tool is a utility or resource that an agent can use to complete tasks.
    //! A tool is stored optionally on a [talus::task::Task] and if provided,
    //! the agent will use the result of the tool to submit a response.
    //! A tool can have side-effects.
    //!
    //! An example of a tool would be wiki search or a smart contract invocation.

    use std::string::String;

    // === Data models ===

    /// Tool name serves as an identifier for a tool.
    public struct Tool has store, copy, drop {
        name: String,
        /// At the moment tool can be parametrized only up front when creating a
        /// cluster.
        // NOTE(review): the vector's type parameter was missing in the reviewed
        // copy (likely lost in extraction); restored as `String` — confirm
        // against the original source.
        args: vector<String>,
    }

    // === Constructors ===

    public fun new(name: String, args: vector<String>): Tool {
        Tool { name, args }
    }

    // === Accessors ===

    public fun get_name(self: &Tool): String { self.name }
    public fun get_args(self: &Tool): vector<String> { self.args }
}
Proceeding to publish package." 9 | 10 | # Step 1: Navigate to /opt/sui/onchain 11 | cd /opt/sui/onchain || { echo "Failed to navigate to /opt/sui/onchain"; exit 1; } 12 | 13 | # Step 2: Run the sui client publish command and extract the package_id 14 | PACKAGE_ID=$(sui client publish --skip-dependency-verification --json | jq -r '.objectChanges[] | select(.type == "published") | .packageId') 15 | 16 | # Step 3: Save the package_id to the specified PACKAGE_ID_FILE 17 | if [ "$PACKAGE_ID" != "null" ] && [ -n "$PACKAGE_ID" ]; then 18 | echo "[\"$PACKAGE_ID\"]" > "$PACKAGE_ID_FILE" 19 | echo "Package ID saved to $PACKAGE_ID_FILE" 20 | else 21 | echo "Failed to extract a valid package ID" 22 | exit 1 23 | fi 24 | else 25 | echo "Package ID file already exists. No action taken." 26 | fi 27 | -------------------------------------------------------------------------------- /onchain/sources/tests/node_tests.move: -------------------------------------------------------------------------------- 1 | #[test_only] 2 | module talus::node_tests { 3 | use sui::test_scenario; 4 | use talus::node::{Self, Node}; 5 | use std::string; 6 | 7 | #[test] 8 | fun test_create_node() { 9 | let mut scenario = test_scenario::begin(@0x1); 10 | let ctx = test_scenario::ctx(&mut scenario); 11 | 12 | // Create a node 13 | node::create( 14 | string::utf8(b"Test Node"), 15 | string::utf8(b"GPU"), 16 | 16, 17 | vector::empty(), 18 | vector::empty(), 19 | ctx 20 | ); 21 | 22 | // Move to the next transaction 23 | test_scenario::next_tx(&mut scenario, @0x1); 24 | 25 | // Check if the node was created and owned 26 | assert!(test_scenario::has_most_recent_for_sender(&scenario), 0); 27 | 28 | // Get the created node 29 | let node = test_scenario::take_from_sender(&scenario); 30 | 31 | // Return the node to the scenario 32 | test_scenario::return_to_sender(&scenario, node); 33 | 34 | test_scenario::end(scenario); 35 | } 36 | } 37 | 
-------------------------------------------------------------------------------- /docker/containers.just: -------------------------------------------------------------------------------- 1 | set shell := ["python3", "-c"] 2 | 3 | [private] 4 | default: 5 | @__import__('os').system("just -l containers") 6 | 7 | [private] 8 | [no-cd] 9 | check: 10 | @import os, sys; from subprocess import call; result = call("docker ps | grep -q 'examples'", shell=True); \ 11 | print("Docker environment is already running.") if result == 0 else (print("Docker environment is not running. Starting environment...") or os.system("just containers start")) 12 | 13 | # Builds the Docker containers using Docker Compose 14 | [no-cd] 15 | build: 16 | @print("Building Docker containers..."); __import__('os').system("python3 ./docker/nexusctl.py create") 17 | 18 | # Starts the Docker containers using Docker Compose 19 | [no-cd] 20 | start: 21 | @print("Starting Docker containers..."); __import__('os').system("python3 ./docker/nexusctl.py start") 22 | 23 | # Stops the Docker containers using Docker Compose 24 | [no-cd] 25 | stop: 26 | @print("Stopping Docker containers..."); __import__('os').system("python3 ./docker/nexusctl.py stop") 27 | 28 | # Deletes all Docker volumes related to the project using Docker Compose 29 | [no-cd] 30 | clean: 31 | @print("Deleting Docker volumes..."); __import__('os').system("python3 ./docker/nexusctl.py delete") 32 | -------------------------------------------------------------------------------- /examples/example.just: -------------------------------------------------------------------------------- 1 | set shell := [ "python3", "-c"] 2 | 3 | [private] 4 | default: version-check 5 | @__import__('os').system("just -l example") 6 | 7 | [private] 8 | version-check: 9 | @import sys; major, minor = sys.version_info[:2]; \ 10 | assert (major, minor) >= (3, 7), "This script requires at least Python 3.7. Please link \"python3\" to Python 3.7 or higher and try again." 
# coding: utf-8

from __future__ import annotations

from pydantic import BaseModel, Field, validator
from typing import List, Optional


class Prompt(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.

    Prompt - a model defined in OpenAPI

    prompt: The text of this Prompt.
    model: The model of this Prompt.
    max_tokens: The max_tokens of this Prompt.
    temperature: The temperature of this Prompt.
    """

    prompt: str = Field(alias="prompt")
    model: str = Field(alias="model", default="llama3.2:1b")
    max_tokens: int = Field(alias="max_tokens", default=1000)
    temperature: float = Field(alias="temperature", default=1.0)
    tools: Optional[List[str]] = Field(
        default=None, description="List of tool names to be used"
    )

    @validator("temperature")
    def temperature_range(cls, value):
        # The original two validators (temperature_max / temperature_min)
        # overlapped: the min check was fully redundant with the max one.
        # A single range check with an explicit `raise` replaces them, so the
        # validation also survives `python -O` (which strips asserts) and
        # produces a readable error message.
        if not 0 <= value <= 1:
            raise ValueError("temperature must be between 0 and 1")
        return value


Prompt.update_forward_refs()
"wikipedia", 58 | "langchain-experimental", 59 | "arxiv", 60 | "xmltodict", 61 | "wolframalpha", 62 | "langchain_experimental", 63 | "google-generativeai", 64 | "unstructured" 65 | ] 66 | 67 | 68 | -------------------------------------------------------------------------------- /offchain/events/src/nexus_events/offchain.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import os 3 | from dotenv import load_dotenv 4 | 5 | load_dotenv() 6 | 7 | LLM_ASSISTANT_URL = os.getenv("LLM_ASSISTANT_URL", "http://localhost:8080/predict") 8 | 9 | 10 | class OffChain: 11 | def process( 12 | self, prompt: str, model_name: str, max_tokens: int, temperature: float 13 | ) -> str: 14 | url = LLM_ASSISTANT_URL 15 | headers = {"Content-Type": "application/json"} 16 | prompt_data = { 17 | "prompt": prompt, 18 | "model": model_name, 19 | "max_tokens": int(max_tokens), 20 | "temperature": temperature, 21 | } 22 | 23 | try: 24 | 25 | response = requests.post(url, headers=headers, json=prompt_data) 26 | response.raise_for_status() 27 | result = response.json() 28 | 29 | completion = result["completion"] 30 | return completion 31 | except requests.exceptions.RequestException as e: 32 | msg = f"Error occurred while calling the API: {e}" 33 | if hasattr(e, "response") and e.response is not None: 34 | msg += f"\nResponse content: {e.response.text}" 35 | print(msg) 36 | raise Exception(status_code=500, detail=msg) 37 | 38 | 39 | def main(): 40 | 41 | off_chain = OffChain() 42 | prompt = "Write python script that prints the numbers 1 to 100" 43 | model_name = "tinyllama" 44 | max_tokens = 3000 45 | temperature = 0.3 46 | 47 | completion = off_chain.process(prompt, model_name, max_tokens, temperature) 48 | print(completion) 49 | 50 | 51 | if __name__ == "__main__": 52 | main() 53 | -------------------------------------------------------------------------------- /offchain/tools/tests/test_ollama.py: 
import argparse
import ollama


class Inference:
    """Minimal helper around `ollama.chat` for manual smoke testing."""

    @staticmethod
    def prompt(prompt, model, max_tokens=1000, temperature=1.0):
        """Send `prompt` to `model` and return the raw chat response dict."""
        options = {"temperature": temperature, "num_predict": max_tokens}

        response = ollama.chat(
            model=model,
            options=options,
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                },
            ],
        )

        return response


def main():
    """Parse CLI arguments and print a single completion from Ollama."""
    parser = argparse.ArgumentParser(description="Test Ollama chat with Mistral model")
    parser.add_argument("prompt", help="The prompt to send to the model")
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=1000,
        help="Maximum number of tokens to generate",
    )
    parser.add_argument(
        "--temperature", type=float, default=1.0, help="Temperature for text generation"
    )

    args = parser.parse_args()

    # Ollama model tags use a colon separator ("name:tag"); the original
    # "mistral-latest" is not a valid tag and would fail to resolve.
    model = "mistral:latest"

    print(f"Testing Ollama chat with model: {model}")
    print(f"Prompt: {args.prompt}")
    print(f"Max tokens: {args.max_tokens}")
    print(f"Temperature: {args.temperature}")
    print("\nGenerating response...\n")

    try:
        response = Inference.prompt(
            args.prompt, model, args.max_tokens, args.temperature
        )
        print("Response:")
        print(response["message"]["content"])
    except Exception as e:
        print(f"An error occurred: {e}")


if __name__ == "__main__":
    main()
7 | 8 | use std::string::String; 9 | use sui::event; 10 | use sui::transfer::transfer; 11 | 12 | // === Data models === 13 | 14 | /// Meant as an owned object. 15 | /// By having ownership of this object you can create new models that are 16 | /// bound to this node. 17 | /// 18 | /// TODO: In future this should have the same ownership pattern as models 19 | /// and agents. 20 | public struct Node has key, store { 21 | id: UID, 22 | name: String, 23 | node_type: String, 24 | gpu_memory: u64, 25 | image_hash: vector, 26 | external_arguments: vector, 27 | } 28 | 29 | // === Events === 30 | 31 | public struct NodeCreatedEvent has copy, drop { 32 | node: ID, 33 | name: String, 34 | } 35 | 36 | // === Constructors === 37 | 38 | public entry fun create( 39 | name: String, 40 | node_type: String, 41 | gpu_memory: u64, 42 | image_hash: vector, 43 | external_arguments: vector, 44 | ctx: &mut TxContext, 45 | ) { 46 | let node = Node { 47 | id: object::new(ctx), 48 | name, 49 | node_type, 50 | gpu_memory, 51 | image_hash, 52 | external_arguments, 53 | }; 54 | 55 | event::emit(NodeCreatedEvent { 56 | node: object::id(&node), 57 | name: node.name, 58 | }); 59 | 60 | transfer(node, ctx.sender()); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /nexus_sdk/src/nexus_sdk/model.py: -------------------------------------------------------------------------------- 1 | from pysui.sui.sui_txn.sync_transaction import SuiTransaction 2 | from pysui.sui.sui_types.scalars import ObjectID, SuiU64, SuiU8, SuiString, SuiBoolean 3 | from pysui.sui.sui_types.collections import SuiArray 4 | import ast 5 | 6 | 7 | # Creates a new on-chain model object. 8 | # Returns the model ID and the model owner capability ID. 
def create_model(
    client,
    package_id,
    node_id,
    name,
    model_hash,
    url,
    token_price,
    capacity,
    num_params,
    description,
    max_context_length,
    is_fine_tuned,
    family,
    vendor,
    is_open_source,
    datasets,
):
    """Create a new on-chain model object via `{package_id}::model::create`.

    Returns a `(model_id, model_owner_cap_id)` tuple parsed from the emitted
    creation event on success, or None if the transaction failed.
    """
    txn = SuiTransaction(client=client)

    args = [
        ObjectID(node_id),
        SuiString(name),
        SuiArray([SuiU8(b) for b in model_hash]),
        SuiString(url),
        SuiU64(token_price),
        SuiU64(capacity),
        SuiU64(num_params),
        SuiString(description),
        SuiU64(max_context_length),
        SuiBoolean(is_fine_tuned),
        SuiString(family),
        SuiString(vendor),
        SuiBoolean(is_open_source),
        SuiArray([SuiString(dataset) for dataset in datasets]),
    ]

    # Queue the move call; its return value is not the execution result, so it
    # is intentionally discarded (the original bound it to `result` and then
    # immediately overwrote it with the execute() result).
    txn.move_call(
        target=f"{package_id}::model::create",
        arguments=args,
    )
    result = txn.execute(gas_budget=10000000)

    if result.is_ok():
        effects = result.result_data.effects
        if effects.status.status == "success":
            # just because it says "parsed_json" doesn't mean it's actually
            # valid JSON apparently
            not_json = result.result_data.events[0].parsed_json
            created_event = ast.literal_eval(not_json.replace("\n", "\\n"))

            model_id = created_event["model"]
            model_owner_cap_id = created_event["owner_cap"]
            return model_id, model_owner_cap_id

    return None
12 | - 20000000000000000 13 | parameters: 14 | allow_insertion_of_extra_objects: false 15 | epoch_duration_ms: 120000 16 | validator_config_info: 17 | - commission_rate: 0 18 | consensus_address: /ip4/127.0.0.1/tcp/8083/http 19 | gas_price: 1000 20 | name: validator1 21 | narwhal_primary_address: /dns/validator1/udp/8081 22 | narwhal_worker_address: /dns/validator1/udp/8082 23 | network_address: /dns/validator1/tcp/8080/http 24 | p2p_address: /dns/validator1/udp/8084 25 | stake: 20000000000000000 26 | genesis: 27 | genesis-file-location: /opt/sui/genesis.blob 28 | - commission_rate: 0 29 | consensus_address: /ip4/127.0.0.1/tcp/8083/http 30 | gas_price: 1000 31 | name: validator2 32 | narwhal_primary_address: /dns/validator2/udp/8081 33 | narwhal_worker_address: /dns/validator2/udp/8082 34 | network_address: /dns/validator2/tcp/8080/http 35 | p2p_address: /dns/validator2/udp/8084 36 | stake: 20000000000000000 37 | - commission_rate: 0 38 | consensus_address: /ip4/127.0.0.1/tcp/8083/http 39 | gas_price: 1000 40 | name: validator3 41 | narwhal_primary_address: /dns/validator3/udp/8081 42 | narwhal_worker_address: /dns/validator3/udp/8082 43 | network_address: /dns/validator3/tcp/8080/http 44 | p2p_address: /dns/validator3/udp/8084 45 | stake: 20000000000000000 46 | - commission_rate: 0 47 | consensus_address: /ip4/127.0.0.1/tcp/8083/http 48 | gas_price: 1000 49 | name: validator4 50 | narwhal_primary_address: /dns/validator4/udp/8081 51 | narwhal_worker_address: /dns/validator4/udp/8082 52 | network_address: /dns/validator4/tcp/8080/http 53 | p2p_address: /dns/validator4/udp/8084 54 | stake: 20000000000000000 55 | -------------------------------------------------------------------------------- /nexus_sdk/src/nexus_sdk/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | from pysui import SuiConfig 4 | from pysui.sui.sui_clients.sync_client import SuiClient 5 | from 
# Returns Sui client with the given private key.
def get_sui_client(
    private_key,
    rpc_url="http://localhost:9000",
    ws_url="ws://localhost:9000",
):
    return SuiClient(
        SuiConfig.user_config(
            rpc_url=rpc_url,
            ws_url=ws_url,
            prv_keys=[private_key],
        )
    )


# Utility function to create a Sui client with airdrop (faucet).
#
# If a usable keystore already exists its first key is reused; otherwise a new
# ED25519 keypair is generated, funded from the faucet, and only then persisted.
def get_sui_client_with_airdrop(
    rpc_url="http://localhost:9000",
    ws_url="ws://localhost:9000",
    faucet_url="http://localhost:5003/gas",
    keystore_path=Path("./sui.keystore"),
):
    # An empty keystore file is treated the same as a missing one. The
    # original touch()ed the file *before* requesting faucet funds, so a
    # failed faucet call stranded an empty file and every subsequent run
    # raised "keystore file is empty". The keystore is now only written after
    # funding succeeds.
    if keystore_path.exists() and keystore_path.stat().st_size > 0:
        with open(keystore_path, "r") as f:
            keys = json.load(f)
        if not keys:
            raise ValueError(
                "Sui keystore file is empty. Please check your Sui configuration."
            )
        private_key = keys[0]  # Assuming the first key is used
        return get_sui_client(private_key, rpc_url=rpc_url, ws_url=ws_url)

    keystore_path.parent.mkdir(parents=True, exist_ok=True)
    sui_config = SuiConfig.user_config(rpc_url=rpc_url, ws_url=ws_url)

    _, address = sui_config.create_new_keypair_and_address(
        scheme=SignatureScheme.ED25519
    )

    # NOTE(review): pysui exposes no public setter for the faucet URL here, so
    # the private attribute is set directly (as in the original).
    sui_config._faucet_url = faucet_url

    client = SuiClient(sui_config)

    result = client.get_gas_from_faucet()
    if not result:
        raise Exception("Failed to get gas from faucet")

    # Persist the funded keypair; the file is created only on this success path.
    keystore_path.touch()
    sui_config._write_keypairs(keystore_path)
    print(f"New wallet created and funded. Address: {address}")
    return client
Optionally, have Ollama running with the `mistral` model.
61 | 62 | There's a oneclick test script [`oneclick-test.sh`](./oneclick-test.sh) that sets up the environment and runs the test binary. 63 | It emulates what happens in CI. 64 | -------------------------------------------------------------------------------- /e2e_tests/src/ollama_mock.rs: -------------------------------------------------------------------------------- 1 | //! If ollama http API env var is not provided, we spawn a simple HTTP server 2 | //! to mock those APIs that return a static response. 3 | 4 | use { 5 | crate::prelude::*, 6 | axum::{routing::post, Router}, 7 | reqwest::Url, 8 | std::str::FromStr, 9 | }; 10 | 11 | pub(crate) async fn start() -> Result { 12 | let app = Router::new().route("/", post(mocked_model_response)); 13 | 14 | let addr = "0.0.0.0:3000"; 15 | let listener = tokio::net::TcpListener::bind(addr).await?; 16 | 17 | tokio::spawn(async move { 18 | if let Err(err) = axum::serve(listener, app).await { 19 | error!("Failed to start mock ollama HTTP server: {err}"); 20 | } 21 | }); 22 | 23 | Ok(FromStr::from_str(&format!("http://{addr}"))?) 24 | } 25 | 26 | /// Prints "This is a mock LLM response." and is done. 
module talus::prompt {
    //! A prompt represents a request for a model to generate a response.
    //!
    //! The [`RequestForCompletionEvent`] is emitted every time cluster execution
    //! is in need of a completion.
    //! The off-chain node that runs the model's inference listens to this event
    //! and submits the completion back to the chain.

    use std::string::{Self, String};
    use sui::event;
    use talus::model::{Self, ModelInfo};
    use talus::tool::Tool;

    // === Errors ===

    /// Raised when `prompt_contents` has zero length.
    const EPromptCannotBeEmpty: u64 = 1;
    // NOTE(review): the name says "between hundred and zero" but the check
    // below accepts 0..=200 — confirm which bound is intended and rename or
    // fix accordingly.
    const ETemperatureMustBeBetweenHundredAndZero: u64 = 2;

    // === Events ===

    /// Emitted when a cluster execution needs a completion from a model.
    // NOTE(review): several `vector`/`Option` fields appear without type
    // parameters in the reviewed copy (likely lost in extraction) — restore
    // from the original source.
    public struct RequestForCompletionEvent has copy, drop {
        cluster_execution: ID,
        node: ID,
        model: ID,
        external_provider: String,
        model_name: String,
        prompt_contents: String,
        prompt_hash: vector,
        max_tokens: u64,
        /// A value between 0 and 200 (see the assertion in
        /// `emit_request_for_completion`; the field comment previously said
        /// 0-100).
        temperature: u8,
        extra_arguments: vector,
        tool: Option,
    }

    // === Package protected ===

    /// Emits an event that's listened to by the off-chain node that runs the
    /// model.
    ///
    /// This is called within the context of the cluster execution hence package
    /// protected.
    ///
    /// Aborts with `ETemperatureMustBeBetweenHundredAndZero` when
    /// `temperature > 200`, and with `EPromptCannotBeEmpty` when
    /// `prompt_contents` is empty.
    public(package) fun emit_request_for_completion(
        model: &ModelInfo,
        external_provider: String,
        prompt_contents: String,
        prompt_hash: vector,
        max_tokens: u64,
        temperature: u8, // 0-200
        extra_arguments: vector,
        cluster_execution: ID,
        tool: Option,
    ) {
        assert!(temperature <= 200, ETemperatureMustBeBetweenHundredAndZero);
        // NOTE(review): trivially true for an unsigned u8 — dead check.
        assert!(temperature >= 0, ETemperatureMustBeBetweenHundredAndZero);
        assert!(string::length(&prompt_contents) > 0, EPromptCannotBeEmpty);

        event::emit(RequestForCompletionEvent {
            node: model::get_node_id(model),
            model: model::get_id(model),
            cluster_execution,
            model_name: model::get_name(model),
            external_provider: external_provider,
            prompt_contents,
            prompt_hash,
            max_tokens,
            temperature,
            extra_arguments,
            tool,
        });
    }
}
-------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any, Iterator, List, Optional 3 | 4 | from langchain_core.callbacks import CallbackManagerForLLMRun 5 | from langchain_core.language_models.chat_models import BaseChatModel 6 | from langchain_core.messages import BaseMessage, AIMessage 7 | from langchain_core.outputs import ChatResult, ChatGeneration 8 | 9 | from langchain_community.llms.ollama import OllamaEndpointNotFoundError, _OllamaCommon 10 | 11 | 12 | class TalusChatOllama(BaseChatModel, _OllamaCommon): 13 | def __init__( 14 | self, 15 | prompt_contract: Any, 16 | completion_contract: Any, 17 | **kwargs: Any, 18 | ): 19 | super().__init__(**kwargs) 20 | self.prompt_contract = prompt_contract 21 | self.completion_contract = completion_contract 22 | 23 | @property 24 | def _llm_type(self) -> str: 25 | return "blockchain-ollama-chat" 26 | 27 | def _generate( 28 | self, 29 | messages: List[BaseMessage], 30 | stop: Optional[List[str]] = None, 31 | run_manager: Optional[CallbackManagerForLLMRun] = None, 32 | **kwargs: Any, 33 | ) -> ChatResult: 34 | # Call the prompt contract to retrieve the prompt 35 | prompt = self.prompt_contract.get_prompt() 36 | 37 | # Convert the prompt to the format expected by Ollama 38 | ollama_messages = self._convert_messages_to_ollama_messages([prompt]) 39 | 40 | # Call the Ollama API to generate the completion 41 | final_chunk = self._chat_stream_with_aggregation( 42 | ollama_messages, 43 | stop=stop, 44 | run_manager=run_manager, 45 | verbose=self.verbo**kwargs, 46 | ) 47 | 48 | # Extract the generated text from the final chunk 49 | generated_text = final_chunk.text 50 | 51 | # Call the completion contract to store the completion 52 | self.completion_contract.store_completion(generated_text) 53 | 54 | # Create a ChatGeneration object with the generated text 55 | chat_generation = ChatGeneration( 56 | message=AIMessage(content=generated_text), 57 | 
generation_info=final_chunk.generation_info, 58 | ) 59 | 60 | return ChatResult(generations=[chat_generation]) 61 | 62 | def _stream( 63 | self, 64 | messages: List[BaseMessage], 65 | stop: Optional[List[str]] = None, 66 | run_manager: Optional[CallbackManagerForLLMRun] = None, 67 | **kwargs: Any, 68 | ) -> Iterator[ChatGeneration]: 69 | raise NotImplementedError( 70 | "Streaming is not supported for BlockchainChatOllama." 71 | ) 72 | -------------------------------------------------------------------------------- /docker/nexus/bin/start_events.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import subprocess 4 | from pathlib import Path 5 | 6 | # Set paths 7 | shared_dir = Path(os.getenv("SHARED_DIR", ".")) 8 | keystore_path = Path(shared_dir) / "sui.keystore" 9 | 10 | # Extract details from JSON files 11 | package_id_path = Path(shared_dir) / "package_id.json" 12 | node_details_path = Path(shared_dir) / "node_details.json" 13 | 14 | 15 | rpc_url = os.getenv("RPC_URL", "http://localhost:9000") 16 | ws_url = os.getenv("WS_URL", "ws://localhost:9000") 17 | tool_url = os.getenv("TOOL_URL", "http://0.0.0.0:8080/tool/use") 18 | 19 | # Load package ID 20 | try: 21 | with open(package_id_path, "r") as f: 22 | package_id = json.load(f)[0] 23 | except (FileNotFoundError, IndexError, json.JSONDecodeError) as e: 24 | print(f"Error: Unable to load package ID from {package_id_path}. Details: {e}") 25 | exit(1) 26 | 27 | # Load node details 28 | try: 29 | with open(node_details_path, "r") as f: 30 | node_details = json.load(f) 31 | model_owner_cap_id = node_details.get("llama_owner_cap_id") 32 | except (FileNotFoundError, json.JSONDecodeError) as e: 33 | print(f"Error: Unable to load node details from {node_details_path}. 
Details: {e}") 34 | exit(1) 35 | 36 | if not model_owner_cap_id: 37 | print("Error: Model owner capability ID is missing.") 38 | exit(1) 39 | 40 | # Load SUI private key from keystore JSON 41 | try: 42 | with open(keystore_path, "r") as f: 43 | keys = json.load(f) 44 | if not keys: 45 | raise ValueError( 46 | "Sui keystore file is empty. Please check your Sui configuration." 47 | ) 48 | private_key = keys[0] # Assuming the first key is used 49 | except (FileNotFoundError, json.JSONDecodeError, ValueError) as e: 50 | print(f"Error: Unable to load SUI private key from {keystore_path}. Details: {e}") 51 | exit(1) 52 | 53 | # Set environment variables 54 | os.environ["PACKAGE_ID"] = package_id 55 | os.environ["SUI_PRIVATE_KEY"] = private_key 56 | os.environ["MODEL_OWNER_CAP_ID"] = model_owner_cap_id 57 | 58 | # Command to run the Python script 59 | command = [ 60 | "python", 61 | "events/src/nexus_events/sui_event.py", 62 | "--packageid", 63 | package_id, 64 | "--privkey", 65 | private_key, 66 | "--modelownercapid", 67 | model_owner_cap_id, 68 | "--rpc", 69 | rpc_url, 70 | "--ws", 71 | ws_url, 72 | "--toolurl", 73 | tool_url, # New argument for tool URL 74 | ] 75 | 76 | print(f"Running command: {' '.join(command)}") 77 | 78 | # Execute the command 79 | try: 80 | subprocess.run(command, check=True) 81 | except subprocess.CalledProcessError as e: 82 | print(f"Error: Failed to execute command. Details: {e}") 83 | exit(1) 84 | -------------------------------------------------------------------------------- /offchain/tools/README.md: -------------------------------------------------------------------------------- 1 | # Tools 2 | 3 | This directory contains offchain tools for LLM inference and other functionalities. 4 | 5 | ## Model Inference 6 | 7 | Model inference currently relies on ollama through the [server/main.py][main_py] route `/predict`, which runs inference 8 | of the defined ollama models. 
9 | 10 | ## Tools 11 | 12 | Available tools are defined in [server/tools/tools.py][tools_py]. Current supported tools are listed 13 | below and any desired tools can be added by following the instructions in **Adding Tools**. 14 | Tools are executed through the [server/main.py][main_py] route `/tool/use`. 15 | 16 | _Note_: to use the OpenAI, Gemini, Scenexplain, or Tavily tools, equivalent api keys must be set in the `.env` and can be obtained here: 17 | 18 | - [OpenAI Key](https://openai.com/index/openai-api/) 19 | - [Scenex Key](https://scenex.jina.ai/api) 20 | - [Tavily Key](https://app.tavily.com) 21 | 22 | The above tools can also be deleted if not desired for simplicity. 23 | 24 | ### Adding Tools 25 | 26 | In [server/tools/tools.py][tools_py], each tool has a defined argument structure which inherits from `pydantic` `BaseModel`, 27 | and a `ToolCallBody` which consists of their name and the argument substructure. 28 | `TOOL_ARGS_MAPPING` is a dictionary of available tools and their args, and `TOOLS` is a dictionary of available tools 29 | and their actual executables, wrapped by the `create_clusterai_tool` function which allows for any lambda 30 | function to be defined as a tool. This setup was intended towards support of definition of tools from onchain. 31 | 32 | ### Supported Tools 33 | 34 | 1. `search`: Web search using DuckDuckGo. 35 | 2. `wikipedia`: Query Wikipedia for information. 36 | 3. `arxiv`: Search academic papers on arXiv. 37 | 4. `pubmed`: Search medical and life sciences literature. 38 | 5. `scene_explain`: Explain the contents of an image. 39 | 6. `shell`: Execute shell commands. 40 | 7. `tavily_search`: Perform searches using Tavily. 41 | 8. `python_repl`: Execute Python code. 42 | 9. `read_file`: Read the contents of a file. 43 | 10. `list_directory`: List the contents of a directory. 44 | 11. `gpt4_vision`: Analyze images using GPT-4 Vision. 45 | 12. `dalle3`: Generate images based on text prompts. 46 | 13. 
`openai_embeddings`: Create text embeddings using OpenAI's API. 47 | 14. `browser`: Scrape and summarize website content. 48 | 15. `instagram_search`: Search for Instagram-specific content. 49 | 50 | Note: Each tool accepts specific arguments as defined in the `TOOL_ARGS_MAPPING` in the `tools.py` file. The AI model can use these tools by specifying the tool name and providing the required arguments. 51 | 52 | ## Tests 53 | 54 | To run the tests: 55 | 56 | ```bash 57 | pip3 install pytest 58 | PYTHONPATH=src pytest tests 59 | ``` 60 | 61 | 62 | [main_py]: ./src/nexus_tools/server/main.py 63 | [tools_py]: ./src/nexus_tools/server/tools/tools.py -------------------------------------------------------------------------------- /offchain/tools/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | 140 | # Mac shit 141 | .DS_Store 142 | 143 | # python things 144 | __pycache__ 145 | __pypackages__ 146 | 147 | # Generator obsolete 148 | .openapi-generator 149 | -------------------------------------------------------------------------------- /offchain/events/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | 140 | # Mac shit 141 | .DS_Store 142 | 143 | # python things 144 | __pycache__ 145 | __pypackages__ 146 | 147 | # Generator obsolete 148 | .openapi-generator 149 | 150 | tmp.py -------------------------------------------------------------------------------- /docker/nexus/bin/bootstrap_model.py: -------------------------------------------------------------------------------- 1 | # Import necessary modules 2 | import json 3 | from pathlib import Path 4 | from nexus_sdk import get_sui_client_with_airdrop, create_node, create_model 5 | import os 6 | 7 | shared_dir = Path(os.getenv("SHARED_DIR", ".")) 8 | package_id_file = Path(shared_dir) / "package_id.json" 9 | keystore_path = Path(shared_dir) / "sui.keystore" 10 | 11 | rpc_url = os.getenv("RPC_URL", "http://localhost:9000") 12 | ws_url = os.getenv("WS_URL", "ws://localhost:9000") 13 | faucet_url = os.getenv("FAUCET_URL", "http://localhost:5003/gas") 14 | 15 | 16 | # Decoupled function to create node and model and save details to a file. 
17 | def create_and_save_node_and_model(client, package_id): 18 | node_id = create_example_node(client, package_id) 19 | llama_id, llama_owner_cap_id = create_llama_model(client, package_id, node_id) 20 | 21 | # Save the node details to a JSON file 22 | shared_dir = Path(os.getenv("SHARED_DIR", ".")) 23 | shared_dir.mkdir(parents=True, exist_ok=True) 24 | node_details = { 25 | "node_id": node_id, 26 | "llama_id": llama_id, 27 | "llama_owner_cap_id": llama_owner_cap_id, 28 | } 29 | with open(shared_dir / "node_details.json", "w") as f: 30 | json.dump(node_details, f, indent=4) 31 | 32 | return node_id, llama_id, llama_owner_cap_id 33 | 34 | 35 | # Creates a new node owned object. 36 | def create_example_node(client, package_id): 37 | node_id = create_node(client, package_id, "LocalNode", "CPU", 16) 38 | if not node_id: 39 | raise Exception("Failed to create node") 40 | return node_id 41 | 42 | 43 | # Creates llama model representation on chain. 44 | # Returns the model ID and the model owner capability ID. 
45 | def create_llama_model(client, package_id, node_id): 46 | model_id, model_owner_cap_id = create_model( 47 | client=client, 48 | package_id=package_id, 49 | node_id=node_id, 50 | name="llama3.2:1b", 51 | model_hash=b"llama3.2_1b_hash", 52 | url=os.getenv("MODEL_URL", "http://localhost:11434"), 53 | token_price=1000, 54 | capacity=1000000, 55 | num_params=1000000000, 56 | description="llama3.2 1b", 57 | max_context_length=8192, 58 | is_fine_tuned=False, 59 | family="Llama3.2", 60 | vendor="Meta", 61 | is_open_source=True, 62 | datasets=["test"], 63 | ) 64 | if not model_id: 65 | raise Exception("Failed to create model") 66 | return model_id, model_owner_cap_id 67 | 68 | 69 | if __name__ == "__main__": 70 | 71 | client = get_sui_client_with_airdrop( 72 | rpc_url=rpc_url, 73 | ws_url=ws_url, 74 | faucet_url=faucet_url, 75 | keystore_path=keystore_path, 76 | ) 77 | with open(package_id_file, "r") as f: 78 | package_id_list = json.load(f) 79 | package_id = package_id_list[0] 80 | 81 | create_and_save_node_and_model(client, package_id) 82 | print("environment prepared successfully") 83 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | !docker/.env 110 | .venv 111 | env/ 112 | venv/ 113 | ENV/ 114 | env.bak/ 115 | venv.bak/ 116 | 117 | # Spyder project settings 118 | .spyderproject 119 | .spyproject 120 | 121 | # Rope project settings 122 | .ropeproject 123 | 124 | # mkdocs documentation 125 | /site 126 | 127 | # mypy 128 | .mypy_cache/ 129 | .dmypy.json 130 | dmypy.json 131 | 132 | # Pyre type checker 133 | .pyre/ 134 | 135 | # pytype static type analyzer 136 | .pytype/ 137 | 138 | # Cython debug symbols 139 | cython_debug/ 140 | 141 | # Mac stuff 142 | .DS_Store 143 | 144 | # python things 145 | __pycache__ 146 | __pypackages__ 147 | 148 | # Generator obsolete 149 | .openapi-generator 150 | 151 | node_modules 152 | 153 | tmp.* 154 | .wrk.local 155 | 156 | Move.lock 157 | model_addresses.json 158 | 159 | # Bash tools 160 | nohup.out 161 | 162 | # IDEs 163 | .idea 164 | *.iml 165 | .vscode 166 | ./docker/sui/genesis/files/ 167 | ./docker/sui/genesis/files/.venv 168 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | We have built a few examples to showcase Nexus agents. 4 | 5 | Before you can use the examples or build your own agents, you need to install a few things first, 6 | as shown in the next section. 
7 |
8 | - [Examples](#examples)
9 | - [Environment setup](#environment-setup)
10 | - [Operating System](#operating-system)
11 | - [Helper tools](#helper-tools)
12 | - [Docker](#docker)
13 | - [Example: Instagram Post Planner](#example-instagram-post-planner)
14 | - [Example: Trip Planner](#example-trip-planner)
15 | - [Example: CLI Cluster](#example-cli-cluster)
16 | - [Tools](#tools)
17 | - [Events](#events)
18 |
19 | ## Environment setup
20 |
21 | ### Operating System
22 |
23 | We support macOS, Windows, and Linux.
24 |
25 | ### Helper tools
26 |
27 | You need to install the following tools by following their official installation instructions:
28 |
29 | - [`docker`][docker]
30 | - [`just`][just] (on Linux install it with "Pre-Built Binaries" rather than with `apt` because of an outdated version)
31 | - [`python`][python]
32 |
33 | We use `just` as a general command runner; run `just` to see the available commands.
34 |
35 | ### Docker
36 |
37 | We use Docker to create a consistent local environment for all examples, ensuring compatibility across macOS, Windows, and Linux. By packaging dependencies into isolated containers, we aim to provide a uniform environment that minimizes compatibility issues. To run these examples, you’ll need Docker Compose version 2.20 or higher.
38 |
39 | **Note for macOS users:** While Ollama can run in a container on macOS, it experiences poor performance due to Docker Desktop for macOS lacking GPU acceleration support. To ensure better performance, Ollama is running directly on the host instead of within a container.
40 |
41 | ## Example: Instagram Post Planner
42 |
43 | This [example][ig_post_planner] demonstrates how to create an Instagram post planner agent using
44 | the Nexus SDK.
45 |
46 | Run with `just example ig-post-planner`.
47 |
48 | ## Example: Trip Planner
49 |
50 | This [example][trip_planner] demonstrates how to create a trip planner agent using the Nexus
51 | SDK.
52 |
53 | Run with `just example trip-planner`.
54 | 55 | ## Example: CLI Cluster 56 | 57 | This [example][cli_cluster] prompts the user to create a [cluster][design_cluster] by describing 58 | agents and tasks on the command line. 59 | 60 | Run with `just example cli-cluster`. 61 | 62 | ## Tools 63 | 64 | Agents can use tools to enhance their capabilities. Please refer to the [`tools` README][tools_README] 65 | for a list of available tools, and instructions on how to add new ones. 66 | 67 | ## Events 68 | 69 | Events allow offchain systems to respond to onchain actions, automating tool execution and model inference based on specific triggers. Please refer to the [`events` README][events_README] for more details. 70 | 71 | 72 | 73 | [docker]: https://docs.docker.com/engine/install/ 74 | [just]: https://github.com/casey/just 75 | [python]: https://www.python.org/downloads/ 76 | [tools_README]: ../offchain/tools/README.md 77 | [events_README]: ../offchain/events/README.md 78 | [ig_post_planner]: ./ig_post_planner.py 79 | [trip_planner]: ./trip_planner.py 80 | [cli_cluster]: ./cli_cluster.py 81 | [design_cluster]: ../onchain/README.md#cluster 82 | -------------------------------------------------------------------------------- /onchain/sources/tests/prompt_tests.move: -------------------------------------------------------------------------------- 1 | #[test_only] 2 | module talus::prompt_tests { 3 | use std::string; 4 | use sui::test_scenario::{Self, ctx}; 5 | use talus::model; 6 | use talus::prompt; 7 | 8 | #[test] 9 | fun test_emit_request_for_completion() { 10 | let owner = @0x1; 11 | let mut scenario = test_scenario::begin(owner); 12 | 13 | // Create a mock Model 14 | let model = model::new_mock_info_for_testing(ctx(&mut scenario)); 15 | 16 | test_scenario::next_tx(&mut scenario, owner); 17 | { 18 | let mock_execution_id = object::new(ctx(&mut scenario)); 19 | 20 | prompt::emit_request_for_completion( 21 | &model, 22 | string::utf8(b"Test Provider"), 23 | string::utf8(b"Test prompt"), 24 | b"test_hash", 25 | 
100, 26 | 50, 27 | vector::empty(), 28 | object::uid_to_inner(&mock_execution_id), 29 | option::none(), // no tool 30 | ); 31 | 32 | object::delete(mock_execution_id); 33 | }; 34 | 35 | test_scenario::end(scenario); 36 | } 37 | 38 | #[test] 39 | #[expected_failure(abort_code = prompt::ETemperatureMustBeBetweenHundredAndZero)] 40 | fun test_invalid_temperature() { 41 | let owner = @0x1; 42 | let mut scenario = test_scenario::begin(owner); 43 | 44 | // Create a mock Model 45 | let model = model::new_mock_info_for_testing(ctx(&mut scenario)); 46 | 47 | test_scenario::next_tx(&mut scenario, owner); 48 | { 49 | let mock_execution_id = object::new(ctx(&mut scenario)); 50 | prompt::emit_request_for_completion( 51 | &model, 52 | string::utf8(b"Test Provider"), 53 | string::utf8(b"Test prompt"), 54 | b"test_hash", 55 | 100, 56 | 201, // Invalid temperature 57 | vector::empty(), 58 | object::uid_to_inner(&mock_execution_id), 59 | option::none(), // no tool 60 | ); 61 | 62 | object::delete(mock_execution_id); 63 | }; 64 | 65 | test_scenario::end(scenario); 66 | } 67 | 68 | #[test] 69 | #[expected_failure(abort_code = prompt::EPromptCannotBeEmpty)] 70 | fun test_empty_prompt() { 71 | let owner = @0x1; 72 | let mut scenario = test_scenario::begin(owner); 73 | 74 | // Create a mock Model 75 | let model = model::new_mock_info_for_testing(ctx(&mut scenario)); 76 | 77 | test_scenario::next_tx(&mut scenario, owner); 78 | { 79 | let mock_execution_id = object::new(ctx(&mut scenario)); 80 | prompt::emit_request_for_completion( 81 | &model, 82 | string::utf8(b"Test Provider"), 83 | string::utf8(b""), // Empty prompt 84 | b"test_hash", 85 | 100, 86 | 50, 87 | vector::empty(), 88 | object::uid_to_inner(&mock_execution_id), 89 | option::none(), // no tool 90 | ); 91 | 92 | object::delete(mock_execution_id); 93 | }; 94 | 95 | test_scenario::end(scenario); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /docker/nexus/compose.yaml: 
-------------------------------------------------------------------------------- 1 | services: 2 | tools: 3 | container_name: tools 4 | image: talusnetwork/nexus-tools:latest 5 | build: 6 | context: "../../offchain/tools/" 7 | dockerfile: "../../docker/nexus/Dockerfile" 8 | additional_contexts: 9 | nexus: ../../docker/nexus 10 | ports: 11 | - "8080:8080" 12 | restart: unless-stopped 13 | environment: 14 | OLLAMA_HOST: ${MODEL_URL} 15 | logging: 16 | driver: "json-file" 17 | options: 18 | max-file: "10" 19 | max-size: "1g" 20 | depends_on: 21 | build-suitools: 22 | condition: service_completed_successfully 23 | publish-package: 24 | condition: service_completed_successfully 25 | bootstrap-model: 26 | condition: service_completed_successfully 27 | 28 | events: 29 | container_name: events 30 | image: talusnetwork/nexus-events:latest 31 | build: 32 | context: "../../offchain/" 33 | dockerfile: "../docker/nexus/Dockerfile" 34 | additional_contexts: 35 | nexus: ../../docker/nexus 36 | args: 37 | INSTALL_RUST: "true" 38 | command: > 39 | bash -c "source .venv/bin/activate && python start_events.py" 40 | logging: 41 | driver: "json-file" 42 | options: 43 | max-file: "10" 44 | max-size: "1g" 45 | environment: 46 | RPC_URL: ${RPC_URL} 47 | WS_URL: ${WS_URL} 48 | TOOL_URL: ${TOOL_URL} 49 | LLM_ASSISTANT_URL: ${LLM_ASSISTANT_URL} 50 | SHARED_DIR: /app/shared 51 | volumes: 52 | - shared:/app/shared 53 | - ./bin/start_events.py:/app/start_events.py:ro 54 | restart: unless-stopped 55 | depends_on: 56 | build-suitools: 57 | condition: service_completed_successfully 58 | publish-package: 59 | condition: service_completed_successfully 60 | bootstrap-model: 61 | condition: service_completed_successfully 62 | tools: 63 | condition: service_started 64 | 65 | bootstrap-model: 66 | container_name: bootstrap-model 67 | image: talusnetwork/nexus-bootstrap-model:latest 68 | build: 69 | context: "../../nexus_sdk/" 70 | dockerfile: "../docker/nexus/Dockerfile" 71 | additional_contexts: 72 | 
nexus: ../../docker/nexus 73 | args: 74 | INSTALL_RUST: "true" 75 | environment: 76 | RPC_URL: ${RPC_URL} 77 | WS_URL: ${WS_URL} 78 | MODEL_URL: ${MODEL_URL} 79 | FAUCET_URL: ${FAUCET_URL} 80 | SHARED_DIR: /app/shared 81 | volumes: 82 | - ./bin/bootstrap_model.py:/app/bootstrap_model.py 83 | - shared:/app/shared 84 | command: > 85 | bash -c "source .venv/bin/activate && python bootstrap_model.py" 86 | restart: on-failure 87 | depends_on: 88 | publish-package: 89 | condition: service_completed_successfully 90 | faucet: 91 | condition: service_healthy 92 | 93 | examples: 94 | image: talusnetwork/nexus-examples:latest 95 | container_name: examples 96 | build: 97 | context: "../../nexus_sdk/" 98 | dockerfile: "../docker/nexus/Dockerfile" 99 | additional_contexts: 100 | nexus: ../../docker/nexus 101 | args: 102 | INSTALL_RUST: "true" 103 | environment: 104 | RPC_URL: ${RPC_URL} 105 | WS_URL: ${WS_URL} 106 | SHARED_DIR: /app/shared 107 | volumes: 108 | - ../../examples:/app/examples 109 | - shared:/app/shared 110 | command: ["tail", "-f", "/dev/null"] 111 | restart: on-failure 112 | depends_on: 113 | publish-package: 114 | condition: service_completed_successfully 115 | bootstrap-model: 116 | condition: service_completed_successfully 117 | tools: 118 | condition: service_started 119 | events: 120 | condition: service_started 121 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Business Source License 1.1 2 | License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. 3 | “Business Source License” is a trademark of MariaDB Corporation Ab. 4 | ____________________________________________________________________________ 5 | 6 | Business Source License 1.1 7 | Parameters 8 | Licensor: Talus Labs, Inc. 9 | Licensed Work: Talus AI Agent Framework © 2024 Talus Labs, Inc. 
10 | Change Date: December 31, 2027 11 | Change License: Apache License, Version 2.0 12 | ____________________________________________________________________________ 13 | 14 | Terms 15 | The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. 16 | 17 | Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. 18 | 19 | If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. 20 | 21 | All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. 22 | 23 | You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. 24 | Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. 
25 | 26 | This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). 27 | 28 | TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. 29 | _____________________________________________________________________________ 30 | Covenants of Licensor 31 | In consideration of the right to use this License’s text and the “Business Source License” name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 32 | 33 | To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where “compatible” means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 34 | 35 | To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text “None” to specify a Change Date. Not to modify this License in any other way. 36 | ____________________________________________________________________________ 37 | Notice 38 | The Business Source License (this document, or the “License”) is not an Open Source license. However, the Licensed Work will eventually be made available under an Open Source License, as stated in this License. 
39 | -------------------------------------------------------------------------------- /offchain/LICENSE: -------------------------------------------------------------------------------- 1 | Business Source License 1.1 2 | License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. 3 | “Business Source License” is a trademark of MariaDB Corporation Ab. 4 | ____________________________________________________________________________ 5 | 6 | Business Source License 1.1 7 | Parameters 8 | Licensor: Talus Labs, Inc. 9 | Licensed Work: Talus AI Agent Framework © 2024 Talus Labs, Inc. 10 | Change Date: December 31, 2027 11 | Change License: Apache License, Version 2.0 12 | ____________________________________________________________________________ 13 | 14 | Terms 15 | The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. 16 | 17 | Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. 18 | 19 | If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. 20 | 21 | All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. 
22 | 23 | You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. 24 | Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. 25 | 26 | This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). 27 | 28 | TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. 29 | _____________________________________________________________________________ 30 | Covenants of Licensor 31 | In consideration of the right to use this License’s text and the “Business Source License” name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 32 | 33 | To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where “compatible” means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 34 | 35 | To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text “None” to specify a Change Date. Not to modify this License in any other way. 
36 | ____________________________________________________________________________ 37 | Notice 38 | The Business Source License (this document, or the “License”) is not an Open Source license. However, the Licensed Work will eventually be made available under an Open Source License, as stated in this License. 39 | -------------------------------------------------------------------------------- /onchain/LICENSE: -------------------------------------------------------------------------------- 1 | Business Source License 1.1 2 | License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. 3 | “Business Source License” is a trademark of MariaDB Corporation Ab. 4 | ____________________________________________________________________________ 5 | 6 | Business Source License 1.1 7 | Parameters 8 | Licensor: Talus Labs, Inc. 9 | Licensed Work: Talus AI Agent Framework © 2024 Talus Labs, Inc. 10 | Change Date: December 31, 2027 11 | Change License: Apache License, Version 2.0 12 | ____________________________________________________________________________ 13 | 14 | Terms 15 | The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. 16 | 17 | Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. 18 | 19 | If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. 
20 | 21 | All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. 22 | 23 | You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. 24 | Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. 25 | 26 | This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). 27 | 28 | TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. 29 | _____________________________________________________________________________ 30 | Covenants of Licensor 31 | In consideration of the right to use this License’s text and the “Business Source License” name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 32 | 33 | To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where “compatible” means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 
34 | 35 | To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text “None” to specify a Change Date. Not to modify this License in any other way. 36 | ____________________________________________________________________________ 37 | Notice 38 | The Business Source License (this document, or the “License”) is not an Open Source license. However, the Licensed Work will eventually be made available under an Open Source License, as stated in this License. 39 | -------------------------------------------------------------------------------- /offchain/events/LICENSE: -------------------------------------------------------------------------------- 1 | Business Source License 1.1 2 | License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. 3 | “Business Source License” is a trademark of MariaDB Corporation Ab. 4 | ____________________________________________________________________________ 5 | 6 | Business Source License 1.1 7 | Parameters 8 | Licensor: Talus Labs, Inc. 9 | Licensed Work: Talus AI Agent Framework © 2024 Talus Labs, Inc. 10 | Change Date: December 31, 2027 11 | Change License: Apache License, Version 2.0 12 | ____________________________________________________________________________ 13 | 14 | Terms 15 | The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. 16 | 17 | Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. 
18 | 19 | If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. 20 | 21 | All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. 22 | 23 | You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. 24 | Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. 25 | 26 | This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). 27 | 28 | TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. 
29 | _____________________________________________________________________________ 30 | Covenants of Licensor 31 | In consideration of the right to use this License’s text and the “Business Source License” name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 32 | 33 | To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where “compatible” means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 34 | 35 | To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text “None” to specify a Change Date. Not to modify this License in any other way. 36 | ____________________________________________________________________________ 37 | Notice 38 | The Business Source License (this document, or the “License”) is not an Open Source license. However, the Licensed Work will eventually be made available under an Open Source License, as stated in this License. 39 | -------------------------------------------------------------------------------- /offchain/tools/LICENSE: -------------------------------------------------------------------------------- 1 | Business Source License 1.1 2 | License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. 3 | “Business Source License” is a trademark of MariaDB Corporation Ab. 4 | ____________________________________________________________________________ 5 | 6 | Business Source License 1.1 7 | Parameters 8 | Licensor: Talus Labs, Inc. 9 | Licensed Work: Talus AI Agent Framework © 2024 Talus Labs, Inc. 
10 | Change Date: December 31, 2027 11 | Change License: Apache License, Version 2.0 12 | ____________________________________________________________________________ 13 | 14 | Terms 15 | The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. 16 | 17 | Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. 18 | 19 | If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. 20 | 21 | All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. 22 | 23 | You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. 24 | Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. 
25 | 26 | This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). 27 | 28 | TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. 29 | _____________________________________________________________________________ 30 | Covenants of Licensor 31 | In consideration of the right to use this License’s text and the “Business Source License” name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 32 | 33 | To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where “compatible” means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 34 | 35 | To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text “None” to specify a Change Date. Not to modify this License in any other way. 36 | ____________________________________________________________________________ 37 | Notice 38 | The Business Source License (this document, or the “License”) is not an Open Source license. However, the Licensed Work will eventually be made available under an Open Source License, as stated in this License. 
39 | -------------------------------------------------------------------------------- /e2e_tests/oneclick-test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # 4 | # The purpose of this bash script is to run the end-to-end tests for the Talus 5 | # package in the localnet environment with one command. 6 | # 7 | # The script assumes: 8 | # - sui CLI, jq, cargo 9 | # - env var SUI_WALLET_PATH poiting to the wallet yaml 10 | # or it can be alternatively defined in .env file (see README) 11 | # - pwd is a git repo 12 | # 13 | # 1. Assert that dependencies are installed 14 | # 2. Assert that the script is run from the correct directory 15 | # 3. Start sui _localnet_ node in the background 16 | # 4. Wait for sui RPC to be available 17 | # 5. Publish the Talus package to the sui node and get its package ID 18 | # 6. Run the E2E tests against the published package but with mocked ollama 19 | # 7. Kill the sui node 20 | # 21 | # This script has been tested on Ubuntu 22.04. 22 | # 23 | 24 | # 25 | # 1. 26 | # 27 | 28 | sui --version 29 | if [ $? -ne 0 ]; then 30 | echo "Sui CLI is not installed" 31 | exit 1 32 | fi 33 | jq --version 34 | if [ $? -ne 0 ]; then 35 | echo "jq is not installed" 36 | exit 1 37 | fi 38 | cargo --version 39 | if [ $? -ne 0 ]; then 40 | echo "cargo is not installed" 41 | exit 1 42 | fi 43 | 44 | # 45 | # 2. 46 | # 47 | 48 | # get the root dir, assuming we are under a git repo structure 49 | root_dir=$(git rev-parse --show-toplevel) 50 | if [ $? -ne 0 ]; then 51 | echo "Not in a git repo" 52 | exit 1 53 | fi 54 | pkg_path="${root_dir}/onchain" 55 | e2e_path="${root_dir}/e2e_tests" 56 | 57 | if [ ! -d "$pkg_path" ]; then 58 | echo "Talus package path ${pkg_path} does not exist" 59 | exit 1 60 | fi 61 | if [ ! -d "$e2e_path" ]; then 62 | echo "E2E Rust tests path ${e2e_path} does not exist" 63 | exit 1 64 | fi 65 | 66 | # 67 | # 3. 
68 | # 69 | 70 | # assert that active env is localnet 71 | # we assume that you have set it up, e.g. using `sui genesis` 72 | # we first run 'sui client active-env' which prompts the user to create an 73 | # environment if it doesn't exist yet 74 | echo "Expecting active environment to be localnet at http://localhost:9000" 75 | sui client active-env || exit 1 76 | active_env=$(sui client active-env sui.log 2>&1 & 84 | sui_pid=$! # will be used to kill sui process later 85 | kill_sui_localnet() { 86 | echo "Killing sui node" 87 | kill $sui_pid 88 | } 89 | 90 | # handle Control-C, which is can be useful when running the script interactively 91 | trap 'kill_sui_localnet' INT 92 | 93 | # 94 | # 4. 95 | # 96 | 97 | echo "Waiting for sui to start" 98 | # retry sui client balance 10 times with 2 second delay until it succeeds 99 | # or exit if it fails after 10 retries 100 | max_retries=10 101 | for i in $(seq 1 $max_retries); do 102 | balance=$(sui client balance) 103 | if [ $? -eq 0 ]; then 104 | break 105 | fi 106 | if [ $i -eq $max_retries ]; then 107 | echo "Failed to start sui." 108 | echo "Try 'sui start' and see what might be the issue." 109 | echo "You need 'sui genesis' if you haven't started the localnet yet" 110 | # send exit signal just in case 111 | kill_sui_localnet 112 | exit 1 113 | fi 114 | sleep 2 115 | done 116 | 117 | # kill the sui node if the tests fail 118 | trap 'kill_sui_localnet' ERR 119 | 120 | # 121 | # 5. 122 | # 123 | 124 | echo "Publishing package" 125 | cd $pkg_path 126 | json=$(sui client publish --skip-dependency-verification --json) 127 | if [ $? -ne 0 ]; then 128 | echo "Failed to publish package:" 129 | echo 130 | echo 131 | echo $json 132 | exit 1 133 | fi 134 | 135 | fw_pkg_id=$(echo $json | jq -cr '.objectChanges[] | select(.packageId) | .packageId') 136 | # assert fw_pkg_id starts with 0x as a valid object ID 137 | if [[ ! 
$fw_pkg_id =~ ^0x ]]; then 138 | echo "Invalid package ID: ${fw_pkg_id}" 139 | exit 1 140 | fi 141 | 142 | # 143 | # 6. 144 | # 145 | 146 | echo "Running E2E tests" 147 | cd $e2e_path 148 | # start with mocked ollama 149 | FW_PKG_ID="${fw_pkg_id}" \ 150 | OLLAMA_HTTP_API="" \ 151 | RUST_LOG="info" \ 152 | cargo run 153 | if [ $? -ne 0 ]; then 154 | echo "E2E tests failed" 155 | exit 1 156 | fi 157 | 158 | # 159 | # 7. 160 | # 161 | 162 | kill_sui_localnet 163 | -------------------------------------------------------------------------------- /e2e_tests/src/prompt.rs: -------------------------------------------------------------------------------- 1 | //! Runs a simple test: sends a prompt to the cluster and waits for the response. 2 | //! The status of the cluster execution is checked every [`SLEEP_BETWEEN_CHECKS_FOR_SUCCESS`]. 3 | //! If the execution is not done after [`MAX_WAIT_FOR_SUCCESS`], the test fails. 4 | //! Being done is defined as having a status [`STATUS_DONE`] and a non-empty `cluster_response`. 5 | 6 | use { 7 | crate::{prelude::*, setup::TestsSetup, SuiJsonValueExt, TestsContext}, 8 | std::{str::FromStr, time::Duration}, 9 | sui_sdk::{ 10 | json::SuiJsonValue, 11 | rpc_types::{SuiObjectDataOptions, SuiParsedData}, 12 | }, 13 | tokio::time::Instant, 14 | }; 15 | 16 | const CLUSTER_EXECUTION_CREATED_EVENT: &str = "ClusterExecutionCreatedEvent"; 17 | const SLEEP_BETWEEN_CHECKS_FOR_SUCCESS: Duration = Duration::from_secs(1); 18 | /// This test will wait after 2 minutes without the execution being done. 
19 | const MAX_WAIT_FOR_SUCCESS: Duration = Duration::from_secs(120); 20 | const CLUSTER_MODULE: &str = "cluster"; 21 | const STATUS_DONE: &str = "SUCCESS"; 22 | const CLUSTER_EXECUTE_FUNCTION: &str = "execute"; 23 | 24 | pub async fn send_and_expect_answer( 25 | ctx: &mut TestsContext, 26 | resources: &TestsSetup, 27 | ) -> Result<()> { 28 | let events = ctx 29 | .move_call( 30 | CLUSTER_MODULE, 31 | CLUSTER_EXECUTE_FUNCTION, 32 | vec![ 33 | SuiJsonValue::from_object_id(resources.cluster.id), 34 | SuiJsonValue::from_str_to_string( 35 | "Write a poem about sleep or there lack of", 36 | )?, 37 | ], 38 | ) 39 | .await? 40 | .events 41 | .ok_or_else(|| anyhow!("No events in response"))? 42 | .data; 43 | 44 | // extract the execution ID from the tx response 45 | let Some(execution_id) = events 46 | .into_iter() 47 | .find(|event| { 48 | event.type_.name.to_string() == CLUSTER_EXECUTION_CREATED_EVENT 49 | }) 50 | .map(|event| event.parsed_json["execution"].clone()) 51 | else { 52 | anyhow::bail!( 53 | "No {CLUSTER_EXECUTION_CREATED_EVENT}.execution event in response", 54 | ); 55 | }; 56 | let execution_id = execution_id.as_str().ok_or_else(|| { 57 | anyhow!("{CLUSTER_EXECUTION_CREATED_EVENT}.execution is not a string") 58 | })?; 59 | let execution_id = ObjectID::from_str(execution_id)?; 60 | info!("Sent a new prompt, execution: {execution_id}"); 61 | 62 | wait_for_all_tasks_to_be_done(ctx, execution_id).await?; 63 | 64 | Ok(()) 65 | } 66 | 67 | /// A better approach here would be to wait for the final event of this 68 | /// execution perhaps, but this is simple enough. 69 | async fn wait_for_all_tasks_to_be_done( 70 | ctx: &mut TestsContext, 71 | execution_id: ObjectID, 72 | ) -> Result<()> { 73 | let started_at = Instant::now(); 74 | loop { 75 | let object_data = ctx 76 | .client() 77 | .await? 78 | .read_api() 79 | .get_object_with_options( 80 | execution_id, 81 | SuiObjectDataOptions::full_content(), 82 | ) 83 | .await? 
84 | .data 85 | .ok_or_else(|| anyhow!("No data in response for {execution_id}"))?; 86 | 87 | let Some(SuiParsedData::MoveObject(object_data)) = object_data.content 88 | else { 89 | anyhow::bail!("No MoveObject in response for {execution_id}"); 90 | }; 91 | let json = object_data.fields.to_json_value(); 92 | let status = json["status"].as_str().ok_or_else(|| { 93 | anyhow!("No status in response for {execution_id}") 94 | })?; 95 | 96 | if status == STATUS_DONE { 97 | let response = 98 | json["cluster_response"].as_str().ok_or_else(|| { 99 | anyhow!( 100 | "No cluster_response in response for {execution_id}" 101 | ) 102 | })?; 103 | 104 | if response.is_empty() { 105 | anyhow::bail!( 106 | "Prompt {execution_id} is done, but cluster_response is empty.\ 107 | Last execution object state: {json:#?}" 108 | ); 109 | } 110 | 111 | info!("Prompt {execution_id} is done:\n\n\n{response}\n\n\n"); 112 | break; 113 | } else if started_at.elapsed() > MAX_WAIT_FOR_SUCCESS { 114 | anyhow::bail!( 115 | "Prompt {execution_id} is not done after \ 116 | {MAX_WAIT_FOR_SUCCESS:?}. 
\ 117 | Last execution object state: {json:#?}" 118 | ); 119 | } 120 | 121 | tokio::time::sleep(SLEEP_BETWEEN_CHECKS_FOR_SUCCESS).await; 122 | } 123 | 124 | Ok(()) 125 | } 126 | -------------------------------------------------------------------------------- /examples/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import subprocess 4 | import json 5 | import argparse 6 | from pathlib import Path 7 | from cli_cluster import run_cli_cluster_example 8 | from colorama import init as colorama_init 9 | from ig_post_planner import run_ig_post_planner_example 10 | from trip_planner import run_trip_planner_example 11 | from nexus_sdk import get_sui_client 12 | 13 | # We know that this script is located in the ./examples directory, so we go 14 | # one level up to get the root directory of the repository 15 | repo_root_dir = Path(__file__).resolve().parent.parent 16 | 17 | # Define paths to shared resources 18 | shared_dir = Path(os.getenv("SHARED_DIR", ".")) 19 | keystore_path = Path(shared_dir) / "sui.keystore" 20 | package_id_path = Path(shared_dir) / "package_id.json" 21 | node_details_path = Path(shared_dir) / "node_details.json" 22 | 23 | rpc_url = os.getenv("RPC_URL", "http://localhost:9000") 24 | ws_url = os.getenv("WS_URL", "ws://localhost:9000") 25 | 26 | # Maps example name to a function that runs it. 27 | # In essence, this is the source of truth for supported examples. 28 | EXAMPLES = { 29 | "trip_planner": run_trip_planner_example, 30 | "ig_post_planner": run_ig_post_planner_example, 31 | "cli_cluster": run_cli_cluster_example, 32 | } 33 | 34 | 35 | # Runs given command and returns the output. 
36 | def run_command(command, cwd=None): 37 | result = subprocess.run( 38 | command, cwd=cwd, shell=True, capture_output=True, text=True 39 | ) 40 | if result.returncode != 0: 41 | print(f"Error executing command: {command}") 42 | print(f"Error output: {result.stdout}\n\n{result.stderr}") 43 | raise Exception(f"Command failed: {command}") 44 | return result.stdout 45 | 46 | 47 | def load_configuration(): 48 | """Load the required configuration from predefined paths.""" 49 | # Load package ID 50 | try: 51 | with open(package_id_path, "r") as f: 52 | package_id = json.load(f)[0] 53 | except (FileNotFoundError, IndexError, json.JSONDecodeError) as e: 54 | print(f"Error: Unable to load package ID from {package_id_path}. Details: {e}") 55 | sys.exit(1) 56 | 57 | # Load node details 58 | try: 59 | with open(node_details_path, "r") as f: 60 | node_details = json.load(f) 61 | llama_id = node_details.get("llama_id") 62 | llama_owner_cap_id = node_details.get("llama_owner_cap_id") 63 | except (FileNotFoundError, json.JSONDecodeError) as e: 64 | print( 65 | f"Error: Unable to load node details from {node_details_path}. Details: {e}" 66 | ) 67 | sys.exit(1) 68 | 69 | if not llama_id or not llama_owner_cap_id: 70 | print("Error: Llama ID or Llama Owner Capability ID is missing.") 71 | sys.exit(1) 72 | 73 | # Load SUI private key from keystore JSON 74 | try: 75 | with open(keystore_path, "r") as f: 76 | keys = json.load(f) 77 | if not keys: 78 | raise ValueError( 79 | "Sui keystore file is empty. Please check your Sui configuration." 80 | ) 81 | private_key = keys[0] # Assuming the first key is used 82 | except (FileNotFoundError, json.JSONDecodeError, ValueError) as e: 83 | print( 84 | f"Error: Unable to load SUI private key from {keystore_path}. 
Details: {e}" 85 | ) 86 | sys.exit(1) 87 | 88 | return package_id, llama_id, llama_owner_cap_id, private_key 89 | 90 | 91 | def main(): 92 | colorama_init() 93 | 94 | # Argument parsing 95 | parser = argparse.ArgumentParser( 96 | description="Run a specific example with Sui client." 97 | ) 98 | parser.add_argument( 99 | "example_name", 100 | help="The name of the example to run. Available examples: trip_planner, ig_post_planner, cli_cluster", 101 | ) 102 | args = parser.parse_args() 103 | 104 | # Validate the example name 105 | example_name = args.example_name 106 | if example_name not in EXAMPLES: 107 | raise ValueError( 108 | f"Unknown example name: {example_name}. Available examples: {list(EXAMPLES.keys())}" 109 | ) 110 | 111 | # Load configuration from known paths 112 | package_id, llama_id, llama_owner_cap_id, private_key = load_configuration() 113 | # Create the Sui client 114 | 115 | client = get_sui_client(private_key, rpc_url=rpc_url, ws_url=ws_url) 116 | # Run the selected example 117 | try: 118 | print(f"\nRunning example: {example_name}\n") 119 | EXAMPLES[example_name](client, package_id, llama_id, llama_owner_cap_id) 120 | print(f"\nExample {example_name} finished successfully.") 121 | except Exception as e: 122 | print(f"Failed to run example {example_name}: {e}") 123 | 124 | 125 | if __name__ == "__main__": 126 | main() 127 | -------------------------------------------------------------------------------- /docker/sui/genesis/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Mysten Labs, Inc. 
# SPDX-License-Identifier: Apache-2.0


import argparse
import re
import os.path
import subprocess
import sys
import typing
import yaml

import hiyapyco  # https://pypi.org/project/HiYaPyCo/

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Use the base directory to construct absolute paths
_COMMON_OVERLAY_PATH = os.path.join(BASE_DIR, "overlays", "common.yaml")

# BUGFIX: the named capture-group names had been stripped from this pattern
# (it read "(?Pdns|ip4|ip6|unix)...", which is invalid regex syntax and makes
# re.search raise re.error).  Both helpers below read the "address" and
# "port" groups, so the pattern is defined once here instead of being
# duplicated in each function.
_NETWORK_ADDR_PATTERN = (
    r"/(?P<schema>dns|ip4|ip6|unix)/(?P<address>[^/]*)(/udp|/tcp)?/(?P<port>\d+)?"
)


def parse_overlays(overlay_type: str) -> str:
    """Return the merged YAML overlays of *overlay_type* from common.yaml."""
    with open(_COMMON_OVERLAY_PATH, "r") as f:
        common_overlays = yaml.safe_load(f)[overlay_type]
    overlays = yaml.safe_dump(common_overlays)
    overlays = hiyapyco.load([overlays])
    return hiyapyco.dump(overlays)


def get_network_addresses(genesis_config: typing.Dict) -> typing.List:
    """Return per-validator config file names ("<address>-<port>.yaml").

    Each name is derived from the validator's multiaddr-style
    ``network_address`` entry in *genesis_config*.
    """
    network_addresses = []
    for validator in genesis_config.get("validator_config_info"):
        match = re.search(_NETWORK_ADDR_PATTERN, validator["network_address"])
        network_addresses.append(f'{match.group("address")}-{match.group("port")}.yaml')
    return network_addresses


def set_validator_name(genesis_config: typing.Dict) -> typing.Dict:
    """Set each validator's "name" to the address part of its network address."""
    for validator in genesis_config.get("validator_config_info"):
        match = re.search(_NETWORK_ADDR_PATTERN, validator["network_address"])
        validator["name"] = match.group("address")
    return genesis_config


def main(args: argparse.Namespace) -> None:
    """Generate genesis.blob and per-validator configs into args.target_directory.

    Note: the annotation was corrected from ``argparse.ArgumentParser`` —
    the caller passes the result of ``parser.parse_args()``, a Namespace.
    """
    # create target directory if it doesn't exist; "z" is a scratch working
    # directory for `sui genesis` and is removed at the end
    _ = subprocess.run(["mkdir", "-p", "z", f"{args.target_directory}"], check=True)

    # load genesis template
    with open(args.genesis_template, "r") as f:
        genesis_config = yaml.safe_load(f)

    validator_network_addresses = get_network_addresses(genesis_config)

    # set the validator name based on their address
    genesis_config = set_validator_name(genesis_config)

    # write genesis configuration file
    with open(f"{args.target_directory}/genesis.yaml", "w") as f:
        f.write(yaml.safe_dump(genesis_config))

    # run genesis with newly created genesis configuration file
    _ = subprocess.run(
        [
            "sui",
            "genesis",
            "--from-config",
            f"{args.target_directory}/genesis.yaml",
            "--working-dir",
            "z",
            "-f",
        ],
        # this should be inherited from the parent process, but just in case
        env=os.environ,
        check=True,
    )

    # parse validator overlays
    overlays = parse_overlays("validator")

    # process validator overlays: merge each generated validator config with
    # the common overlay and write the result to the target directory
    for validator in validator_network_addresses:
        with open(f"z/{validator}", "r") as f:
            validator_config = f.read()

        merged_yaml = hiyapyco.load(
            [validator_config, overlays], method=hiyapyco.METHOD_MERGE
        )
        merged_yaml = hiyapyco.dump(merged_yaml)

        with open(f"{args.target_directory}/{validator}", "w") as f:
            f.write(merged_yaml)

    # move other required files to target
    subprocess.run(["mv", "z/genesis.blob", f"{args.target_directory}/"], check=True)

    _ = subprocess.run(["rm", "-rf", "z"], check=True)
107 | 108 | 109 | if __name__ == "__main__": 110 | parser = argparse.ArgumentParser(description=__doc__) 111 | parser.add_argument( 112 | "-g", 113 | "--genesis-template", 114 | type=str, 115 | help="template to use for genesis.yaml generation", 116 | required=False, 117 | ) 118 | parser.add_argument( 119 | "-t", 120 | "--target-directory", 121 | type=str, 122 | help="target directory for generated genesis and configuration files", 123 | default="target", 124 | required=False, 125 | ) 126 | parser.add_argument( 127 | "-o", 128 | "--override-generation", 129 | type=str, 130 | help="do not generate and use override configuration instead", 131 | required=False, 132 | ) 133 | parser.add_argument( 134 | "-p", 135 | "--protocol-config-override", 136 | type=str, 137 | help="protocol config override to set", 138 | required=False, 139 | ) 140 | args = parser.parse_args() 141 | 142 | # exit if configuration already exists 143 | if os.path.exists(f"{args.target_directory}/genesis.blob"): 144 | print("configuration already exists, not generating") 145 | sys.exit(0) 146 | 147 | main(args) 148 | -------------------------------------------------------------------------------- /.github/workflows/talus-agentic-framework.yml: -------------------------------------------------------------------------------- 1 | # Github workflow to build and test the Talus Agentic Framework project 2 | 3 | name: Talus Agentic Framework 4 | on: 5 | pull_request: 6 | 7 | push: 8 | paths: 9 | - "onchain/**" 10 | - "e2e_tests/**" 11 | env: 12 | # defines what Sui version to install from the Sui's Github release page 13 | # https://github.com/MystenLabs/sui/releases 14 | SUI_REF: testnet-v1.26.1 15 | 16 | jobs: 17 | # 1. Get Sui CLI 18 | # 2. Builds and tests talus framework package 19 | build-agentic-framework: 20 | name: (Move) Build Agentic Framework 21 | runs-on: ubuntu-latest 22 | steps: 23 | - name: Check out repository code 24 | uses: actions/checkout@v4 25 | 26 | # 1. 
27 | - name: Fetch Sui CLI 28 | uses: ./.github/actions/fetch-sui-cli 29 | with: 30 | sui_ref: ${{ env.SUI_REF }} 31 | 32 | # 2. 33 | - run: sui move build -p onchain 34 | - run: sui move test -p onchain 35 | 36 | # We use nightly for formatting only because lots of nice format rules are 37 | # not available in stable Rust yet. 38 | # 39 | # 1. Get nightly Rust toolchain 40 | # 2. Check Rust formatting 41 | check-e2e-tests-fmt: 42 | name: (Rust) Check Formatting 43 | runs-on: ubuntu-latest 44 | steps: 45 | - name: Check out repository code 46 | uses: actions/checkout@v4 47 | 48 | # 1. 49 | - name: Set up Rust 50 | uses: actions-rs/toolchain@v1 51 | with: 52 | toolchain: nightly 53 | profile: minimal 54 | override: true 55 | components: rustfmt 56 | 57 | # 2. 58 | - run: cd e2e_tests && cargo fmt -- --check 59 | 60 | # 1. Get stable Rust toolchain 61 | # 2. Set up caching 62 | # 3. Build and check Rust binary 63 | # 4. Upload Rust binary as artifact 64 | build-e2e-tests: 65 | name: (Rust) Build E2E Tests 66 | runs-on: ubuntu-latest 67 | steps: 68 | - name: Check out repository code 69 | uses: actions/checkout@v4 70 | 71 | # 1. 72 | - name: Set up Rust 73 | uses: actions-rs/toolchain@v1 74 | with: 75 | toolchain: stable 76 | profile: minimal 77 | override: true 78 | components: clippy 79 | 80 | # 2. 81 | - name: Cache Rust dependencies 82 | uses: actions/cache@v3 83 | with: 84 | path: | 85 | ~/.cargo/bin/ 86 | ~/.cargo/registry/index/ 87 | ~/.cargo/registry/cache/ 88 | ~/.cargo/git/db/ 89 | e2e_tests/target/ 90 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} 91 | restore-keys: | 92 | ${{ runner.os }}-cargo- 93 | 94 | # 3. 95 | - run: cd e2e_tests && cargo build 96 | - run: cd e2e_tests && cargo clippy -- -D warnings 97 | 98 | # 4. 
99 | - name: Upload Rust binary 100 | uses: actions/upload-artifact@v3 101 | with: 102 | name: e2e-tests-binary # ARTIFACT NAME 103 | path: e2e_tests/target/debug/e2e_tests_bin # FROM THIS PATH 104 | retention-days: 1 # we only need this for the next job 105 | 106 | # 1. Get necessary files: code, Sui CLI, Rust binary. 107 | # The Ollama APIs are mocked in the Rust e2e binary 108 | # 2. Start Sui Localnet as a bg process with a fresh genesis and localnet wallet 109 | # 3. Deploy Talus Pkg and export FW_PKG_ID env variable 110 | # 4. Run E2E Tests binary with appropriate env variables 111 | # 5. Shutdown the localnet to clean up 112 | run-e2e-tests: 113 | name: Run E2E Tests 114 | runs-on: ubuntu-latest 115 | needs: [build-agentic-framework, build-e2e-tests] 116 | steps: 117 | # 1. 118 | - name: Check out repository code 119 | uses: actions/checkout@v4 120 | - name: Fetch Sui CLI 121 | uses: ./.github/actions/fetch-sui-cli 122 | with: 123 | sui_ref: ${{ env.SUI_REF }} 124 | - name: Download Rust binary 125 | uses: actions/download-artifact@v3 126 | with: 127 | name: e2e-tests-binary 128 | 129 | # 2. 130 | - name: Start Sui Localnet 131 | run: | 132 | sui genesis -f 133 | nohup sui start & 134 | echo $! > sui-localnet.pid & 135 | sleep 5 136 | shell: bash 137 | 138 | # 3. 139 | - name: Deploy Talus Pkg and export FW_PKG_ID 140 | run: | 141 | cd onchain 142 | json=$(sui client publish --skip-dependency-verification --json) 143 | 144 | fw_pkg_id=$(echo $json | jq -cr '.objectChanges[] | select(.packageId) | .packageId') 145 | if [ -z "$fw_pkg_id" ]; then 146 | echo "Cannot get pkg ID from JSON \n\n${json}" 147 | else 148 | echo "Talus framework package ID: $fw_pkg_id" 149 | fi 150 | 151 | echo "FW_PKG_ID=$(echo $fw_pkg_id)" >> $GITHUB_ENV 152 | 153 | # 4. 154 | - name: Run E2E Tests binary 155 | run: | 156 | export SUI_WALLET_PATH=~/.sui/sui_config/client.yaml 157 | export RUST_LOG=info,e2e_tests=debug 158 | chmod +x e2e_tests_bin 159 | ./e2e_tests_bin 160 | 161 | # 5. 
      - name: Shutdown Sui Localnet
        run: |
          kill $(cat sui-localnet.pid)
        shell: bash
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
# Nexus

> [!IMPORTANT]
> **For the upcoming version of nexus, check [nexus-sdk][nexus-sdk] and our [docs][docs].**
> For updates on our latest work, follow our [blog][blog].

> [!CAUTION]
>
> This repository is no longer actively maintained. The maintainers are currently working on an updated and improved architecture, and as such, this codebase should only be used for **educational purposes** or reference. You may explore the code to learn from past implementations.

## Overview

Nexus is the onchain Agentic Framework created by [Talus][talus].
The current version, provided here, is a sneak preview of our first iteration. There are
[many ideas](#what-remains-to-be-done) to develop it further, but in its current state it still
showcases what an onchain agentic framework is and it works end-to-end. For more details about
Talus, please visit our [blog][blog] and read our [litepaper][litepaper].

- [Nexus](#nexus)
  - [Architecture](#architecture)
  - [What is provided](#what-is-provided)
  - [Looking forward](#looking-forward)
  - [Contributing](#contributing)
  - [References](#references)
  - [Acknowledgments](#acknowledgments)
  - [License](#license)

## Architecture

Nexus is built on [Sui Move][sui_move], and consists of several key components:

1. **Onchain logic**: The onchain core logic of Nexus is implemented in Sui Move smart
   contracts in folder [`onchain`][onchain].
2. 
**SDK**: A Python SDK, in folder [`nexus_sdk`][nexus_sdk], which provides easy-to-use functions for setting up agents, interacting 35 | with the smart contracts, and running agentic workflows. 36 | 3. **Offchain components**: Services that handle LLM inference and anything that runs offchain, 37 | including supported tools. For more details, please see [`offchain`][offchain], where we 38 | define two Python packages named `nexus_events` and `nexus_tools`. 39 | 40 | The high-level architecture is shown in the following diagram: 41 | 42 | ```mermaid 43 | graph TD 44 | subgraph offchain["Offchain components"] 45 | subgraph nexus_tools["nexus_tools"] 46 | LLM_Inference["LLM Inference"] 47 | Tools 48 | end 49 | nexus_events 50 | end 51 | 52 | style offchain stroke-dasharray: 5 5, stroke-width:2px; 53 | 54 | subgraph onchain[""] 55 | nexus_contracts["Nexus Contracts
(folder onchain)"] 56 | blockchain["Blockchain"] 57 | end 58 | 59 | style onchain stroke-dasharray: 5 5, stroke-width:2px; 60 | 61 | subgraph agent["Example Agent"] 62 | agent_instructions["Instructions"] 63 | agent_ui["UI"] 64 | end 65 | 66 | style agent stroke-dasharray: 5 5, stroke-width:2px; 67 | 68 | nexus_sdk["nexus_sdk"] 69 | 70 | nexus_events --> nexus_tools 71 | nexus_events --> blockchain 72 | nexus_contracts --> blockchain 73 | nexus_sdk --> onchain 74 | 75 | agent --> nexus_sdk 76 | ``` 77 | 78 | ## What is provided 79 | 80 | - Nexus, an onchain agentic framework, made of the components described above. 81 | - [Examples][examples] of agents implemented with Nexus. 82 | - Complete instructions on how to setup a full environment, including the blockchain, smart 83 | contracts, and the offchain components. 84 | 85 | ## Looking forward 86 | 87 | Our first iteration had focused on feasibility, and as an aid in exploring the design space. You 88 | can build agents that work end-to-end. Here are some thoughts, which also give you an idea of 89 | some of the things we are actively working on: 90 | 91 | - Develop and experiment with novel **pricing/payment mechanisms**. 92 | - Implement a **slashing mechanism** to penalize misbehaving nodes and maintain network integrity. 93 | - Expand support to include **multiple modalities** beyond LLMs. 94 | - Enable **customization of tool parameters** by both users and agents for greater flexibility. 95 | - Introduce better **error handling** for agent responses to improve reliability. 96 | - Implement **parallel execution** capabilities to enhance task processing efficiency. 97 | - Develop support for **advanced task flow features**, such as loops and backtracking, to handle more complex workflows. 98 | - Provide **offchain storage** options to reduce on-chain data storage needs. 99 | - Introduce **privacy features** to allow for confidential or private data handling. 100 | 101 | Stay tuned ! 


## Contributing

If you find an issue setting up and running Nexus, which is not covered by our documentation,
please open a [ticket][bugs] _and_ add the [`external`][label_external] label.

## References

- Talus [site][talus].
- Talus [blog][blog].
- Talus [litepaper][litepaper].

## Acknowledgments

In designing this version of Nexus, we have taken inspiration from [crewAI][crewAI]. Concepts
like 'Tool', 'Task' etc. come from there. We also use crewAI tools in the implementation.

## License

- The Nexus agentic framework (in [`onchain`][onchain] and [`offchain`][offchain]), is
  licensed under [BSL 1.1][Nexus_License].
- The [Nexus SDK][nexus_sdk], is licensed under [Apache 2.0][SDK_License].
- [Examples][examples] are licensed under [Apache 2.0][Examples_License].



[talus]: https://talus.network/
[docs]: https://docs.talus.network/
[nexus-sdk]: https://github.com/Talus-Network/nexus-sdk
[blog]: https://blog.talus.network/
[litepaper]: https://talus.network/litepaper.pdf
[crewAI]: https://github.com/crewAIInc/crewAI
[sui_move]: https://docs.sui.io/concepts/sui-move-concepts
[onchain]: ./onchain/
[offchain]: ./offchain/
[nexus_sdk]: ./nexus_sdk/
[examples]: ./examples/
[bugs]: https://github.com/Talus-Network/nexus/issues
[label_external]: https://github.com/Talus-Network/nexus/labels/external
[Nexus_License]: ./LICENSE
[SDK_License]: ./nexus_sdk/LICENSE
[Examples_License]: ./examples/LICENSE
-------------------------------------------------------------------------------- /offchain/tools/tests/test_tool.py: --------------------------------------------------------------------------------
"""
tests for /tool/use route in main.py
To run, execute "pytest tests/test_tool.py" from `tools` directory
"""

| from fastapi.testclient import TestClient 7 | from ..server.main import app 8 | import pytest 9 | import os 10 | 11 | client = TestClient(app) 12 | 13 | 14 | def test_gpt4_vision_tool(): 15 | response = client.post( 16 | "/tool/use", 17 | json={ 18 | "tool_name": "gpt4_vision", 19 | "args": { 20 | "image_url": "https://i.imgur.com/Rr1jAAn.jpeg", 21 | "prompt": "Describe this image", 22 | }, 23 | }, 24 | ) 25 | print(f"Response content: {response.json()}") 26 | assert response.status_code == 200 27 | assert "result" in response.json() 28 | 29 | 30 | def test_dalle3_tool(): 31 | response = client.post( 32 | "/tool/use", 33 | json={"tool_name": "dalle3", "args": {"prompt": "A futuristic city"}}, 34 | ) 35 | print(f"Response content: {response.json()}") 36 | assert response.status_code == 200 37 | assert "result" in response.json() 38 | 39 | 40 | def test_openai_embeddings_tool(): 41 | response = client.post( 42 | "/tool/use", 43 | json={"tool_name": "openai_embeddings", "args": {"text": "Test embedding"}}, 44 | ) 45 | print(f"Response content: {response.json()}") 46 | assert response.status_code == 200 47 | assert "result" in response.json() 48 | 49 | 50 | def test_search_tool(): 51 | response = client.post( 52 | "/tool/use", 53 | json={ 54 | "tool_name": "search", 55 | "args": {"query": "FastAPI tutorial", "num_results": "5"}, 56 | }, 57 | ) 58 | print(f"Response content: {response.json()}") 59 | assert response.status_code == 200 60 | assert "result" in response.json() 61 | 62 | 63 | def test_wikipedia_tool(): 64 | response = client.post( 65 | "/tool/use", 66 | json={ 67 | "tool_name": "wikipedia", 68 | "args": {"query": "Python programming language"}, 69 | }, 70 | ) 71 | print(f"Response content: {response.json()}") 72 | assert response.status_code == 200 73 | assert "result" in response.json() 74 | 75 | 76 | def test_arxiv_tool(): 77 | response = client.post( 78 | "/tool/use", json={"tool_name": "arxiv", "args": {"query": "quantum computing"}} 79 | ) 80 | 
print(f"Response content: {response.json()}") 81 | assert response.status_code == 200 82 | assert "result" in response.json() 83 | 84 | 85 | def test_pubmed_tool(): 86 | response = client.post( 87 | "/tool/use", json={"tool_name": "pubmed", "args": {"query": "COVID-19 vaccine"}} 88 | ) 89 | print(f"Response content: {response.json()}") 90 | assert response.status_code == 200 91 | assert "result" in response.json() 92 | 93 | 94 | def test_scene_explain_tool(): 95 | response = client.post( 96 | "/tool/use", 97 | json={ 98 | "tool_name": "scene_explain", 99 | "args": {"image_url": "https://example.com/image.jpg"}, 100 | }, 101 | ) 102 | print(f"Response content: {response.json()}") 103 | assert response.status_code == 200 104 | assert "result" in response.json() 105 | 106 | 107 | def test_shell_tool(): 108 | response = client.post( 109 | "/tool/use", 110 | json={"tool_name": "shell", "args": {"command": "echo 'Hello, World!'"}}, 111 | ) 112 | print(f"Response content: {response.json()}") 113 | assert response.status_code == 200 114 | assert "result" in response.json() 115 | 116 | 117 | def test_tavily_search_tool(): 118 | response = client.post( 119 | "/tool/use", 120 | json={ 121 | "tool_name": "tavily_search", 122 | "args": {"query": "latest AI breakthroughs"}, 123 | }, 124 | ) 125 | print(f"Response content: {response.json()}") 126 | assert response.status_code == 200 127 | assert "result" in response.json() 128 | 129 | 130 | def test_python_repl_tool(): 131 | response = client.post( 132 | "/tool/use", 133 | json={"tool_name": "python_repl", "args": {"code": "print('Hello, World!')"}}, 134 | ) 135 | print(f"Response content: {response.json()}") 136 | assert response.status_code == 200 137 | assert "result" in response.json() 138 | 139 | 140 | def test_read_file_tool(): 141 | # Create a temporary file for testing 142 | with open("test_file.txt", "w") as f: 143 | f.write("Test content") 144 | 145 | response = client.post( 146 | "/tool/use", 147 | json={"tool_name": 
"read_file", "args": {"file_path": "test_file.txt"}}, 148 | ) 149 | print(f"Response content: {response.json()}") 150 | assert response.status_code == 200 151 | assert "result" in response.json() 152 | 153 | # Clean up the temporary file 154 | os.remove("test_file.txt") 155 | 156 | 157 | def test_list_directory_tool(): 158 | response = client.post( 159 | "/tool/use", 160 | json={"tool_name": "list_directory", "args": {"directory_path": "."}}, 161 | ) 162 | print(f"Response content: {response.json()}") 163 | assert response.status_code == 200 164 | assert "result" in response.json() 165 | 166 | 167 | def test_invalid_tool(): 168 | response = client.post( 169 | "/tool/use", 170 | json={ 171 | "tool_name": "invalid_tool", 172 | "args": {"query": "test query", "num_results": "5"}, 173 | }, 174 | ) 175 | print(f"Response status: {response.status_code}") 176 | print(f"Response content: {response.json()}") 177 | print(f"Response headers: {response.headers}") 178 | assert ( 179 | response.status_code == 400 180 | ), f"Expected 400, got {response.status_code}. 
Response: {response.json()}" 181 | 182 | 183 | def test_invalid_args(): 184 | response = client.post( 185 | "/tool/use", json={"tool_name": "search", "args": {"invalid_arg": "value"}} 186 | ) 187 | assert response.status_code == 422 # Validation error 188 | -------------------------------------------------------------------------------- /offchain/tools/tests/test_agent.py: -------------------------------------------------------------------------------- 1 | # test_agent.py 2 | 3 | from fastapi.testclient import TestClient 4 | from unittest.mock import patch, MagicMock 5 | import pytest 6 | from src.server.main import app 7 | from src.server.models import CreateAgentRequest 8 | 9 | client = TestClient(app) 10 | 11 | 12 | @pytest.mark.unit 13 | def test_create_agents_and_tasks(): 14 | request_company_descriptiondata = { 15 | "desciption": "A tech company specializing in AI solutions.", 16 | "company_domain": "ai-tech.com", 17 | "hiring_needs": "Senior AI Engineer", 18 | "specific_benefits": "Remote work, flexible hours, stock options", 19 | "agents": [ 20 | { 21 | "role": "Research Analyst", 22 | "goal": "Analyze the company website and provided description to extract insights on culture, values, and specific needs.", 23 | "backstory": "Expert in analyzing company cultures and identifying key values and needs from various sources, including websites and brief descriptions.", 24 | "tools": [ 25 | { 26 | "name": "WebsiteSearchTool", 27 | "description": "Tool for searching websites", 28 | }, 29 | { 30 | "name": "SeperDevTool", 31 | "description": "Development tool for data separation", 32 | }, 33 | ], 34 | }, 35 | { 36 | "role": "Job Description Writer", 37 | "goal": "Use insights from the Research Analyst to create a detailed, engaging, and enticing job posting.", 38 | "backstory": "Skilled in crafting compelling job descriptions that resonate with the company's values and attract the right candidates.", 39 | "tools": [ 40 | { 41 | "name": "WebsiteSearchTool", 42 | 
"description": "Tool for searching websites", 43 | }, 44 | { 45 | "name": "SeperDevTool", 46 | "description": "Development tool for data separation", 47 | }, 48 | {"name": "FileReadTool", "description": "Tool for reading files"}, 49 | ], 50 | }, 51 | { 52 | "role": "Review and Editing Specialist", 53 | "goal": "Review the job posting for clarity, engagement, grammatical accuracy, and alignment with company values and refine it to ensure perfection.", 54 | "backstory": "A meticulous editor with an eye for detail, ensuring every piece of content is clear, engaging, and grammatically perfect.", 55 | "tools": [ 56 | { 57 | "name": "WebsiteSearchTool", 58 | "description": "Tool for searching websites", 59 | }, 60 | { 61 | "name": "SeperDevTool", 62 | "description": "Development tool for data separation", 63 | }, 64 | {"name": "FileReadTool", "description": "Tool for reading files"}, 65 | ], 66 | }, 67 | ], 68 | "tasks": [ 69 | { 70 | "description": "Analyze the provided company website and the hiring manager's company's domain ai-tech.com, description: \"A tech company specializing in AI solutions.\". Focus on understanding the company's culture, values, and mission. Identify unique selling points and specific projects or achievements highlighted on the site. Compile a report summarizing these insights, specifically how they can be leveraged in a job posting to attract the right candidates.", 71 | "expected_output": "A comprehensive report detailing the company's culture, values, and mission, along with specific selling points relevant to the job role. Suggestions on incorporating these insights into the job posting should be included.", 72 | "agent_role": "Research Analyst", 73 | }, 74 | { 75 | "description": 'Draft a job posting for the role described by the hiring manager: "Senior AI Engineer". Use the insights on "A tech company specializing in AI solutions." 
to start with a compelling introduction, followed by a detailed role description, responsibilities, and required skills and qualifications. Ensure the tone aligns with the company\'s culture and incorporate any unique benefits or opportunities offered by the company. Specific benefits: "Remote work, flexible hours, stock options"', 76 | "expected_output": "A detailed, engaging job posting that includes an introduction, role description, responsibilities, requirements, and unique company benefits. The tone should resonate with the company's culture and values, aimed at attracting the right candidates.", 77 | "agent_role": "Job Description Writer", 78 | }, 79 | { 80 | "description": "Review the draft job posting for the role: \"Senior AI Engineer\". Check for clarity, engagement, grammatical accuracy, and alignment with the company's culture and values. Edit and refine the content, ensuring it speaks directly to the desired candidates and accurately reflects the role's unique benefits and opportunities. Provide feedback for any necessary revisions.", 81 | "expected_output": "A polished, error-free job posting that is clear, engaging, and perfectly aligned with the company's culture and values. Feedback on potential improvements and final approval for publishing. 
Formatted in markdown.", 82 | "agent_role": "Review and Editing Specialist", 83 | }, 84 | ], 85 | } 86 | 87 | response = client.post("/agent", json=request_data) 88 | assert response.status_code == 200 89 | data = response.json() 90 | assert data["message"] == "Job Posting Creation Process Completed" 91 | assert "result" in data 92 | 93 | 94 | @pytest.mark.unit 95 | def test_run_agent_process(): 96 | # Assuming you have an endpoint to run the agent process 97 | response = client.post("/agent/run") 98 | assert response.status_code == 200 99 | data = response.json() 100 | assert "message" in data 101 | assert "result" in data 102 | assert data["message"] == "Agent process started successfully" 103 | -------------------------------------------------------------------------------- /onchain/sources/task.move: -------------------------------------------------------------------------------- 1 | module talus::task { 2 | //! A task represents units of work within the Cluster's execution. 3 | //! It's always bound to a specific agent that is supposed to work on it. 4 | 5 | use std::string::{Self, String}; 6 | use talus::agent::AgentName; 7 | use talus::consts::{status_idle, status_running, status_success}; 8 | use talus::tool::Tool; 9 | 10 | // === Data models === 11 | 12 | /// Defines specifically what's the agent supposed to do. 13 | public struct TaskBlueprint has store, copy, drop { 14 | /// Tasks are identified by their name. 15 | /// This implies that task name must be unique within a single 16 | /// [`talus::cluster::Cluster`]. 17 | name: TaskName, 18 | /// Which agent is responsible for running this task to completion. 19 | /// This agent must exist within the same [`talus::cluster::Cluster`] as this 20 | /// task. 21 | agent: AgentName, 22 | description: String, 23 | expected_output: String, 24 | prompt: String, 25 | context: String, 26 | /// If provided then the node will execute this tool and use the result 27 | /// to run an inference using the prompt. 
28 | /// The LLM output is then uploaded as the response for this task. 29 | tool: Option, 30 | } 31 | 32 | /// Puts a task into a concrete situation. 33 | public struct TaskState has store { 34 | /// You can find the information about this task by searching the Cluster's 35 | /// tasks by name. 36 | name: TaskName, 37 | agent_name: AgentName, 38 | /// TBD: This is used to build context but it's never changed from its 39 | /// initial value of empty string. 40 | input_context: String, 41 | /// Enumeration of 42 | /// - `StatusIdle` 43 | /// - `StatusRunning` 44 | /// - `StatusSuccess` 45 | /// 46 | /// We use string constants to be more friendly to explorers. 47 | status: String, 48 | prompt: Option, 49 | response: String, 50 | } 51 | 52 | /// Task name serves as an identifier for a task. 53 | public struct TaskName has store, copy, drop { 54 | inner: String, 55 | } 56 | 57 | // === Constructors === 58 | 59 | /// Returns a new instance of a [`TaskBlueprint`]. 60 | public fun new( 61 | name: TaskName, 62 | agent: AgentName, 63 | description: String, 64 | expected_output: String, 65 | prompt: String, 66 | context: String, 67 | ): TaskBlueprint { 68 | TaskBlueprint { 69 | name, 70 | description, 71 | expected_output, 72 | agent, 73 | prompt, 74 | context, 75 | tool: option::none(), 76 | } 77 | } 78 | 79 | /// Returns a new instance of a [`TaskBlueprint`] 80 | /// with a tool attached. 
81 | public fun new_with_tool( 82 | name: TaskName, 83 | agent: AgentName, 84 | description: String, 85 | expected_output: String, 86 | prompt: String, 87 | context: String, 88 | tool: Tool, 89 | ): TaskBlueprint { 90 | TaskBlueprint { 91 | name, 92 | description, 93 | expected_output, 94 | agent, 95 | prompt, 96 | context, 97 | tool: option::some(tool), 98 | } 99 | } 100 | 101 | public fun new_state( 102 | name: TaskName, 103 | agent_name: AgentName, 104 | ): TaskState { 105 | TaskState { 106 | name, 107 | agent_name, 108 | input_context: string::utf8(b""), 109 | status: status_idle(), 110 | prompt: option::none(), 111 | response: string::utf8(b""), 112 | } 113 | } 114 | 115 | /// Create a new instance of a [`TaskName`] from given string. 116 | /// Name serves as an identifier. 117 | public fun into_name(s: String): TaskName { 118 | TaskName { inner: s } 119 | } 120 | 121 | /// Convert a [`TaskName`] into a string. 122 | public fun into_string(name: TaskName): String { 123 | name.inner 124 | } 125 | 126 | // === State management === 127 | 128 | public fun attach_tool(self: &mut TaskBlueprint, tool: Tool) { 129 | self.tool = option::some(tool); 130 | } 131 | 132 | // === Accessors === 133 | 134 | public fun get_agent_name(self: &TaskBlueprint): AgentName { self.agent } 135 | public fun get_context(self: &TaskBlueprint): String { self.context } 136 | public fun get_description(self: &TaskBlueprint): String { self.description } 137 | public fun get_expected_output(self: &TaskBlueprint): String { self.expected_output } 138 | public fun get_name(self: &TaskBlueprint): TaskName { self.name } 139 | public fun get_prompt(self: &TaskBlueprint): String { self.prompt } 140 | public fun get_tool(self: &TaskBlueprint): Option { self.tool } 141 | 142 | public fun get_state_agent_name(self: &TaskState): AgentName { self.agent_name } 143 | public fun get_state_input_context(self: &TaskState): String { self.input_context } 144 | public fun get_state_output_bytes(self: &TaskState): 
vector { *string::bytes(&self.response) } 145 | public fun get_state_status(self: &TaskState): String { self.status } 146 | public fun is_idle(self: &TaskState): bool { self.status == status_idle() } 147 | public fun is_running(self: &TaskState): bool { self.status == status_running() } 148 | public fun is_successful(self: &TaskState): bool { self.status == status_success() } 149 | 150 | // === Package protected === 151 | 152 | public(package) fun set_state_status(self: &mut TaskState, status: String) { self.status = status; } 153 | public(package) fun set_state_response(self: &mut TaskState, response: String) { self.response = response; } 154 | public(package) fun set_state_prompt(self: &mut TaskState, prompt: ID) { self.prompt = option::some(prompt); } 155 | 156 | // === Tests === 157 | 158 | #[test_only] 159 | public fun create_test_state(agent: AgentName): TaskState { 160 | TaskState { 161 | agent_name: agent, 162 | name: into_name(string::utf8(b"Write Talus Poem")), 163 | input_context: string::utf8(b"Talus is a decentralized network focusing on AI and blockchain"), 164 | status: status_idle(), 165 | prompt: option::none(), 166 | response: string::utf8(b""), 167 | } 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /e2e_tests/src/completion.rs: -------------------------------------------------------------------------------- 1 | //! We listen to emitted events named [`REQ_FOR_COMPLETION_EVENT`] in the [`PROMPT_MODULE`]. 2 | //! Then we run the LLM with the given input read from the event and wait for its completion. 3 | //! Finally, we submit the completion result to the chain. 
4 | 5 | use { 6 | crate::{prelude::*, setup::TestsSetup, TestsContext}, 7 | futures_util::StreamExt, 8 | serde::Deserialize, 9 | serde_json::json, 10 | std::str::FromStr, 11 | sui_sdk::{ 12 | json::SuiJsonValue, 13 | rpc_types::{EventFilter, SuiEvent}, 14 | }, 15 | }; 16 | 17 | const CLUSTER_MODULE: &str = "cluster"; 18 | const CLUSTER_SUBMIT_COMPLETION_FUNCTION: &str = 19 | "submit_completion_as_cluster_owner"; 20 | const PROMPT_MODULE: &str = "prompt"; 21 | const REQ_FOR_COMPLETION_EVENT: &str = "RequestForCompletionEvent"; 22 | 23 | /// This is the JSON that we expect to capture. 24 | /// There are more fields on this event but for now we only care about these. 25 | #[derive(Deserialize)] 26 | struct RequestForCompletionEvent { 27 | /// Cluster execution ID 28 | cluster_execution: String, 29 | /// Model ID 30 | model: String, 31 | /// The model name that should complete 32 | model_name: String, 33 | /// What the model should complete 34 | prompt_contents: String, 35 | /// If attached, we execute the tool and attach the output to the 36 | /// LLM prompt. 37 | tool: Option, 38 | } 39 | 40 | /// As returned by Sui APIs. 41 | #[derive(Deserialize)] 42 | struct ToolObject { 43 | fields: Tool, 44 | } 45 | 46 | /// As defined in the smart contract. 47 | #[derive(Deserialize)] 48 | struct Tool { 49 | name: String, 50 | args: Vec, 51 | } 52 | 53 | /// Starts listening to the [`REQ_FOR_COMPLETION_EVENT`] events and completes 54 | /// them using the Ollama API in a separate task. 55 | pub(crate) async fn spawn_task( 56 | mut ctx: TestsContext, 57 | resources: TestsSetup, 58 | ) -> Result<()> { 59 | debug!("Creating stream of Sui events {REQ_FOR_COMPLETION_EVENT}"); 60 | 61 | // filter for the event type we are interested in 62 | // ideally we'd also filter by the event's data, but this event filter is 63 | // currently broken in the Sui SDK 64 | let mut stream = ctx 65 | .client() 66 | .await? 
67 | .event_api() 68 | .subscribe_event(EventFilter::MoveEventType(FromStr::from_str( 69 | &format!( 70 | "{pkg_id}::{PROMPT_MODULE}::{REQ_FOR_COMPLETION_EVENT}", 71 | pkg_id = ctx.pkg_id, 72 | ), 73 | )?)) 74 | .await?; 75 | 76 | tokio::spawn(async move { 77 | while let Some(event_res) = stream.next().await { 78 | let event_json = match event_res { 79 | Ok(SuiEvent { parsed_json, .. }) => parsed_json, 80 | Err(err) => { 81 | error!( 82 | "Error while listening to \ 83 | {REQ_FOR_COMPLETION_EVENT}: {err}" 84 | ); 85 | break; 86 | } 87 | }; 88 | 89 | if let Err(err) = 90 | submit_for_event(&mut ctx, &resources, event_json).await 91 | { 92 | error!("Error submitting completion: {err}"); 93 | }; 94 | } 95 | }); 96 | 97 | Ok(()) 98 | } 99 | 100 | async fn submit_for_event( 101 | ctx: &mut TestsContext, 102 | resources: &TestsSetup, 103 | event_json: JsonValue, 104 | ) -> Result<()> { 105 | let RequestForCompletionEvent { 106 | cluster_execution, 107 | model, 108 | mut prompt_contents, 109 | model_name, 110 | tool, 111 | } = serde_json::from_value(event_json.clone()).map_err(|err| { 112 | anyhow!( 113 | "Failed to parse {REQ_FOR_COMPLETION_EVENT} event\n\ 114 | {event_json:#?}\n\nError: {err}" 115 | ) 116 | })?; 117 | 118 | let expected_model_id = resources.model.id; 119 | if expected_model_id != ObjectID::from_str(&model)? { 120 | // not an event that we are supposed to handle 121 | return Ok(()); 122 | } 123 | 124 | if let Some(ToolObject { 125 | fields: Tool { name, args }, 126 | .. 
127 | }) = tool 128 | { 129 | match name.as_str() { 130 | // here we could implement some tool execution 131 | "some_tool" => { 132 | prompt_contents += 133 | &format!("\n\nInvoked some tool with args:\n{args:#?}",); 134 | } 135 | unknown_tool => warn!( 136 | "Execution '{cluster_execution}' \ 137 | asked for an unknown tool: {unknown_tool}" 138 | ), 139 | }; 140 | } 141 | 142 | // talk to ollama via HTTP API 143 | let client = reqwest::Client::new(); 144 | let res = client 145 | .post(ctx.ollama_http_api.clone()) 146 | .json(&json!({ 147 | "model": model_name, 148 | "prompt": prompt_contents, 149 | })) 150 | .send() 151 | .await?; 152 | 153 | if !res.status().is_success() { 154 | error!( 155 | "Failed to get completion from ollama: {}\n\n{:?}", 156 | res.status(), 157 | res.text().await 158 | ); 159 | return Ok(()); 160 | } 161 | 162 | let mut completion = String::with_capacity(1024); 163 | for line in res 164 | .text() 165 | .await? 166 | .lines() 167 | .map(|l| l.trim()) 168 | .filter(|l| !l.is_empty()) 169 | { 170 | #[derive(Deserialize)] 171 | struct OllamaResponseLine { 172 | response: String, 173 | done: bool, 174 | } 175 | 176 | let OllamaResponseLine { response, done } = serde_json::from_str(line)?; 177 | 178 | if done { 179 | break; 180 | } 181 | 182 | completion.push_str(&response); 183 | } 184 | 185 | let cluster_execution = ObjectID::from_str(&cluster_execution)?; 186 | // this could happen in a separate task and the listener can run another 187 | // completion meanwhile already 188 | let resp = ctx 189 | .move_call( 190 | CLUSTER_MODULE, 191 | CLUSTER_SUBMIT_COMPLETION_FUNCTION, 192 | vec![ 193 | SuiJsonValue::from_object_id(cluster_execution), 194 | SuiJsonValue::from_object_id(resources.cluster.owner_cap), 195 | SuiJsonValue::new(JsonValue::String(completion))?, 196 | ], 197 | ) 198 | .await?; 199 | 200 | info!( 201 | "Submitted completion for model '{model_name}' and cluster execution \ 202 | '{cluster_execution}' in tx {}", 203 | resp.digest 204 | 
); 205 | 206 | Ok(()) 207 | } 208 | -------------------------------------------------------------------------------- /offchain/tools/src/nexus_tools/server/crew/talus_ollama.py: -------------------------------------------------------------------------------- 1 | ### This class will basically overrides the LLM implementaion for Ollama as we added 2 | ### the ability to report usage per agent request, the logic here is to be able to chrage 3 | 4 | 5 | import json 6 | from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union, cast 7 | 8 | from langchain_core._api import deprecated 9 | from langchain_core.callbacks import ( 10 | AsyncCallbackManagerForLLMRun, 11 | CallbackManagerForLLMRun, 12 | ) 13 | from langchain_core.language_models.chat_models import BaseChatModel 14 | from langchain_core.messages import ( 15 | AIMessage, 16 | AIMessageChunk, 17 | BaseMessage, 18 | ChatMessage, 19 | HumanMessage, 20 | SystemMessage, 21 | ) 22 | from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult 23 | 24 | from langchain_community.llms.ollama import OllamaEndpointNotFoundError, _OllamaCommon 25 | 26 | 27 | class TalusOllama(BaseChatModel, _OllamaCommon): 28 | def __init__(self, *args, **kwargs): 29 | super().__init__(*args, **kwargs) 30 | self.prompt_tokens = 0 31 | self.completion_tokens = 0 32 | 33 | @property 34 | def _llm_type(self) -> str: 35 | return "talus-ollama-chat" 36 | 37 | def _convert_messages_to_ollama_messages( 38 | self, messages: List[BaseMessage] 39 | ) -> List[Dict[str, Union[str, List[str]]]]: 40 | ollama_messages: List = [] 41 | for message in messages: 42 | role = "" 43 | if isinstance(message, HumanMessage): 44 | role = "user" 45 | elif isinstance(message, AIMessage): 46 | role = "assistant" 47 | elif isinstance(message, SystemMessage): 48 | role = "system" 49 | else: 50 | raise ValueError("Received unsupported message type for Ollama.") 51 | 52 | content = "" 53 | images = [] 54 | if 
isinstance(message.content, str): 55 | content = message.content 56 | else: 57 | for content_part in cast(List[Dict], message.content): 58 | if content_part.get("type") == "text": 59 | content += f"\n{content_part['text']}" 60 | elif content_part.get("type") == "image_url": 61 | if isinstance(content_part.get("image_url"), str): 62 | image_url_components = content_part["image_url"].split(",") 63 | if len(image_url_components) > 1: 64 | images.append(image_url_components[1]) 65 | else: 66 | images.append(image_url_components[0]) 67 | else: 68 | raise ValueError( 69 | "Only string image_url content parts are supported." 70 | ) 71 | else: 72 | raise ValueError( 73 | "Unsupported message content type. " 74 | "Must either have type 'text' or type 'image_url' " 75 | "with a string 'image_url' field." 76 | ) 77 | 78 | ollama_messages.append( 79 | { 80 | "role": role, 81 | "content": content, 82 | "images": images, 83 | } 84 | ) 85 | 86 | return ollama_messages 87 | 88 | def _create_chat_stream( 89 | self, 90 | messages: List[BaseMessage], 91 | stop: Optional[List[str]] = None, 92 | **kwargs: Any, 93 | ) -> Iterator[str]: 94 | payload = { 95 | "model": self.model, 96 | "messages": self._convert_messages_to_ollama_messages(messages), 97 | } 98 | self.prompt_tokens = self._count_tokens(payload) 99 | self.report_prompt_charges() # Report prompt charges before calling LLM 100 | yield from self._create_stream( 101 | payload=payload, stop=stop, api_url=f"{self.base_url}/api/chat", **kwargs 102 | ) 103 | 104 | def _chat_stream_with_aggregation( 105 | self, 106 | messages: List[BaseMessage], 107 | stop: Optional[List[str]] = None, 108 | run_manager: Optional[CallbackManagerForLLMRun] = None, 109 | verbose: bool = False, 110 | **kwargs: Any, 111 | ) -> ChatGenerationChunk: 112 | final_chunk: Optional[ChatGenerationChunk] = None 113 | for stream_resp in self._create_chat_stream(messages, stop, **kwargs): 114 | if stream_resp: 115 | chunk = 
_chat_stream_response_to_chat_generation_chunk(stream_resp) 116 | if final_chunk is None: 117 | final_chunk = chunk 118 | else: 119 | final_chunk += chunk 120 | if run_manager: 121 | run_manager.on_llm_new_token( 122 | chunk.text, 123 | chunk=chunk, 124 | verbose=verbose, 125 | ) 126 | if final_chunk is None: 127 | raise ValueError("No data received from Ollama stream.") 128 | 129 | self.completion_tokens = self._count_tokens(final_chunk.text) 130 | self.report_completion_charges() # Report completion charges after receiving from LLM 131 | return final_chunk 132 | 133 | def _generate( 134 | self, 135 | messages: List[BaseMessage], 136 | stop: Optional[List[str]] = None, 137 | run_manager: Optional[CallbackManagerForLLMRun] = None, 138 | **kwargs: Any, 139 | ) -> ChatResult: 140 | final_chunk = self._chat_stream_with_aggregation( 141 | messages, 142 | stop=stop, 143 | run_manager=run_manager, 144 | verbose=self.verbose, 145 | **kwargs, 146 | ) 147 | chat_generation = ChatGeneration( 148 | message=AIMessage(content=final_chunk.text), 149 | generation_info=final_chunk.generation_info, 150 | ) 151 | return ChatResult(generations=[chat_generation]) 152 | 153 | def _count_tokens(self, text: Union[str, Dict]) -> int: 154 | # TODO: Implement token counting logic based on your specific requirements 155 | # This is a placeholder implementation 156 | if isinstance(text, str): 157 | return len(text.split()) 158 | elif isinstance(text, Dict): 159 | return sum(len(str(value).split()) for value in text.values()) 160 | else: 161 | raise ValueError("Unsupported text type for token counting.") 162 | 163 | def report_prompt_charges(self) -> None: 164 | # TODO: Implement the logic to report prompt charges to the blockchain 165 | print(f"Reporting prompt charges: {self.prompt_tokens} tokens") 166 | 167 | def report_completion_charges(self) -> None: 168 | # TODO: Implement the logic to report completion charges to the blockchain 169 | print(f"Reporting completion charges: 
{self.completion_tokens} tokens") 170 | -------------------------------------------------------------------------------- /onchain/README.md: -------------------------------------------------------------------------------- 1 | # Nexus smart contracts 2 | 3 | This is the onchain part of Nexus. 4 | 5 | ## General structure 6 | 7 | The Nexus Move contracts are organized into several modules, each responsible for a specific 8 | aspect of the agentic framework: 9 | 10 | 1. [`node`][node]: Represents computational units that can run models inferences. 11 | 2. [`model`][model]: Defines LLM models that can be run on nodes. 12 | 3. [`agent`][model]: Represents intelligent agents that use models to perform tasks. 13 | 4. [`cluster`][cluster]: Manages groups of agents working together. 14 | 5. [`task`][task]: Defines unit of work within a cluster. 15 | 6. [`tool`][tool]: Represents utilities that agents can use to complete tasks. 16 | 7. [`prompt`][prompt]: Handles the creation and management of prompts for LLMs. 17 | 18 | For a technical audience interested in building a client for Nexus or using it as a Move 19 | dependency, the following section provides details about the design. 20 | 21 | ## Design 22 | 23 | ### `Node` to `Model` to `Agent` 24 | 25 | Invoking machine learning models requires hardware. 26 | Nexus describes a state machine that tells the hardware what to do, but the execution of the machine learning models happens on `Node`s off-chain. 27 | Creating a `Node` object is the first step when interacting with Nexus. 28 | Each computing unit is represented by this _owned_ object, meaning whichever wallet owns the `Node` object has exclusive rights to permit other entities to use it. 29 | See the [`talus::node` module](./sources/node.move) to understand how to create a node and what information is shared with the network. 30 | 31 | Once we have defined the computing unit, we need to represent the machine learning model that powers LLM inference. 
32 | At the moment, only a `Node` owner can create a new shared `Model` object. 33 | Since it's a shared object, it means it can be referenced in anyone's transaction. 34 | However, upon creation of `Model` the transaction sender receives an owned `ModelOwnerCap` object. 35 | This is a common Move pattern to handle permissions. 36 | The shared `Model` object is a wrapper around `ModelInfo` that contains the model's metadata. 37 | See the [`talus::model` module](./sources/model.move) to understand how to create a model and what information is shared with the network. 38 | With `ModelInfo` one can create agents as is shown in the next step. 39 | There are two paths to get access to the `ModelInfo`: 40 | 41 | 1. The model owner can get it from the `Model` object by showing the `ModelOwnerCap`. 42 | 2. The model owner can issue `ModelInferencePromise` and transfer it to another wallet. 43 | Such wallet can then use the `ModelInferencePromise` to get the `ModelInfo`. 44 | 45 | These access patterns enable the model owner to control who can use the model. 46 | Note the name `ModelInferencePromise`. 47 | At the moment, we don't have any punishment system for slashing inference providers that don't deliver the result. 48 | Hence, for now, the model owner only makes a promise to whoever wants to use the model that the inference will be done. 49 | 50 | Finally, we have the `Agent` object which is a wrapper around `AgentBlueprint` object similarly to `Model` and `ModelInfo`. 51 | Upon creation of an `Agent` object, the transaction sender receives an owned `AgentOwnerCap` object. 52 | See the [`talus::agent` module](./sources/agent.move) to understand how to create an agent and what information is shared with the network. 53 | 54 | An agent uses an LLM (the `Model`) for a specific narrower set of goals. 55 | One node can run multiple models, and one model can be used by multiple agents. 56 | Two agents with different roles are expected to still use the same model. 

### Cluster

Agents can be combined into a `Cluster` object.
A `Cluster` also defines tasks to be performed by those agents.
(The simplest cluster that's runnable has one agent performing one task.)
When a `Cluster` is created, the creator receives a `ClusterOwnerCap` object.
With this object they can add tasks to the cluster.
They can also add agents to the cluster, either ones they created themselves (provided they have access to the `ModelInfo` via `ModelOwnerCap` or `ModelInferencePromise`) or agents created by others.

However, agent owners have control over their agents.
To add someone else's agent, the cluster owner needs to have `AgentRosterPromise` for that agent.
They can only obtain it from the agent owner.
This is the same pattern we saw with the `ModelInferencePromise`.

An example of a cluster is given in the [`talus::cluster_tests` module](./sources/tests/cluster_tests.move).

Once the `Cluster` has been defined, users can submit their prompt that will be fed into the LLM of the agent owning the first task in the cluster.
This process creates a `ClusterExecution` shared object which copies the `Cluster` blueprint and tracks the state of a particular user execution.
Multiple users can submit their prompts to the same cluster, each having their own `ClusterExecution` object.
See the [`talus::cluster::execute` entry function](./sources/cluster.move).

Creating a new `ClusterExecution` emits a `RequestForCompletionEvent` event.
Nodes listen to these events and filter them based on IDs of models they run.
Once the node that runs the LLM inference for the first agent has finished its off-chain computation, it submits the result to the particular `ClusterExecution` object.
It submits the result via either

- `submit_completion_as_node_owner`,
- `submit_completion_as_model_owner` or
- `submit_completion_as_cluster_owner`.

The specific function depends on the off-chain node implementation and only differs in the way permissions are checked.

All LLM output is stored on-chain.
If there is more than one task, the process repeats.
The completion submission emits `RequestForCompletionEvent` which leads to a (possibly different) node again feeding the string in the `RequestForCompletionEvent.prompt_contents` property to the LLM and submitting the result via one of the aforementioned functions.

Once all tasks are done, the `ClusterExecution` object is marked as completed by setting the appropriate value of its `status` property.

### Tools

Tools are defined at the `ClusterBlueprint` level, specifically on a task.
Each task can optionally have a tool name and a list of parameters.
At the moment we make the assumption that the cluster owner defines only tools which the nodes that run agents know how to use.

The off-chain listener then first matches the tool name to a function to execute.
The output of the function is appended to the prompt that is fed to the LLM.
LLM response is then submitted in the aforementioned completion flow.
105 | 106 | 107 | 108 | [gdoc-next-steps]: https://docs.google.com/document/d/1pWrayUt3zI1YQqnzR6MqLDYwz-x7i845WAGv9im0fis 109 | [gdoc-user-stories]: https://docs.google.com/document/d/1zf-NdrW6bSCmmVWuKvM8rqG1s2KwxlcPfrjwwHxzXzU 110 | [git-main]: https://github.com/Talus-Network/ai/tree/f64e92638 111 | 112 | [node]: ./sources/node.move 113 | [model]: ./sources/model.move 114 | [agent]: ./sources/agent.move 115 | [cluster]: ./sources/cluster.move 116 | [task]: ./sources/task.move 117 | [tool]: ./sources/tool.move 118 | [prompt]: ./sources/prompt.move 119 | -------------------------------------------------------------------------------- /docker/sui/compose.yaml: -------------------------------------------------------------------------------- 1 | x-validator-base: &validator-base 2 | image: talusnetwork/sui-tools:${SUI_TAG:-latest} 3 | environment: &validator-env 4 | - RUST_BACKTRACE=1 5 | - RUST_LOG=info,sui_core=info,sui_network=info,sui_node=info,narwhal=info,narwhal-primary::helper=info,jsonrpsee=error 6 | - RPC_WORKER_THREAD=12 7 | - NEW_CHECKPOINT_WARNING_TIMEOUT_MS=30000 8 | - NEW_CHECKPOINT_PANIC_TIMEOUT_MS=60000 9 | logging: &validator-logging 10 | driver: "json-file" 11 | options: 12 | max-file: "10" 13 | max-size: "1g" 14 | restart: on-failure 15 | depends_on: 16 | build-genesis: 17 | condition: service_completed_successfully 18 | build-suitools: 19 | condition: service_completed_successfully 20 | 21 | services: 22 | initialize-onchain: 23 | image: alpine:latest 24 | container_name: initialize-onchain 25 | command: ["sh", "-c", "cp -r /source/* /data"] 26 | volumes: 27 | - ../../onchain:/source:ro 28 | - onchain:/data 29 | restart: "no" 30 | 31 | build-suitools: 32 | container_name: build-suitools 33 | restart: "no" 34 | image: talusnetwork/sui-tools:${SUI_TAG} 35 | build: 36 | context: "." 
37 | dockerfile: Dockerfile 38 | args: 39 | SUI_TAG: ${SUI_TAG} 40 | 41 | build-genesis: 42 | image: talusnetwork/build-sui-genesis:${SUI_TAG} 43 | container_name: build-genesis 44 | build: 45 | context: "./genesis" 46 | dockerfile: Dockerfile 47 | args: 48 | SUI_TAG: ${SUI_TAG} 49 | pull_policy: never 50 | volumes: 51 | - genesis:/opt/sui/genesis/files 52 | environment: 53 | - SUI_TAG=${SUI_TAG} 54 | restart: "no" 55 | depends_on: 56 | build-suitools: 57 | condition: service_completed_successfully 58 | 59 | publish-package: 60 | image: talusnetwork/sui-tools:${SUI_TAG} 61 | container_name: publish-package 62 | environment: 63 | - RUST_BACKTRACE=1 64 | - RUST_LOG=error 65 | - SUI_CONFIG_DIR=/opt/sui/config 66 | - SHARED_DIR=/app/shared 67 | - RPC_URL=${RPC_URL} 68 | 69 | volumes: 70 | - shared:/app/shared 71 | - ./bin/publish_package.sh:/opt/sui/publish_package.sh:ro 72 | - ./genesis/static/client.yaml:/opt/sui/config/client.yaml:rw 73 | - ./genesis/static/sui.keystore:/opt/sui/config/sui.keystore:ro 74 | - onchain:/opt/sui/onchain 75 | command: ["bash", "/opt/sui/publish_package.sh"] 76 | restart: on-failure 77 | depends_on: 78 | build-suitools: 79 | condition: service_completed_successfully 80 | initialize-onchain: 81 | condition: service_completed_successfully 82 | 83 | validator1: 84 | <<: *validator-base 85 | container_name: validator1 86 | hostname: validator1 87 | volumes: 88 | - validator1-db:/opt/sui/db:rw 89 | - genesis:/opt/sui/config 90 | command: 91 | [ 92 | "/usr/local/bin/sui-node", 93 | "--config-path", 94 | "/opt/sui/config/validator1-8080.yaml", 95 | ] 96 | 97 | validator2: 98 | <<: *validator-base 99 | container_name: validator2 100 | hostname: validator2 101 | volumes: 102 | - validator2-db:/opt/sui/db:rw 103 | - genesis:/opt/sui/config 104 | command: 105 | [ 106 | "/usr/local/bin/sui-node", 107 | "--config-path", 108 | "/opt/sui/config/validator2-8080.yaml", 109 | ] 110 | 111 | validator3: 112 | <<: *validator-base 113 | container_name: 
validator3 114 | hostname: validator3 115 | volumes: 116 | - validator3-db:/opt/sui/db:rw 117 | - genesis:/opt/sui/config 118 | command: 119 | [ 120 | "/usr/local/bin/sui-node", 121 | "--config-path", 122 | "/opt/sui/config/validator3-8080.yaml", 123 | ] 124 | 125 | validator4: 126 | <<: *validator-base 127 | container_name: validator4 128 | hostname: validator4 129 | volumes: 130 | - validator4-db:/opt/sui/db:rw 131 | - genesis:/opt/sui/config 132 | command: 133 | [ 134 | "/usr/local/bin/sui-node", 135 | "--config-path", 136 | "/opt/sui/config/validator4-8080.yaml", 137 | ] 138 | 139 | fullnode1: 140 | image: talusnetwork/sui-tools:${SUI_TAG} 141 | hostname: fullnode1 142 | container_name: fullnode1 143 | ports: 144 | - "9000:9000" 145 | environment: 146 | - RUST_BACKTRACE=1 147 | - RUST_LOG=info,sui_core=info,sui_network=info,sui_node=info,narwhal=info,narwhal-primary::helper=info,jsonrpsee=error 148 | - RPC_WORKER_THREAD=12 149 | - NEW_CHECKPOINT_WARNING_TIMEOUT_MS=30000 150 | - NEW_CHECKPOINT_PANIC_TIMEOUT_MS=60000 151 | - SUI_CONFIG_DIR=/opt/sui/config 152 | - RPC_URL=${RPC_URL} 153 | volumes: 154 | - fullnode1-db:/opt/sui/db:rw 155 | - genesis:/opt/sui/config 156 | - ./genesis/static/fullnode.yaml:/opt/sui/config/fullnode.yaml:ro 157 | - ./genesis/static/client.yaml:/opt/sui/config/client.yaml:rw 158 | healthcheck: 159 | test: 160 | [ 161 | "CMD", 162 | "curl", 163 | "-f", 164 | "-X", 165 | "POST", 166 | "-H", 167 | "Content-Type: application/json", 168 | "-d", 169 | '{"jsonrpc":"2.0","method":"sui_getChainIdentifier","id":1}', 170 | "http://localhost:9000", 171 | ] 172 | interval: 5s 173 | timeout: 10s 174 | retries: 3 175 | start_period: 120s 176 | command: 177 | [ 178 | "/usr/local/bin/sui-node", 179 | "--config-path", 180 | "/opt/sui/config/fullnode.yaml", 181 | ] 182 | restart: on-failure 183 | logging: 184 | driver: "json-file" 185 | options: 186 | max-file: "10" 187 | max-size: "1g" 188 | depends_on: 189 | build-genesis: 190 | condition: 
service_completed_successfully 191 | build-suitools: 192 | condition: service_completed_successfully 193 | 194 | faucet: 195 | image: talusnetwork/sui-tools:${SUI_TAG} 196 | hostname: faucet 197 | container_name: faucet 198 | environment: 199 | - RUST_BACKTRACE=1 200 | - RUST_LOG=info,sui_core=info,sui_network=info,sui_node=info,narwhal=info,narwhal-primary::helper=info,jsonrpsee=error 201 | - SUI_CONFIG_DIR=/opt/sui/config 202 | - HOST_IP=0.0.0.0 203 | - RPC_URL=${RPC_URL} 204 | ports: 205 | - "5003:5003" 206 | healthcheck: 207 | test: ["CMD", "curl", "-f", "http://localhost:5003"] 208 | interval: 5s 209 | timeout: 10s 210 | retries: 3 211 | start_period: 120s 212 | volumes: 213 | - genesis:/opt/sui/config 214 | - ./genesis/static/client.yaml:/opt/sui/config/client.yaml:rw 215 | - ./genesis/static/sui.keystore:/opt/sui/config/sui.keystore:ro 216 | command: 217 | [ 218 | "/usr/local/bin/sui-faucet", 219 | "--write-ahead-log", 220 | "/tmp/faucet.wal", 221 | "--host-ip", 222 | "0.0.0.0", 223 | "--amount", 224 | "10000000000000", 225 | ] 226 | restart: on-failure 227 | logging: 228 | driver: "json-file" 229 | options: 230 | max-file: "10" 231 | max-size: "1g" 232 | depends_on: 233 | build-genesis: 234 | condition: service_completed_successfully 235 | build-suitools: 236 | condition: service_completed_successfully 237 | fullnode1: 238 | condition: service_healthy 239 | 240 | volumes: 241 | onchain: 242 | genesis: 243 | validator1-db: 244 | validator2-db: 245 | validator3-db: 246 | validator4-db: 247 | fullnode1-db: 248 | shared: 249 | -------------------------------------------------------------------------------- /examples/cli_cluster.py: -------------------------------------------------------------------------------- 1 | # Runs an example that prompts the user to define a cluster, agents, tasks, and tools. 
2 | 3 | from nexus_sdk import ( 4 | create_cluster, 5 | create_agent_for_cluster, 6 | create_task, 7 | execute_cluster, 8 | get_cluster_execution_response, 9 | ) 10 | from pysui.sui.sui_txn.sync_transaction import SuiTransaction 11 | from pysui.sui.sui_types.scalars import ObjectID, SuiString 12 | from pysui.sui.sui_types.collections import SuiArray 13 | 14 | 15 | def get_user_input_for_cluster(): 16 | cluster_name = input("Enter Cluster name: ") 17 | cluster_description = input("Enter Cluster description: ") 18 | return cluster_name, cluster_description 19 | 20 | 21 | def get_user_input_for_agent(): 22 | agent_name = input("Enter Agent name: ") 23 | agent_role = input("Enter Agent role: ") 24 | agent_goal = input("Enter Agent goal: ") 25 | agent_backstory = input("Enter Agent backstory: ") 26 | return { 27 | "name": agent_name, 28 | "role": agent_role, 29 | "goal": agent_goal, 30 | "backstory": agent_backstory, 31 | } 32 | 33 | 34 | def get_user_input_for_task(): 35 | task_name = input("Enter Task name: ") 36 | agent_name = input("Enter Agent name for this task: ") 37 | task_description = input("Enter Task description: ") 38 | task_expected_output = input("Enter Task expected output: ") 39 | task_prompt = input("Enter Task prompt: ") 40 | task_context = input("Enter Task context: ") 41 | return { 42 | "name": task_name, 43 | "agent_name": agent_name, 44 | "description": task_description, 45 | "expected_output": task_expected_output, 46 | "prompt": task_prompt, 47 | "context": task_context, 48 | } 49 | 50 | 51 | def get_user_input_for_tool(): 52 | task_name = input("Enter Task name for this tool: ") 53 | tool_name = input("Enter Tool name: ") 54 | tool_args = input("Enter Tool args (separated by commas, no spaces): ") 55 | return {"task_name": task_name, "tool_name": tool_name, "tool_args": tool_args} 56 | 57 | 58 | class CliCluster: 59 | def __init__( 60 | self, 61 | client, 62 | package_id, 63 | model_id, 64 | model_owner_cap_id, 65 | cluster_name, 66 | 
cluster_description, 67 | agents, 68 | tasks, 69 | tools, 70 | ): 71 | self.client = client 72 | self.package_id = package_id 73 | self.model_id = model_id 74 | self.model_owner_cap_id = model_owner_cap_id 75 | 76 | self.cluster_name = cluster_name 77 | self.cluster_description = cluster_description 78 | self.agents = agents 79 | self.tasks = tasks 80 | self.tools = tools 81 | 82 | def setup_cluster(self): 83 | cluster_id, cluster_owner_cap_id = create_cluster( 84 | self.client, 85 | self.package_id, 86 | self.cluster_name, 87 | self.cluster_description, 88 | ) 89 | return cluster_id, cluster_owner_cap_id 90 | 91 | def setup_agents(self, cluster_id, cluster_owner_cap_id): 92 | for agent in self.agents: 93 | create_agent_for_cluster( 94 | self.client, 95 | self.package_id, 96 | cluster_id, 97 | cluster_owner_cap_id, 98 | self.model_id, 99 | self.model_owner_cap_id, 100 | agent["name"], 101 | agent["role"], 102 | agent["goal"], 103 | agent["backstory"], 104 | ) 105 | 106 | def setup_tasks(self, cluster_id, cluster_owner_cap_id): 107 | for task in self.tasks: 108 | create_task( 109 | client=self.client, 110 | package_id=self.package_id, 111 | cluster_id=cluster_id, 112 | cluster_owner_cap_id=cluster_owner_cap_id, 113 | name=task["name"], 114 | agent_name=task["agent_name"], 115 | description=task["description"], 116 | expected_output=task["expected_output"], 117 | prompt=task["prompt"], 118 | context=task["context"], 119 | ) 120 | 121 | def setup_tools(self, cluster_id, cluster_owner_cap_id): 122 | for tool in self.tools: 123 | self.attach_tool_to_task( 124 | cluster_id=cluster_id, 125 | cluster_owner_cap_id=cluster_owner_cap_id, 126 | task_name=tool["task_name"], 127 | tool_name=tool["tool_name"], 128 | tool_args=tool["tool_args"], 129 | ) 130 | 131 | def attach_tool_to_task( 132 | self, 133 | cluster_id, 134 | cluster_owner_cap_id, 135 | task_name, 136 | tool_name, 137 | tool_args, 138 | ): 139 | txn = SuiTransaction(client=self.client) 140 | 141 | try: 142 | result 
= txn.move_call( 143 | target=f"{self.package_id}::cluster::attach_tool_to_task_entry", 144 | arguments=[ 145 | ObjectID(cluster_id), 146 | ObjectID(cluster_owner_cap_id), 147 | SuiString(task_name), 148 | SuiString(tool_name), 149 | SuiArray([SuiString(arg) for arg in tool_args]), 150 | ], 151 | ) 152 | except Exception as e: 153 | print(f"Error in attach_task_to_tool: {e}") 154 | return None 155 | 156 | result = txn.execute(gas_budget=10000000) 157 | 158 | if result.is_ok(): 159 | if result.result_data.effects.status.status == "success": 160 | print(f"Task attached to Tool") 161 | return True 162 | else: 163 | error_message = result.result_data.effects.status.error 164 | print(f"Transaction failed: {error_message}") 165 | return None 166 | return None 167 | 168 | def run(self, user_input): 169 | cluster_id, cluster_owner_cap_id = self.setup_cluster() 170 | self.setup_agents(cluster_id, cluster_owner_cap_id) 171 | self.setup_tasks(cluster_id, cluster_owner_cap_id) 172 | 173 | execution_id = execute_cluster( 174 | self.client, 175 | self.package_id, 176 | cluster_id, 177 | user_input, 178 | ) 179 | 180 | if execution_id is None: 181 | return "Cluster execution failed" 182 | 183 | print(f"Cluster execution started with ID: {execution_id}") 184 | return get_cluster_execution_response(self.client, execution_id, 600) 185 | 186 | 187 | # Runs the CLI agent example using the provided Nexus package ID. 188 | def run_cli_cluster_example(client, package_id, model_id, mode_owner_cap): 189 | cluster_name, cluster_description = get_user_input_for_cluster() 190 | 191 | num_agents = int(input("How many agents would you like to define? ")) 192 | num_tasks = int(input("How many tasks would you like to define? ")) 193 | num_tools = int(input("How many tools would you like to define? 
")) 194 | 195 | agents = [] 196 | for i in range(num_agents): 197 | print(f"\nEnter details for Agent {i+1}:") 198 | agent = get_user_input_for_agent() 199 | agents.append(agent) 200 | 201 | tasks = [] 202 | for i in range(num_tasks): 203 | print(f"\nEnter details for Task {i+1}:") 204 | task = get_user_input_for_task() 205 | tasks.append(task) 206 | 207 | tools = [] 208 | for i in range(num_tools): 209 | print(f"\nEnter details for Tool {i+1}:") 210 | tool = get_user_input_for_tool() 211 | tools.append(tool) 212 | 213 | cluster = CliCluster( 214 | client, 215 | package_id, 216 | model_id, 217 | mode_owner_cap, 218 | cluster_name, 219 | cluster_description, 220 | agents, 221 | tasks, 222 | tools, 223 | ) 224 | 225 | print("Enter some text to start the execution with:") 226 | cluster.run(input()) 227 | -------------------------------------------------------------------------------- /examples/ig_post_planner.py: -------------------------------------------------------------------------------- 1 | # Use [run_ig_post_planner_example] to run the Instagram Post Planner example. 2 | # It's a blocking function that takes a client and package ID as arguments 3 | # and then prompts the user for input to describe what product they want to market. 
4 | 5 | from nexus_sdk import ( 6 | create_cluster, 7 | create_agent_for_cluster, 8 | create_task, 9 | execute_cluster, 10 | get_cluster_execution_response, 11 | ) 12 | 13 | 14 | class InstagramPostPlanner: 15 | def __init__( 16 | self, 17 | client, 18 | package_id, 19 | model_id, 20 | model_owner_cap_id, 21 | product_website, 22 | product_details, 23 | ): 24 | self.client = client 25 | self.package_id = package_id 26 | self.model_id = model_id 27 | self.model_owner_cap_id = model_owner_cap_id 28 | 29 | self.product_website = product_website 30 | self.product_details = product_details 31 | 32 | def setup_cluster(self): 33 | cluster_id, cluster_owner_cap_id = create_cluster( 34 | self.client, 35 | self.package_id, 36 | "Instagram Post Planning Cluster", 37 | "A cluster for creating Instagram marketing content", 38 | ) 39 | return cluster_id, cluster_owner_cap_id 40 | 41 | def setup_agents(self, cluster_id, cluster_owner_cap_id): 42 | agent_configs = [ 43 | ( 44 | "product_competitor", 45 | "Lead Market Analyst", 46 | "Conduct amazing analysis of products and competitors", 47 | ), 48 | ( 49 | "strategy_planner", 50 | "Chief Marketing Strategist", 51 | "Synthesize insights to formulate incredible marketing strategies", 52 | ), 53 | ( 54 | "creative_content", 55 | "Creative Content Creator", 56 | "Develop compelling content for social media campaigns", 57 | ), 58 | ( 59 | "senior_photographer", 60 | "Senior Photographer", 61 | "Take amazing photographs for Instagram ads", 62 | ), 63 | ( 64 | "chief_creative_director", 65 | "Chief Creative Director", 66 | "Oversee and approve the final content", 67 | ), 68 | ] 69 | 70 | for agent_name, role, goal in agent_configs: 71 | create_agent_for_cluster( 72 | self.client, 73 | self.package_id, 74 | cluster_id, 75 | cluster_owner_cap_id, 76 | self.model_id, 77 | self.model_owner_cap_id, 78 | agent_name, 79 | role, 80 | goal, 81 | f"An AI agent specialized in {role.lower()} for Instagram marketing.", 82 | ) 83 | 84 | def 
setup_tasks(self, cluster_id, cluster_owner_cap_id): 85 | tasks = [ 86 | ( 87 | "product_analysis", 88 | "product_competitor", 89 | f""" 90 | Analyze the product website: {self.product_website}. 91 | Extra details: {self.product_details}. 92 | Identify unique features, benefits, and overall narrative. 93 | Report on key selling points, market appeal, and suggestions for enhancement. 94 | """, 95 | ), 96 | ( 97 | "competitor_analysis", 98 | "product_competitor", 99 | f""" 100 | Explore competitors of: {self.product_website}. 101 | Identify top 3 competitors and analyze their strategies and positioning. 102 | Provide a detailed comparison to the competitors. 103 | """, 104 | ), 105 | ( 106 | "campaign_development", 107 | "strategy_planner", 108 | f""" 109 | Create a targeted marketing campaign for: {self.product_website}. 110 | Develop a strategy and creative content ideas that will resonate with the audience. 111 | Include all context about the product and customer. 112 | """, 113 | ), 114 | ( 115 | "instagram_ad_copy", 116 | "creative_content", 117 | """ 118 | Craft 3 engaging Instagram post copy options. 119 | Make them punchy, captivating, and concise. 120 | Align with the product marketing strategy and highlight unique selling points. 121 | Encourage viewers to take action (visit website, make purchase, learn more). 122 | """, 123 | ), 124 | ( 125 | "take_photograph", 126 | "senior_photographer", 127 | f""" 128 | Describe 3 amazing photo options for an Instagram post. 129 | Use the product details: {self.product_details}. 130 | Each description should be a paragraph, focusing on capturing audience attention. 131 | Don't show the actual product in the photo. 132 | """, 133 | ), 134 | ( 135 | "review_photo", 136 | "chief_creative_director", 137 | f""" 138 | Review the 3 photo options from the senior photographer. 139 | Ensure they align with the product goals: {self.product_website}. 140 | Approve, ask clarifying questions, or suggest improvements. 
141 | Provide 3 reviewed and improved photo descriptions. 142 | """, 143 | ), 144 | ] 145 | 146 | task_ids = [] 147 | for task_name, agent_id, description in tasks: 148 | task_id = create_task( 149 | self.client, 150 | self.package_id, 151 | cluster_id, 152 | cluster_owner_cap_id, 153 | task_name, 154 | agent_id, 155 | description, 156 | f"Complete {task_name} for Instagram post", 157 | description, 158 | "", 159 | ) 160 | task_ids.append(task_id) 161 | 162 | return task_ids 163 | 164 | def run(self): 165 | cluster_id, cluster_owner_cap_id = self.setup_cluster() 166 | self.setup_agents(cluster_id, cluster_owner_cap_id) 167 | self.setup_tasks(cluster_id, cluster_owner_cap_id) 168 | 169 | execution_id = execute_cluster( 170 | self.client, 171 | self.package_id, 172 | cluster_id, 173 | f""" 174 | Create an Instagram post for the product: {self.product_website} 175 | Additional details: {self.product_details} 176 | Provide both ad copy options and photo descriptions. 177 | """, 178 | ) 179 | 180 | if execution_id is None: 181 | return "Cluster execution failed" 182 | 183 | print(f"Cluster execution started with ID: {execution_id}") 184 | return get_cluster_execution_response(self.client, execution_id, 600) 185 | 186 | 187 | # Runs the Instagram Post Planner example using the provided Nexus package ID. 188 | def run_ig_post_planner_example(client, package_id, model_id, mode_owner_cap): 189 | print("## Welcome to the Instagram Post Planner") 190 | print("-------------------------------") 191 | product_website = input( 192 | "What is the product website you want a marketing strategy for? " 193 | ) 194 | product_details = input( 195 | "Any extra details about the product and/or the Instagram post you want? 
" 196 | ) 197 | 198 | planner = InstagramPostPlanner( 199 | client, package_id, model_id, mode_owner_cap, product_website, product_details 200 | ) 201 | result = planner.run() 202 | 203 | print("\n\n########################") 204 | print("## Here is the result") 205 | print("########################\n") 206 | print(result) 207 | -------------------------------------------------------------------------------- /nexus_sdk/src/nexus_sdk/cluster.py: -------------------------------------------------------------------------------- 1 | from pysui.sui.sui_builders.get_builders import GetObject 2 | from pysui.sui.sui_txn.sync_transaction import SuiTransaction 3 | from pysui.sui.sui_types.scalars import ObjectID, SuiString 4 | import time 5 | import ast 6 | import traceback 7 | 8 | # Equal to 1 SUI which should be enough for most transactions. 9 | GAS_BUDGET = 1000000000 10 | 11 | 12 | # Creates an empty cluster object to which agents and tasks can be added. 13 | # See functions [create_agent_for_cluster] and [create_task]. 14 | # 15 | # Returns the cluster ID and the cluster owner capability ID. 
def create_cluster(client, package_id, name, description, gas_budget=GAS_BUDGET):
    """Create an empty cluster on-chain.

    Args:
        client: pysui sync client used to sign and submit the transaction.
        package_id: Nexus Move package ID.
        name: Human-readable cluster name.
        description: Human-readable cluster description.
        gas_budget: Gas budget in MIST (defaults to 1 SUI).

    Returns:
        A ``(cluster_id, cluster_owner_cap_id)`` tuple on success, or
        ``None`` on any failure (the reason is printed to stdout).
    """
    txn = SuiTransaction(client=client)

    try:
        # The return value of move_call is not needed; the call is recorded
        # on the transaction builder and submitted by execute() below.
        txn.move_call(
            target=f"{package_id}::cluster::create",
            arguments=[SuiString(name), SuiString(description)],
        )
        result = txn.execute(gas_budget=gas_budget)
        if result.is_ok():
            if result.result_data.effects.status.status == "success":
                # just because it says "parsed_json" doesn't mean it's actually valid JSON apparently
                not_json = result.result_data.events[0].parsed_json
                created_event = ast.literal_eval(not_json.replace("\n", "\\n"))
                cluster_id = created_event["cluster"]
                cluster_owner_cap_id = created_event["owner_cap"]

                return cluster_id, cluster_owner_cap_id
            # The RPC call went through but the Move call aborted on-chain;
            # surface the on-chain error (result_string only describes the
            # RPC layer), matching how execute_cluster reports failures.
            print(
                f"Failed to create Cluster: {result.result_data.effects.status.error}"
            )
            return None
        print(f"Failed to create Cluster: {result.result_string}")
        return None
    except Exception as e:
        print(f"Error in create_cluster: {e}")
        return None


# Creates a new agent for the given cluster.
# This means that the agent does not live on-chain as a standalone object that
# other clusters could reference.
def create_agent_for_cluster(
    client,
    package_id,
    cluster_id,
    cluster_owner_cap_id,
    model_id,
    model_owner_cap_id,
    name,
    role,
    goal,
    backstory,
    gas_budget=GAS_BUDGET,
):
    """Add an agent to an existing cluster.

    The agent lives inside the cluster object rather than as a standalone
    on-chain object that other clusters could reference.

    Returns:
        ``True`` on success, ``False`` on any failure (reason printed).
    """
    txn = SuiTransaction(client=client)

    try:
        txn.move_call(
            target=f"{package_id}::cluster::add_agent_entry",
            arguments=[
                ObjectID(cluster_id),
                ObjectID(cluster_owner_cap_id),
                ObjectID(model_id),
                ObjectID(model_owner_cap_id),
                SuiString(name),
                SuiString(role),
                SuiString(goal),
                SuiString(backstory),
            ],
        )
        result = txn.execute(gas_budget=gas_budget)
        if result.is_ok():
            # An OK RPC response does not imply the Move call succeeded:
            # check the on-chain execution status like create_cluster does,
            # instead of reporting an aborted transaction as success.
            if result.result_data.effects.status.status == "success":
                return True
            print(
                f"Failed to add Agent: {result.result_data.effects.status.error}"
            )
            return False
        print(f"Failed to add Agent: {result.result_string}")
        return False
    except Exception as e:
        # Fixed message: it previously said "create_agent", which mislabeled
        # the failing function.
        print(f"Error in create_agent_for_cluster: {e}")
        return False


# Creates a new task for the given cluster.
# Each task must be executed by an agent that is part of the cluster.
def create_task(
    client,
    package_id,
    cluster_id,
    cluster_owner_cap_id,
    name,
    agent_name,
    description,
    expected_output,
    prompt,
    context,
    gas_budget=GAS_BUDGET,
):
    """Add a task to an existing cluster, to be run by agent ``agent_name``.

    Returns:
        ``True`` on success, ``False`` on any failure (reason printed).
    """
    txn = SuiTransaction(client=client)

    try:
        txn.move_call(
            target=f"{package_id}::cluster::add_task_entry",
            arguments=[
                ObjectID(cluster_id),
                ObjectID(cluster_owner_cap_id),
                SuiString(name),
                SuiString(agent_name),
                SuiString(description),
                SuiString(expected_output),
                SuiString(prompt),
                SuiString(context),
            ],
        )
        result = txn.execute(gas_budget=gas_budget)
        if result.is_ok():
            # Same on-chain status check as create_agent_for_cluster: an
            # aborted Move call must not be reported as success.
            if result.result_data.effects.status.status == "success":
                return True
            print(
                f"Failed to add Task: {result.result_data.effects.status.error}"
            )
            return False
        print(f"Failed to add Task: {result.result_string}")
        return False
    except Exception as e:
        print(f"Error in create_task: {e}")
        return False


# Begins execution of a cluster.
# Returns the cluster execution ID.
# Use the function [get_cluster_execution_response] to fetch the response of the execution
# in a blocking manner.
def execute_cluster(
    client,
    package_id,
    cluster_id,
    input,  # NOTE: shadows builtins.input; name kept for backward compatibility.
    gas_budget=GAS_BUDGET,
):
    """Start execution of a cluster with the given user input.

    Returns:
        The cluster execution object ID, or ``None`` on failure.
    """
    txn = SuiTransaction(client=client)

    try:
        txn.move_call(
            target=f"{package_id}::cluster::execute",
            arguments=[ObjectID(cluster_id), SuiString(input)],
        )
        # execute() is now inside the try-block: previously an RPC exception
        # raised here propagated uncaught, unlike every sibling helper in
        # this module which reports the error and returns a failure value.
        result = txn.execute(gas_budget=gas_budget)
    except Exception as e:
        print(f"Error in execute_cluster: {e}")
        traceback.print_exc()
        return None

    if result.is_ok():
        if result.result_data.effects.status.status == "success":
            # just because it says "parsed_json" doesn't mean it's actually valid JSON apparently
            not_json = result.result_data.events[0].parsed_json
            created_event = ast.literal_eval(not_json.replace("\n", "\\n"))

            # There's going to be either field "execution" or "cluster execution"
            # because there are two events emitted in the tx.
            # We could check for the event name or just try both.
            execution_id = created_event.get(
                "execution", created_event.get("cluster_execution")
            )

            return execution_id
        else:
            error_message = result.result_data.effects.status.error
            print(f"Execute Cluster Transaction failed: {error_message}")
            return None
    else:
        print(f"Failed to create ClusterExecution: {result.result_string}")
        return None


# Fetches the response of a cluster execution.
# If the execution is not complete within the specified time, the function returns a timeout message.
def get_cluster_execution_response(
    client, execution_id, max_wait_time_s=180, check_interval_s=5
):
    """Poll the execution object until it completes or the deadline passes.

    Returns the cluster response on success, otherwise a human-readable
    error / timeout message.
    """
    deadline = time.time() + max_wait_time_s
    while time.time() < deadline:
        try:
            # Fetch the current state of the execution object.
            get_object_builder = GetObject(object_id=ObjectID(execution_id))
            result = client.execute(get_object_builder)

            if not result.is_ok():
                return f"Failed to get object: {result.result_string}"

            object_data = result.result_data
            if object_data and object_data.content:
                fields = object_data.content.fields
                status = fields.get("status")
                if status == "SUCCESS":
                    return fields.get("cluster_response")
                if status == "FAILED":
                    return f"Execution failed: {fields.get('error_message')}"
                if status == "IDLE":
                    print("Execution has not started yet.")
                elif status == "RUNNING":
                    until_timeout = deadline - time.time()
                    print(
                        "Execution is still running, waiting... (%.2fs until timeout)"
                        % until_timeout
                    )
                else:
                    return f"Unknown status: {status}"

            # Still IDLE/RUNNING (or object not yet readable): poll again.
            time.sleep(check_interval_s)

        except Exception as e:
            return f"Error checking execution status: {e}"

    return "Timeout: Execution did not complete within the specified time."
--------------------------------------------------------------------------------
/onchain/sources/tests/cluster_tests.move:
--------------------------------------------------------------------------------
#[test_only]
module talus::cluster_tests {
    use std::string;
    use sui::table;
    use sui::test_scenario::{Self, Scenario, ctx};
    use sui::test_utils::print;
    use talus::agent::{Self, AgentBlueprint, AgentName};
    use talus::cluster::{Self, Cluster, ClusterExecution, ClusterOwnerCap};
    use talus::model::{Self, ModelInfo};
    use talus::task::{Self, TaskBlueprint, TaskName};

    #[test]
    /// The goal of this test is to verify that a cluster can be set up and
    /// that two tasks can be executed successfully, proving that the state
    /// machine works.
    ///
    /// 1. We setup a cluster with two tasks: analyze poem request and create poem.
    /// 2. We simulate the execution of the cluster with a valid input and verify
    ///    that the cluster execution is in the correct state.
    /// 3. We simulate the completion of the first task (from the POV of an agent)
    ///    and verify that the cluster execution is in the correct state.
    /// 4. We simulate the completion of the second task (from the POV of an agent)
    ///    and verify that the cluster execution is successful.
    fun test_poem_creation_cluster() {
        let owner = @0x1;
        let mut scenario = test_scenario::begin(owner);

        //
        // 1.
        //
        setup_poem_creation_cluster(&mut scenario);

        //
        // 2.
        //
        // A new tx boundary is needed so the shared Cluster created above
        // becomes visible to take_shared.
        test_scenario::next_tx(&mut scenario, owner);
        {
            print(b"Creating and executing cluster with valid input");
            let cluster = test_scenario::take_shared(&scenario);
            let input = string::utf8(b"Create a poem about nature in a romantic style");
            cluster::execute(&cluster, input, ctx(&mut scenario));
            test_scenario::return_shared(cluster);
        };
        // The ClusterExecution shared object created by execute() is only
        // takeable after another tx boundary.
        test_scenario::next_tx(&mut scenario, owner);
        {
            let execution = test_scenario::take_shared(&scenario);
            verify_initial_state(&execution);
            test_scenario::return_shared(execution);
        };

        //
        // 3.
        //
        test_scenario::next_tx(&mut scenario, owner);
        {
            let mut execution = test_scenario::take_shared(&scenario);
            let owner_cap = test_scenario::take_from_address(&scenario, owner);
            let analysis_result = string::utf8(b"The user has requested a romantic poem about nature. Both style (romantic) and subject (nature) are present.");
            cluster::submit_completion_as_cluster_owner(&mut execution, &owner_cap, analysis_result);
            verify_analysis_state(&execution);
            test_scenario::return_shared(execution);
            test_scenario::return_to_address(owner, owner_cap);
        };

        //
        // 4.
        //
        test_scenario::next_tx(&mut scenario, owner);
        {
            let mut execution = test_scenario::take_shared(&scenario);
            let owner_cap = test_scenario::take_from_address(&scenario, owner);
            let poem = string::utf8(b"Gentle breeze whispers through leaves,\nNature's love song in the air,\nMoonlit meadows, stars above,\nA romantic scene beyond compare.");
            cluster::submit_completion_as_cluster_owner(&mut execution, &owner_cap, poem);
            verify_final_state(&execution);
            test_scenario::return_shared(execution);
            test_scenario::return_to_address(owner, owner_cap);
        };

        test_scenario::end(scenario);
    }

    /// Right after execute(): task 1 must be running, task 2 still idle.
    fun verify_initial_state(execution: &ClusterExecution) {
        assert!(cluster::is_execution_running(execution), 0);
        let tasks = cluster::get_execution_task_statuses(execution);
        assert!(table::length(tasks) == 2, 1);

        let task1 = table::borrow(tasks, task1_name());
        assert!(task::is_running(task1), 2);
        let task2 = table::borrow(tasks, task2_name());
        assert!(task::is_idle(task2), 3);
    }

    /// After the first completion: task 1 done, task 2 now running.
    fun verify_analysis_state(execution: &ClusterExecution) {
        assert!(cluster::is_execution_running(execution), 0);
        let tasks = cluster::get_execution_task_statuses(execution);

        let task1 = table::borrow(tasks, task1_name());
        assert!(task::is_successful(task1), 1);
        let task2 = table::borrow(tasks, task2_name());
        assert!(task::is_running(task2), 2);
    }

    /// After the second completion: both tasks done and the final response
    /// is the last submitted completion (the poem).
    fun verify_final_state(execution: &ClusterExecution) {
        assert!(cluster::is_execution_successful(execution), 0);
        let tasks = cluster::get_execution_task_statuses(execution);

        let task1 = table::borrow(tasks, task1_name());
        assert!(task::is_successful(task1), 2);
        let task2 = table::borrow(tasks, task2_name());
        assert!(task::is_successful(task2) , 3);

        let response = cluster::get_execution_response(execution);
        // index_of == 0 means the response starts with the poem text.
        assert!(string::index_of(&response, &string::utf8(b"Gentle breeze")) == 0, 4);
    }

    /// Builds the shared Cluster with two agents and two chained tasks.
    fun setup_poem_creation_cluster(scenario: &mut Scenario) {
        let ctx = ctx(scenario);

        let model = model::new_mock_info_for_testing(ctx);
        let manager_agent = create_manager_agent(&model);
        let poet_agent = create_poet_agent(&model);
        let poet_agent_name = agent::get_name(&poet_agent);

        let task1 = create_task1(poet_agent_name);
        let task2 = create_task2(poet_agent_name);

        cluster::create(
            string::utf8(b"Poem Creation Cluster"),
            string::utf8(b"A cluster for creating custom poems"),
            ctx
        );

        // create() shares the Cluster and transfers the owner cap; both are
        // only accessible after the next tx boundary.
        test_scenario::next_tx(scenario, @0x1);
        {
            let mut cluster = test_scenario::take_shared(scenario);
            let cap = test_scenario::take_from_address(scenario, @0x1);
            cluster::add_agent(&mut cluster, &cap, manager_agent);
            cluster::add_agent(&mut cluster, &cap, poet_agent);
            cluster::add_task(&mut cluster, &cap, task1);
            cluster::add_task(&mut cluster, &cap, task2);
            test_scenario::return_shared(cluster);
            test_scenario::return_to_address(@0x1, cap);
        };
    }

    /// Agent blueprint that oversees the poem-creation process.
    fun create_manager_agent(model: &ModelInfo): AgentBlueprint {
        agent::new(
            agent::into_name(string::utf8(b"Manager")),
            string::utf8(b"Poem Creation Manager"),
            string::utf8(b"Manage the poem creation process"),
            string::utf8(b"An AI trained to oversee poem creation"),
            *model,
        )
    }

    /// Agent blueprint that writes the poem (assigned to both tasks).
    fun create_poet_agent(model: &ModelInfo): AgentBlueprint {
        agent::new(
            agent::into_name(string::utf8(b"Poet")),
            string::utf8(b"AI Poet"),
            string::utf8(b"Create beautiful poems"),
            string::utf8(b"An AI trained to create poetic masterpieces"),
            *model,
        )
    }

    fun task1_name(): TaskName {
        task::into_name(string::utf8(b"Analyze Poem Request"))
    }

    /// First task: analyze the user's poem request.
    fun create_task1(agent: AgentName): TaskBlueprint {
        task::new(
            task1_name(),
            agent,
            string::utf8(b"Analyze the user's request for poem creation"),
            string::utf8(b"A structured analysis of the poem request"),
            string::utf8(b"Analyze the user's input for poem style and subject. If either is missing, prepare an error message."),
            string::utf8(b""),
        )
    }

    fun task2_name(): TaskName {
        task::into_name(string::utf8(b"Create Poem"))
    }

    /// Second task: create the poem from the analysis.
    fun create_task2(agent: AgentName): TaskBlueprint {
        task::new(
            task2_name(),
            agent,
            string::utf8(b"Create a poem based on the analyzed request"),
            string::utf8(b"A poem matching the user's requirements"),
            string::utf8(b"Create a poem based on the provided style and subject. Be creative and inspiring."),
            string::utf8(b""),
        )
    }
}
--------------------------------------------------------------------------------