├── .gitattributes
├── .github
├── ISSUE_TEMPLATE
│ ├── config.yml
│ ├── request_new_features.yaml
│ └── show_me_the_bug.yaml
├── PULL_REQUEST_TEMPLATE.md
├── dependabot.yml
└── workflows
│ ├── build-package.yaml
│ ├── environment-corrupt-check.yaml
│ ├── pr-autodiff.yaml
│ ├── pre-commit.yaml
│ ├── stale.yaml
│ └── top-issues.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── .vscode
├── extensions.json
└── settings.json
├── CODE_OF_CONDUCT.md
├── Dockerfile
├── LICENSE
├── README.md
├── README_ja.md
├── README_ko.md
├── README_zh.md
├── app
├── __init__.py
├── agent
│ ├── __init__.py
│ ├── base.py
│ ├── browser.py
│ ├── data_analysis.py
│ ├── manus.py
│ ├── mcp.py
│ ├── react.py
│ ├── swe.py
│ └── toolcall.py
├── bedrock.py
├── config.py
├── exceptions.py
├── flow
│ ├── __init__.py
│ ├── base.py
│ ├── flow_factory.py
│ └── planning.py
├── llm.py
├── logger.py
├── mcp
│ ├── __init__.py
│ └── server.py
├── prompt
│ ├── __init__.py
│ ├── browser.py
│ ├── manus.py
│ ├── mcp.py
│ ├── planning.py
│ ├── swe.py
│ ├── toolcall.py
│ └── visualization.py
├── sandbox
│ ├── __init__.py
│ ├── client.py
│ └── core
│ │ ├── exceptions.py
│ │ ├── manager.py
│ │ ├── sandbox.py
│ │ └── terminal.py
├── schema.py
└── tool
│ ├── __init__.py
│ ├── ask_human.py
│ ├── base.py
│ ├── bash.py
│ ├── browser_use_tool.py
│ ├── chart_visualization
│ ├── README.md
│ ├── README_ja.md
│ ├── README_ko.md
│ ├── README_zh.md
│ ├── __init__.py
│ ├── chart_prepare.py
│ ├── data_visualization.py
│ ├── package-lock.json
│ ├── package.json
│ ├── python_execute.py
│ ├── src
│ │ └── chartVisualize.ts
│ ├── test
│ │ ├── chart_demo.py
│ │ └── report_demo.py
│ └── tsconfig.json
│ ├── create_chat_completion.py
│ ├── file_operators.py
│ ├── mcp.py
│ ├── planning.py
│ ├── python_execute.py
│ ├── search
│ ├── __init__.py
│ ├── baidu_search.py
│ ├── base.py
│ ├── bing_search.py
│ ├── duckduckgo_search.py
│ └── google_search.py
│ ├── str_replace_editor.py
│ ├── terminate.py
│ ├── tool_collection.py
│ └── web_search.py
├── assets
├── community_group.jpg
└── logo.jpg
├── config
├── .gitignore
├── config.example-model-anthropic.toml
├── config.example-model-azure.toml
├── config.example-model-google.toml
├── config.example-model-ollama.toml
├── config.example-model-ppio.toml
├── config.example.toml
└── mcp.example.json
├── examples
├── benchmarks
│ └── __init__.py
└── use_case
│ ├── japan-travel-plan
│ ├── japan_travel_guide_instructions.txt
│ ├── japan_travel_handbook.html
│ ├── japan_travel_handbook_mobile.html
│ └── japan_travel_handbook_print.html
│ ├── pictures
│ ├── japan-travel-plan-1.png
│ └── japan-travel-plan-2.png
│ └── readme.md
├── main.py
├── requirements.txt
├── run_flow.py
├── run_mcp.py
├── run_mcp_server.py
├── setup.py
├── tests
└── sandbox
│ ├── test_client.py
│ ├── test_docker_terminal.py
│ ├── test_sandbox.py
│ └── test_sandbox_manager.py
└── workspace
└── example.txt
/.gitattributes:
--------------------------------------------------------------------------------
1 | # HTML files skew the repository language statistics, so exclude them from detection
2 | *.html linguist-detectable=false
3 | # Auto detect text files and perform LF normalization
4 | * text=auto eol=lf
5 | # Ensure shell scripts use LF (Linux style) line endings on Windows
6 | *.sh text eol=lf
7 | # Treat specific binary files as binary and prevent line ending conversion
8 | *.png binary
9 | *.jpg binary
10 | *.gif binary
11 | *.ico binary
12 | *.jpeg binary
13 | *.mp3 binary
14 | *.zip binary
15 | *.bin binary
16 | # Preserve original line endings for specific document files
17 | *.doc text eol=crlf
18 | *.docx text eol=crlf
19 | *.pdf binary
20 | # Ensure source code and script files use LF line endings
21 | *.py text eol=lf
22 | *.js text eol=lf
23 | *.html text eol=lf
24 | *.css text eol=lf
25 | # Specify custom diff driver for specific file types
26 | *.md diff=markdown
27 | *.json diff=json
28 | *.mp4 filter=lfs diff=lfs merge=lfs -text
29 | *.mov filter=lfs diff=lfs merge=lfs -text
30 | *.webm filter=lfs diff=lfs merge=lfs -text
31 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: "Join the Community Group"
4 | about: Join the OpenManus community to discuss and get help from others
5 | url: https://github.com/FoundationAgents/OpenManus?tab=readme-ov-file#community-group
6 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/request_new_features.yaml:
--------------------------------------------------------------------------------
1 | name: "🤔 Request new features"
2 | description: Suggest ideas or features you’d like to see implemented in OpenManus.
3 | labels: enhancement
4 | body:
5 | - type: textarea
6 | id: feature-description
7 | attributes:
8 | label: Feature description
9 | description: |
10 | Provide a clear and concise description of the proposed feature
11 | validations:
12 | required: true
13 | - type: textarea
14 | id: your-feature
15 | attributes:
16 | label: Your Feature
17 | description: |
18 | Explain your idea or implementation process, if any. Optionally, include a Pull Request URL.
19 | Ensure accompanying docs/tests/examples are provided for review.
20 | validations:
21 | required: false
22 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/show_me_the_bug.yaml:
--------------------------------------------------------------------------------
1 | name: "🪲 Show me the Bug"
2 | description: Report a bug encountered while using OpenManus and seek assistance.
3 | labels: bug
4 | body:
5 | - type: textarea
6 | id: bug-description
7 | attributes:
8 | label: Bug Description
9 | description: |
10 | Clearly describe the bug you encountered
11 | validations:
12 | required: true
13 | - type: textarea
14 | id: solve-method
15 | attributes:
16 | label: Bug solved method
17 | description: |
18 | If resolved, explain the solution. Optionally, include a Pull Request URL.
19 | If unresolved, provide additional details to aid investigation
20 | validations:
21 | required: true
22 | - type: textarea
23 | id: environment-information
24 | attributes:
25 | label: Environment information
26 | description: |
27 | System: e.g., Ubuntu 22.04
28 | Python: e.g., 3.12
29 | OpenManus version: e.g., 0.1.0
30 | value: |
31 | - System version:
32 | - Python version:
33 | - OpenManus version or branch:
34 | - Installation method (e.g., `pip install -r requirements.txt` or `pip install -e .`):
35 | validations:
36 | required: true
37 | - type: textarea
38 | id: extra-information
39 | attributes:
40 | label: Extra information
41 | description: |
42 | For example, attach screenshots or logs to help diagnose the issue
43 | validations:
44 | required: false
45 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | **Features**
2 |
3 |
4 | - Feature 1
5 | - Feature 2
6 |
7 | **Feature Docs**
8 |
9 |
10 | **Influence**
11 |
12 |
13 | **Result**
14 |
15 |
16 | **Other**
17 |
18 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "pip"
4 | directory: "/"
5 | schedule:
6 | interval: "weekly"
7 | open-pull-requests-limit: 4
8 | groups:
9 | # Group critical packages that might need careful review
10 | core-dependencies:
11 | patterns:
12 | - "pydantic*"
13 | - "openai"
14 | - "fastapi"
15 | - "tiktoken"
16 | browsergym-related:
17 | patterns:
18 | - "browsergym*"
19 | - "browser-use"
20 | - "playwright"
21 | search-tools:
22 | patterns:
23 | - "googlesearch-python"
24 | - "baidusearch"
25 | - "duckduckgo_search"
26 | pre-commit:
27 | patterns:
28 | - "pre-commit"
29 | security-all:
30 | applies-to: "security-updates"
31 | patterns:
32 | - "*"
33 | version-all:
34 | applies-to: "version-updates"
35 | patterns:
36 | - "*"
37 | exclude-patterns:
38 | - "pydantic*"
39 | - "openai"
40 | - "fastapi"
41 | - "tiktoken"
42 | - "browsergym*"
43 | - "browser-use"
44 | - "playwright"
45 | - "googlesearch-python"
46 | - "baidusearch"
47 | - "duckduckgo_search"
48 | - "pre-commit"
49 |
50 | - package-ecosystem: "github-actions"
51 | directory: "/"
52 | schedule:
53 | interval: "weekly"
54 | open-pull-requests-limit: 4
55 | groups:
56 | actions:
57 | patterns:
58 | - "*"
59 |
--------------------------------------------------------------------------------
/.github/workflows/build-package.yaml:
--------------------------------------------------------------------------------
1 | name: Build and upload Python package
2 |
3 | on:
4 | workflow_dispatch:
5 | release:
6 | types: [created, published]
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v4
13 | - name: Set up Python
14 | uses: actions/setup-python@v5
15 | with:
16 | python-version: '3.12'
17 | cache: 'pip'
18 | - name: Install dependencies
19 | run: |
20 | python -m pip install --upgrade pip
21 | pip install -r requirements.txt
22 | pip install setuptools wheel twine
23 | - name: Set package version
24 | run: |
25 | export VERSION="${GITHUB_REF#refs/tags/v}"
26 | sed -i "s/version=.*/version=\"${VERSION}\",/" setup.py
27 | - name: Build and publish
28 | env:
29 | TWINE_USERNAME: __token__
30 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
31 | run: |
32 | python setup.py bdist_wheel sdist
33 | twine upload dist/*
34 |
--------------------------------------------------------------------------------
/.github/workflows/environment-corrupt-check.yaml:
--------------------------------------------------------------------------------
1 | name: Environment Corruption Check
2 | on:
3 | push:
4 | branches: ["main"]
5 | paths:
6 | - requirements.txt
7 | pull_request:
8 | branches: ["main"]
9 | paths:
10 | - requirements.txt
11 | concurrency:
12 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }}
13 | cancel-in-progress: true
14 | jobs:
15 | test-python-versions:
16 | runs-on: ubuntu-latest
17 | strategy:
18 | matrix:
19 | python-version: ["3.11.11", "3.12.8", "3.13.2"]
20 | fail-fast: false
21 | steps:
22 | - name: Checkout repository
23 | uses: actions/checkout@v4
24 | - name: Set up Python ${{ matrix.python-version }}
25 | uses: actions/setup-python@v5
26 | with:
27 | python-version: ${{ matrix.python-version }}
28 | - name: Upgrade pip
29 | run: |
30 | python -m pip install --upgrade pip
31 | - name: Install dependencies
32 | run: |
33 | pip install -r requirements.txt
34 |
--------------------------------------------------------------------------------
/.github/workflows/pr-autodiff.yaml:
--------------------------------------------------------------------------------
1 | name: PR Diff Summarization
2 | on:
3 | # pull_request:
4 | # branches: [main]
5 | # types: [opened, ready_for_review, reopened]
6 | issue_comment:
7 | types: [created]
8 | permissions:
9 | contents: read
10 | pull-requests: write
11 | jobs:
12 | pr-diff-summarization:
13 | runs-on: ubuntu-latest
14 | if: |
15 | (github.event_name == 'pull_request') ||
16 | (github.event_name == 'issue_comment' &&
17 | contains(github.event.comment.body, '!pr-diff') &&
18 | (github.event.comment.author_association == 'CONTRIBUTOR' || github.event.comment.author_association == 'COLLABORATOR' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') &&
19 | github.event.issue.pull_request)
20 | steps:
21 | - name: Get PR head SHA
22 | id: get-pr-sha
23 | run: |
24 | PR_URL="${{ github.event.issue.pull_request.url || github.event.pull_request.url }}"
25 | # e.g. https://api.github.com/repos/FoundationAgents/OpenManus/pulls/1
26 | RESPONSE=$(curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" $PR_URL)
27 | SHA=$(echo $RESPONSE | jq -r '.head.sha')
28 | TARGET_BRANCH=$(echo $RESPONSE | jq -r '.base.ref')
29 | echo "pr_sha=$SHA" >> $GITHUB_OUTPUT
30 | echo "target_branch=$TARGET_BRANCH" >> $GITHUB_OUTPUT
31 | echo "Retrieved PR head SHA from API: $SHA, target branch: $TARGET_BRANCH"
32 | - name: Check out code
33 | uses: actions/checkout@v4
34 | with:
35 | ref: ${{ steps.get-pr-sha.outputs.pr_sha }}
36 | fetch-depth: 0
37 | - name: Set up Python
38 | uses: actions/setup-python@v5
39 | with:
40 | python-version: '3.11'
41 | - name: Install dependencies
42 | run: |
43 | python -m pip install --upgrade pip
44 | pip install openai requests
45 | - name: Create and run Python script
46 | env:
47 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
48 | OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
49 | GH_TOKEN: ${{ github.token }}
50 | PR_NUMBER: ${{ github.event.pull_request.number || github.event.issue.number }}
51 | TARGET_BRANCH: ${{ steps.get-pr-sha.outputs.target_branch }}
52 | run: |-
53 | cat << 'EOF' > /tmp/_workflow_core.py
54 | import os
55 | import subprocess
56 | import json
57 | import requests
58 | from openai import OpenAI
59 |
60 | def get_diff():
61 | result = subprocess.run(
62 | ['git', 'diff', 'origin/' + os.getenv('TARGET_BRANCH') + '...HEAD'],
63 | capture_output=True, text=True, check=True)
64 | return '\n'.join(
65 | line for line in result.stdout.split('\n')
66 | if any(line.startswith(c) for c in ('+', '-'))
67 | and not line.startswith(('---', '+++'))
68 | )[:round(200000 * 0.4)] # Truncate to prevent overflow
69 |
70 | def generate_comment(diff_content):
71 | client = OpenAI(
72 | base_url=os.getenv("OPENAI_BASE_URL"),
73 | api_key=os.getenv("OPENAI_API_KEY")
74 | )
75 |
76 | guidelines = '''
77 | 1. English version first, Chinese Simplified version after
78 | 2. Example format:
79 | # Diff Report
80 | ## English
81 | - Added `ABC` class
82 | - Fixed `f()` behavior in `foo` module
83 |
84 | ### Comments Highlight
85 | - `config.toml` needs to be configured properly to make sure new features work as expected.
86 |
87 | ### Spelling/Offensive Content Check
88 | - No spelling mistakes or offensive content found in the code or comments.
89 |
90 | ## 中文(简体)
91 | - 新增了 `ABC` 类
92 | - `foo` 模块中的 `f()` 行为已修复
93 |
94 | ### 评论高亮
95 | - `config.toml` 需要正确配置才能确保新功能正常运行。
96 |
97 | ### 内容检查
98 | - 没有发现代码或注释中的拼写错误或不当措辞。
99 |
100 | 3. Highlight non-English comments
101 | 4. Check for spelling/offensive content'''
102 |
103 | response = client.chat.completions.create(
104 | model="o3-mini",
105 | messages=[{
106 | "role": "system",
107 | "content": "Generate bilingual code review feedback."
108 | }, {
109 | "role": "user",
110 | "content": f"Review these changes per guidelines:\n{guidelines}\n\nDIFF:\n{diff_content}"
111 | }]
112 | )
113 | return response.choices[0].message.content
114 |
115 | def post_comment(comment):
116 | repo = os.getenv("GITHUB_REPOSITORY")
117 | pr_number = os.getenv("PR_NUMBER")
118 |
119 | headers = {
120 | "Authorization": f"Bearer {os.getenv('GH_TOKEN')}",
121 | "Accept": "application/vnd.github.v3+json"
122 | }
123 | url = f"https://api.github.com/repos/{repo}/issues/{pr_number}/comments"
124 |
125 | requests.post(url, json={"body": comment}, headers=headers)
126 |
127 | if __name__ == "__main__":
128 | diff_content = get_diff()
129 | if not diff_content.strip():
130 | print("No meaningful diff detected.")
131 | exit(0)
132 |
133 | comment = generate_comment(diff_content)
134 | post_comment(comment)
135 | print("Comment posted successfully.")
136 | EOF
137 |
138 | python /tmp/_workflow_core.py
139 |
--------------------------------------------------------------------------------
/.github/workflows/pre-commit.yaml:
--------------------------------------------------------------------------------
1 | name: Pre-commit checks
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - '**'
7 | push:
8 | branches:
9 | - '**'
10 |
11 | jobs:
12 | pre-commit-check:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout Source Code
16 | uses: actions/checkout@v4
17 | - name: Set up Python 3.12
18 | uses: actions/setup-python@v5
19 | with:
20 | python-version: '3.12'
21 | - name: Install pre-commit and tools
22 | run: |
23 | python -m pip install --upgrade pip
24 | pip install pre-commit black==23.1.0 isort==5.12.0 autoflake==2.0.1
25 | - name: Run pre-commit hooks
26 | run: pre-commit run --all-files
27 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yaml:
--------------------------------------------------------------------------------
1 | name: Close inactive issues
2 |
3 | on:
4 | schedule:
5 | - cron: "5 0 * * *"
6 |
7 | jobs:
8 | close-issues:
9 | runs-on: ubuntu-latest
10 | permissions:
11 | issues: write
12 | pull-requests: write
13 | steps:
14 | - uses: actions/stale@v9
15 | with:
16 | days-before-issue-stale: 30
17 | days-before-issue-close: 14
18 | stale-issue-label: "inactive"
19 | stale-issue-message: "This issue has been inactive for 30 days. Please comment if you have updates."
20 | close-issue-message: "This issue was closed due to 44 days of inactivity. Reopen if still relevant."
21 | days-before-pr-stale: -1
22 | days-before-pr-close: -1
23 | repo-token: ${{ secrets.GITHUB_TOKEN }}
24 |
--------------------------------------------------------------------------------
/.github/workflows/top-issues.yaml:
--------------------------------------------------------------------------------
1 | name: Top issues
2 | on:
3 | schedule:
4 | - cron: '0 0/2 * * *'
5 | workflow_dispatch:
6 | jobs:
7 | ShowAndLabelTopIssues:
8 | permissions:
9 | issues: write
10 | pull-requests: write
11 | actions: read
12 | contents: read
13 | name: Display and label top issues
14 | runs-on: ubuntu-latest
15 | if: github.repository == 'FoundationAgents/OpenManus'
16 | steps:
17 | - name: Run top issues action
18 | uses: rickstaa/top-issues-action@7e8dda5d5ae3087670f9094b9724a9a091fc3ba1 # v1.3.101
19 | env:
20 | github_token: ${{ secrets.GITHUB_TOKEN }}
21 | with:
22 | label: true
23 | dashboard: true
24 | dashboard_show_total_reactions: true
25 | top_issues: true
26 | top_features: true
27 | top_bugs: true
28 | top_pull_requests: true
29 | top_list_size: 14
30 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ### Project-specific ###
2 | # Logs
3 | logs/
4 |
5 | # Data
6 | data/
7 |
8 | # Workspace
9 | workspace/
10 |
11 | ### Python ###
12 | # Byte-compiled / optimized / DLL files
13 | __pycache__/
14 | *.py[cod]
15 | *$py.class
16 |
17 | # C extensions
18 | *.so
19 |
20 | # Distribution / packaging
21 | .Python
22 | build/
23 | develop-eggs/
24 | dist/
25 | downloads/
26 | eggs/
27 | .eggs/
28 | lib/
29 | lib64/
30 | parts/
31 | sdist/
32 | var/
33 | wheels/
34 | share/python-wheels/
35 | *.egg-info/
36 | .installed.cfg
37 | *.egg
38 | MANIFEST
39 |
40 | # PyInstaller
41 | # Usually these files are written by a python script from a template
42 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
43 | *.manifest
44 | *.spec
45 |
46 | # Installer logs
47 | pip-log.txt
48 | pip-delete-this-directory.txt
49 |
50 | # Unit test / coverage reports
51 | htmlcov/
52 | .tox/
53 | .nox/
54 | .coverage
55 | .coverage.*
56 | .cache
57 | nosetests.xml
58 | coverage.xml
59 | *.cover
60 | *.py,cover
61 | .hypothesis/
62 | .pytest_cache/
63 | cover/
64 |
65 | # Translations
66 | *.mo
67 | *.pot
68 |
69 | # Django stuff:
70 | *.log
71 | local_settings.py
72 | db.sqlite3
73 | db.sqlite3-journal
74 |
75 | # Flask stuff:
76 | instance/
77 | .webassets-cache
78 |
79 | # Scrapy stuff:
80 | .scrapy
81 |
82 | # Sphinx documentation
83 | docs/_build/
84 |
85 | # PyBuilder
86 | .pybuilder/
87 | target/
88 |
89 | # Jupyter Notebook
90 | .ipynb_checkpoints
91 |
92 | # IPython
93 | profile_default/
94 | ipython_config.py
95 |
96 | # pyenv
97 | # For a library or package, you might want to ignore these files since the code is
98 | # intended to run in multiple environments; otherwise, check them in:
99 | # .python-version
100 |
101 | # pipenv
102 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
103 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
104 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
105 | # install all needed dependencies.
106 | #Pipfile.lock
107 |
108 | # UV
109 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
110 | # This is especially recommended for binary packages to ensure reproducibility, and is more
111 | # commonly ignored for libraries.
112 | #uv.lock
113 |
114 | # poetry
115 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
116 | # This is especially recommended for binary packages to ensure reproducibility, and is more
117 | # commonly ignored for libraries.
118 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
119 | #poetry.lock
120 |
121 | # pdm
122 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
123 | #pdm.lock
124 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
125 | # in version control.
126 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
127 | .pdm.toml
128 | .pdm-python
129 | .pdm-build/
130 |
131 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
132 | __pypackages__/
133 |
134 | # Celery stuff
135 | celerybeat-schedule
136 | celerybeat.pid
137 |
138 | # SageMath parsed files
139 | *.sage.py
140 |
141 | # Environments
142 | .env
143 | .venv
144 | env/
145 | venv/
146 | ENV/
147 | env.bak/
148 | venv.bak/
149 |
150 | # Spyder project settings
151 | .spyderproject
152 | .spyproject
153 |
154 | # Rope project settings
155 | .ropeproject
156 |
157 | # mkdocs documentation
158 | /site
159 |
160 | # mypy
161 | .mypy_cache/
162 | .dmypy.json
163 | dmypy.json
164 |
165 | # Pyre type checker
166 | .pyre/
167 |
168 | # pytype static type analyzer
169 | .pytype/
170 |
171 | # Cython debug symbols
172 | cython_debug/
173 |
174 | # PyCharm
175 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
176 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
177 | # and can be added to the global gitignore or merged into this file. For a more nuclear
178 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
179 | .idea/
180 |
181 | # PyPI configuration file
182 | .pypirc
183 |
184 | ### Visual Studio Code ###
185 | .vscode/*
186 | !.vscode/settings.json
187 | !.vscode/tasks.json
188 | !.vscode/launch.json
189 | !.vscode/extensions.json
190 | !.vscode/*.code-snippets
191 |
192 | # Local History for Visual Studio Code
193 | .history/
194 |
195 | # Built Visual Studio Code Extensions
196 | *.vsix
197 |
198 | # OSX
199 | .DS_Store
200 |
201 | # node
202 | node_modules
203 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/psf/black
3 | rev: 23.1.0
4 | hooks:
5 | - id: black
6 |
7 | - repo: https://github.com/pre-commit/pre-commit-hooks
8 | rev: v4.4.0
9 | hooks:
10 | - id: trailing-whitespace
11 | - id: end-of-file-fixer
12 | - id: check-yaml
13 | - id: check-added-large-files
14 |
15 | - repo: https://github.com/PyCQA/autoflake
16 | rev: v2.0.1
17 | hooks:
18 | - id: autoflake
19 | args: [
20 | --remove-all-unused-imports,
21 | --ignore-init-module-imports,
22 | --expand-star-imports,
23 | --remove-duplicate-keys,
24 | --remove-unused-variables,
25 | --recursive,
26 | --in-place,
27 | --exclude=__init__.py,
28 | ]
29 | files: \.py$
30 |
31 | - repo: https://github.com/pycqa/isort
32 | rev: 5.12.0
33 | hooks:
34 | - id: isort
35 | args: [
36 | "--profile", "black",
37 | "--filter-files",
38 | "--lines-after-imports=2",
39 | ]
40 |
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 | "recommendations": [
3 | "tamasfe.even-better-toml",
4 | "ms-python.black-formatter",
5 | "ms-python.isort"
6 | ],
7 | "unwantedRecommendations": []
8 | }
9 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "[python]": {
3 | "editor.defaultFormatter": "ms-python.black-formatter",
4 | "editor.codeActionsOnSave": {
5 | "source.organizeImports": "always"
6 | }
7 | },
8 | "[toml]": {
9 | "editor.defaultFormatter": "tamasfe.even-better-toml",
10 | },
11 | "pre-commit-helper.runOnSave": "none",
12 | "pre-commit-helper.config": ".pre-commit-config.yaml",
13 | "evenBetterToml.schema.enabled": true,
14 | "evenBetterToml.schema.associations": {
15 | "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json"
16 | },
17 | "files.insertFinalNewline": true,
18 | "files.trimTrailingWhitespace": true,
19 | "editor.formatOnSave": true
20 | }
21 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.12-slim
2 |
3 | WORKDIR /app/OpenManus
4 |
5 | RUN apt-get update && apt-get install -y --no-install-recommends git curl \
6 | && rm -rf /var/lib/apt/lists/* \
7 | && (command -v uv >/dev/null 2>&1 || pip install --no-cache-dir uv)
8 |
9 | COPY . .
10 |
11 | RUN uv pip install --system -r requirements.txt
12 |
13 | CMD ["bash"]
14 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 manna_and_poem
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README_ja.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | [English](README.md) | [中文](README_zh.md) | [한국어](README_ko.md) | 日本語
6 |
7 | [](https://github.com/FoundationAgents/OpenManus/stargazers)
8 |
9 | [](https://opensource.org/licenses/MIT)
10 | [](https://discord.gg/DYn29wFk9z)
11 | [](https://huggingface.co/spaces/lyh-917/OpenManusDemo)
12 | [](https://doi.org/10.5281/zenodo.15186407)
13 |
14 | # 👋 OpenManus
15 |
16 | Manusは素晴らしいですが、OpenManusは*招待コード*なしでどんなアイデアも実現できます!🛫
17 |
18 | 私たちのチームメンバー [@Xinbin Liang](https://github.com/mannaandpoem) と [@Jinyu Xiang](https://github.com/XiangJinyu)(主要開発者)、そして [@Zhaoyang Yu](https://github.com/MoshiQAQ)、[@Jiayi Zhang](https://github.com/didiforgithub)、[@Sirui Hong](https://github.com/stellaHSR) は [@MetaGPT](https://github.com/geekan/MetaGPT) から来ました。プロトタイプは3時間以内に立ち上げられ、継続的に開発を進めています!
19 |
20 | これはシンプルな実装ですので、どんな提案、貢献、フィードバックも歓迎します!
21 |
22 | OpenManusで自分だけのエージェントを楽しみましょう!
23 |
24 | また、UIUCとOpenManusの研究者が共同開発した[OpenManus-RL](https://github.com/OpenManus/OpenManus-RL)をご紹介できることを嬉しく思います。これは強化学習(RL)ベース(GRPOなど)のLLMエージェントチューニング手法に特化したオープンソースプロジェクトです。
25 |
26 | ## プロジェクトデモ
27 |
28 |
29 |
30 | ## インストール方法
31 |
32 | インストール方法は2つ提供しています。方法2(uvを使用)は、より高速なインストールと優れた依存関係管理のため推奨されています。
33 |
34 | ### 方法1:condaを使用
35 |
36 | 1. 新しいconda環境を作成します:
37 |
38 | ```bash
39 | conda create -n open_manus python=3.12
40 | conda activate open_manus
41 | ```
42 |
43 | 2. リポジトリをクローンします:
44 |
45 | ```bash
46 | git clone https://github.com/FoundationAgents/OpenManus.git
47 | cd OpenManus
48 | ```
49 |
50 | 3. 依存関係をインストールします:
51 |
52 | ```bash
53 | pip install -r requirements.txt
54 | ```
55 |
56 | ### 方法2:uvを使用(推奨)
57 |
58 | 1. uv(高速なPythonパッケージインストーラーと管理機能)をインストールします:
59 |
60 | ```bash
61 | curl -LsSf https://astral.sh/uv/install.sh | sh
62 | ```
63 |
64 | 2. リポジトリをクローンします:
65 |
66 | ```bash
67 | git clone https://github.com/FoundationAgents/OpenManus.git
68 | cd OpenManus
69 | ```
70 |
71 | 3. 新しい仮想環境を作成してアクティベートします:
72 |
73 | ```bash
74 | uv venv --python 3.12
75 | source .venv/bin/activate # Unix/macOSの場合
76 | # Windowsの場合:
77 | # .venv\Scripts\activate
78 | ```
79 |
80 | 4. 依存関係をインストールします:
81 |
82 | ```bash
83 | uv pip install -r requirements.txt
84 | ```
85 |
86 | ### ブラウザ自動化ツール(オプション)
87 | ```bash
88 | playwright install
89 | ```
90 |
91 | ## 設定
92 |
93 | OpenManusを使用するには、LLM APIの設定が必要です。以下の手順に従って設定してください:
94 |
95 | 1. `config`ディレクトリに`config.toml`ファイルを作成します(サンプルからコピーできます):
96 |
97 | ```bash
98 | cp config/config.example.toml config/config.toml
99 | ```
100 |
101 | 2. `config/config.toml`を編集してAPIキーを追加し、設定をカスタマイズします:
102 |
103 | ```toml
104 | # グローバルLLM設定
105 | [llm]
106 | model = "gpt-4o"
107 | base_url = "https://api.openai.com/v1"
108 | api_key = "sk-..." # 実際のAPIキーに置き換えてください
109 | max_tokens = 4096
110 | temperature = 0.0
111 |
112 | # 特定のLLMモデル用のオプション設定
113 | [llm.vision]
114 | model = "gpt-4o"
115 | base_url = "https://api.openai.com/v1"
116 | api_key = "sk-..." # 実際のAPIキーに置き換えてください
117 | ```
118 |
119 | ## クイックスタート
120 |
121 | OpenManusを実行する一行コマンド:
122 |
123 | ```bash
124 | python main.py
125 | ```
126 |
127 | その後、ターミナルからプロンプトを入力してください!
128 |
129 | MCP ツールバージョンを使用する場合は、以下を実行します:
130 | ```bash
131 | python run_mcp.py
132 | ```
133 |
134 | 開発中のマルチエージェントバージョンを試すには、以下を実行します:
135 |
136 | ```bash
137 | python run_flow.py
138 | ```
139 |
140 | ## カスタムマルチエージェントの追加
141 |
142 | 現在、一般的なOpenManusエージェントに加えて、データ分析とデータ可視化タスクに適したDataAnalysisエージェントが組み込まれています。このエージェントを`config.toml`の`run_flow`に追加することができます。
143 |
144 | ```toml
145 | # run-flowのオプション設定
146 | [runflow]
147 | use_data_analysis_agent = true # デフォルトでは無効、trueに変更すると有効化されます
148 | ```
149 |
150 | これに加えて、エージェントが正常に動作するために必要な依存関係をインストールする必要があります:[具体的なインストールガイド](app/tool/chart_visualization/README_ja.md#インストール)
151 |
152 |
153 | ## 貢献方法
154 |
155 | 我々は建設的な意見や有益な貢献を歓迎します!issueを作成するか、プルリクエストを提出してください。
156 |
157 | または @mannaandpoem に📧メールでご連絡ください:mannaandpoem@gmail.com
158 |
159 | **注意**: プルリクエストを送信する前に、pre-commitツールを使用して変更を確認してください。`pre-commit run --all-files`を実行してチェックを実行します。
160 |
161 | ## コミュニティグループ
162 | Feishuのネットワーキンググループに参加して、他の開発者と経験を共有しましょう!
163 |
164 |
165 |
166 |
167 |
168 | ## スター履歴
169 |
170 | [](https://star-history.com/#FoundationAgents/OpenManus&Date)
171 |
172 | ## 謝辞
173 |
174 | このプロジェクトの基本的なサポートを提供してくれた[anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo)
175 | と[browser-use](https://github.com/browser-use/browser-use)に感謝します!
176 |
177 | さらに、[AAAJ](https://github.com/metauto-ai/agent-as-a-judge)、[MetaGPT](https://github.com/geekan/MetaGPT)、[OpenHands](https://github.com/All-Hands-AI/OpenHands)、[SWE-agent](https://github.com/SWE-agent/SWE-agent)にも感謝します。
178 |
179 | また、Hugging Face デモスペースをサポートしてくださった阶跃星辰 (stepfun)にも感謝いたします。
180 |
181 | OpenManusはMetaGPTのコントリビューターによって構築されました。このエージェントコミュニティに大きな感謝を!
182 |
183 | ## 引用
184 | ```bibtex
185 | @misc{openmanus2025,
186 | author = {Xinbin Liang and Jinyu Xiang and Zhaoyang Yu and Jiayi Zhang and Sirui Hong and Sheng Fan and Xiao Tang},
187 | title = {OpenManus: An open-source framework for building general AI agents},
188 | year = {2025},
189 | publisher = {Zenodo},
190 | doi = {10.5281/zenodo.15186407},
191 | url = {https://doi.org/10.5281/zenodo.15186407},
192 | }
193 | ```
194 |
--------------------------------------------------------------------------------
/README_zh.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | [English](README.md) | 中文 | [한국어](README_ko.md) | [日本語](README_ja.md)
6 |
7 | [](https://github.com/FoundationAgents/OpenManus/stargazers)
8 |
9 | [](https://opensource.org/licenses/MIT)
10 | [](https://discord.gg/DYn29wFk9z)
11 | [](https://huggingface.co/spaces/lyh-917/OpenManusDemo)
12 | [](https://doi.org/10.5281/zenodo.15186407)
13 |
14 | # 👋 OpenManus
15 |
16 | Manus 非常棒,但 OpenManus 无需邀请码即可实现任何创意 🛫!
17 |
18 | 我们的团队成员 [@Xinbin Liang](https://github.com/mannaandpoem) 和 [@Jinyu Xiang](https://github.com/XiangJinyu)(核心作者),以及 [@Zhaoyang Yu](https://github.com/MoshiQAQ)、[@Jiayi Zhang](https://github.com/didiforgithub) 和 [@Sirui Hong](https://github.com/stellaHSR),来自 [@MetaGPT](https://github.com/geekan/MetaGPT)团队。我们在 3
19 | 小时内完成了开发并持续迭代中!
20 |
21 | 这是一个简洁的实现方案,欢迎任何建议、贡献和反馈!
22 |
23 | 用 OpenManus 开启你的智能体之旅吧!
24 |
25 | 我们也非常高兴地向大家介绍 [OpenManus-RL](https://github.com/OpenManus/OpenManus-RL),这是一个专注于基于强化学习(RL,例如 GRPO)的方法来优化大语言模型(LLM)智能体的开源项目,由来自UIUC 和 OpenManus 的研究人员合作开发。
26 |
27 | ## 项目演示
28 |
29 |
30 |
31 | ## 安装指南
32 |
33 | 我们提供两种安装方式。推荐使用方式二(uv),因为它能提供更快的安装速度和更好的依赖管理。
34 |
35 | ### 方式一:使用 conda
36 |
37 | 1. 创建新的 conda 环境:
38 |
39 | ```bash
40 | conda create -n open_manus python=3.12
41 | conda activate open_manus
42 | ```
43 |
44 | 2. 克隆仓库:
45 |
46 | ```bash
47 | git clone https://github.com/FoundationAgents/OpenManus.git
48 | cd OpenManus
49 | ```
50 |
51 | 3. 安装依赖:
52 |
53 | ```bash
54 | pip install -r requirements.txt
55 | ```
56 |
57 | ### 方式二:使用 uv(推荐)
58 |
59 | 1. 安装 uv(一个快速的 Python 包管理器):
60 |
61 | ```bash
62 | curl -LsSf https://astral.sh/uv/install.sh | sh
63 | ```
64 |
65 | 2. 克隆仓库:
66 |
67 | ```bash
68 | git clone https://github.com/FoundationAgents/OpenManus.git
69 | cd OpenManus
70 | ```
71 |
72 | 3. 创建并激活虚拟环境:
73 |
74 | ```bash
75 | uv venv --python 3.12
76 | source .venv/bin/activate # Unix/macOS 系统
77 | # Windows 系统使用:
78 | # .venv\Scripts\activate
79 | ```
80 |
81 | 4. 安装依赖:
82 |
83 | ```bash
84 | uv pip install -r requirements.txt
85 | ```
86 |
87 | ### 浏览器自动化工具(可选)
88 | ```bash
89 | playwright install
90 | ```
91 |
92 | ## 配置说明
93 |
94 | OpenManus 需要配置使用的 LLM API,请按以下步骤设置:
95 |
96 | 1. 在 `config` 目录创建 `config.toml` 文件(可从示例复制):
97 |
98 | ```bash
99 | cp config/config.example.toml config/config.toml
100 | ```
101 |
102 | 2. 编辑 `config/config.toml` 添加 API 密钥和自定义设置:
103 |
104 | ```toml
105 | # 全局 LLM 配置
106 | [llm]
107 | model = "gpt-4o"
108 | base_url = "https://api.openai.com/v1"
109 | api_key = "sk-..." # 替换为真实 API 密钥
110 | max_tokens = 4096
111 | temperature = 0.0
112 |
113 | # 可选特定 LLM 模型配置
114 | [llm.vision]
115 | model = "gpt-4o"
116 | base_url = "https://api.openai.com/v1"
117 | api_key = "sk-..." # 替换为真实 API 密钥
118 | ```
119 |
120 | ## 快速启动
121 |
122 | 一行命令运行 OpenManus:
123 |
124 | ```bash
125 | python main.py
126 | ```
127 |
128 | 然后通过终端输入你的创意!
129 |
130 | 如需使用 MCP 工具版本,可运行:
131 | ```bash
132 | python run_mcp.py
133 | ```
134 |
135 | 如需体验不稳定的多智能体版本,可运行:
136 |
137 | ```bash
138 | python run_flow.py
139 | ```
140 |
141 | ## 添加自定义多智能体
142 |
143 | 目前除了通用的 OpenManus Agent, 我们还内置了DataAnalysis Agent,适用于数据分析和数据可视化任务,你可以在`config.toml`中将这个智能体加入到`run_flow`中
144 | ```toml
145 | # run-flow可选配置
146 | [runflow]
147 | use_data_analysis_agent = true # 默认关闭,将其改为true则为激活
148 | ```
149 | 除此之外,你还需要安装相关的依赖来确保智能体正常运行:[具体安装指南](app/tool/chart_visualization/README_zh.md#安装)
150 |
151 |
152 | ## 贡献指南
153 |
154 | 我们欢迎任何友好的建议和有价值的贡献!可以直接创建 issue 或提交 pull request。
155 |
156 | 或通过 📧 邮件联系 @mannaandpoem:mannaandpoem@gmail.com
157 |
158 | **注意**: 在提交 pull request 之前,请使用 pre-commit 工具检查您的更改。运行 `pre-commit run --all-files` 来执行检查。
159 |
160 | ## 交流群
161 |
162 | 加入我们的飞书交流群,与其他开发者分享经验!
163 |
164 |
165 |
166 |
167 |
168 | ## Star 数量
169 |
170 | [](https://star-history.com/#FoundationAgents/OpenManus&Date)
171 |
172 |
173 | ## 赞助商
174 | 感谢[PPIO](https://ppinfra.com/user/register?invited_by=OCPKCN&utm_source=github_openmanus&utm_medium=github_readme&utm_campaign=link) 提供的算力支持。
175 | > PPIO派欧云:一键调用高性价比的开源模型API和GPU容器
176 |
177 | ## 致谢
178 |
179 | 特别感谢 [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo)
180 | 和 [browser-use](https://github.com/browser-use/browser-use) 为本项目提供的基础支持!
181 |
182 | 此外,我们感谢 [AAAJ](https://github.com/metauto-ai/agent-as-a-judge),[MetaGPT](https://github.com/geekan/MetaGPT),[OpenHands](https://github.com/All-Hands-AI/OpenHands) 和 [SWE-agent](https://github.com/SWE-agent/SWE-agent).
183 |
184 | 我们也感谢阶跃星辰 (stepfun) 提供的 Hugging Face 演示空间支持。
185 |
186 | OpenManus 由 MetaGPT 社区的贡献者共同构建,感谢这个充满活力的智能体开发者社区!
187 |
188 | ## 引用
189 | ```bibtex
190 | @misc{openmanus2025,
191 | author = {Xinbin Liang and Jinyu Xiang and Zhaoyang Yu and Jiayi Zhang and Sirui Hong and Sheng Fan and Xiao Tang},
192 | title = {OpenManus: An open-source framework for building general AI agents},
193 | year = {2025},
194 | publisher = {Zenodo},
195 | doi = {10.5281/zenodo.15186407},
196 | url = {https://doi.org/10.5281/zenodo.15186407},
197 | }
198 | ```
199 |
--------------------------------------------------------------------------------
/app/__init__.py:
--------------------------------------------------------------------------------
# Python version check: 3.11-3.13
import sys


# sys.version_info is a 5-tuple (major, minor, micro, releaselevel, serial),
# so e.g. (3, 13, 0, "final", 0) > (3, 13) is True and the old `> (3, 13)`
# test warned on every supported 3.13.x release. Compare against (3, 14)
# instead, and print only major.minor.micro in the warning.
if sys.version_info < (3, 11) or sys.version_info >= (3, 14):
    print(
        "Warning: Unsupported Python version {ver}, please use 3.11-3.13".format(
            ver=".".join(map(str, sys.version_info[:3]))
        )
    )
11 |
--------------------------------------------------------------------------------
/app/agent/__init__.py:
--------------------------------------------------------------------------------
from app.agent.base import BaseAgent
from app.agent.browser import BrowserAgent
from app.agent.mcp import MCPAgent
from app.agent.react import ReActAgent
from app.agent.swe import SWEAgent
from app.agent.toolcall import ToolCallAgent


# Public agents re-exported at package level. DataAnalysis is not exported
# here -- presumably because its chart_visualization dependencies are
# optional; TODO(review) confirm the omission is intentional.
__all__ = [
    "BaseAgent",
    "BrowserAgent",
    "ReActAgent",
    "SWEAgent",
    "ToolCallAgent",
    "MCPAgent",
]
17 |
--------------------------------------------------------------------------------
/app/agent/browser.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import TYPE_CHECKING, Optional
3 |
4 | from pydantic import Field, model_validator
5 |
6 | from app.agent.toolcall import ToolCallAgent
7 | from app.logger import logger
8 | from app.prompt.browser import NEXT_STEP_PROMPT, SYSTEM_PROMPT
9 | from app.schema import Message, ToolChoice
10 | from app.tool import BrowserUseTool, Terminate, ToolCollection
11 |
12 |
13 | # Avoid circular import if BrowserAgent needs BrowserContextHelper
14 | if TYPE_CHECKING:
15 | from app.agent.base import BaseAgent # Or wherever memory is defined
16 |
17 |
class BrowserContextHelper:
    """Fetches live browser state for an agent and renders the next-step prompt.

    Keeps the most recent screenshot (base64) between get_browser_state() and
    format_next_step_prompt(), where it is attached to memory and consumed.
    """

    def __init__(self, agent: "BaseAgent"):
        self.agent = agent
        self._current_base64_image: Optional[str] = None

    async def get_browser_state(self) -> Optional[dict]:
        """Return the browser state as a dict, or None on any failure.

        Side effect: caches the latest screenshot for the next prompt build.
        """
        tool = self.agent.available_tools.get_tool(BrowserUseTool().name)
        if not tool or not hasattr(tool, "get_current_state"):
            logger.warning("BrowserUseTool not found or doesn't have get_current_state")
            return None
        try:
            state = await tool.get_current_state()
            if state.error:
                logger.debug(f"Browser state error: {state.error}")
                return None
            # Cache the screenshot if present; reset otherwise so a stale
            # image is never attached to a later prompt.
            self._current_base64_image = (
                getattr(state, "base64_image", None) or None
            )
            return json.loads(state.output)
        except Exception as exc:
            logger.debug(f"Failed to get browser state: {str(exc)}")
            return None

    async def format_next_step_prompt(self) -> str:
        """Gets browser state and formats the browser prompt."""
        state = await self.get_browser_state()

        url_text = ""
        tabs_text = ""
        above_text = ""
        below_text = ""
        results_text = ""  # Or get from agent if needed elsewhere

        if state and not state.get("error"):
            url_text = f"\n   URL: {state.get('url', 'N/A')}\n   Title: {state.get('title', 'N/A')}"
            tabs = state.get("tabs", [])
            if tabs:
                tabs_text = f"\n   {len(tabs)} tab(s) available"
            above = state.get("pixels_above", 0)
            below = state.get("pixels_below", 0)
            if above > 0:
                above_text = f" ({above} pixels)"
            if below > 0:
                below_text = f" ({below} pixels)"

        if self._current_base64_image:
            self.agent.memory.add_message(
                Message.user_message(
                    content="Current browser screenshot:",
                    base64_image=self._current_base64_image,
                )
            )
            self._current_base64_image = None  # Consume the image after adding

        return NEXT_STEP_PROMPT.format(
            url_placeholder=url_text,
            tabs_placeholder=tabs_text,
            content_above_placeholder=above_text,
            content_below_placeholder=below_text,
            results_placeholder=results_text,
        )

    async def cleanup_browser(self):
        """Invoke the browser tool's cleanup hook when it exposes one."""
        tool = self.agent.available_tools.get_tool(BrowserUseTool().name)
        if tool and hasattr(tool, "cleanup"):
            await tool.cleanup()
80 |
81 |
class BrowserAgent(ToolCallAgent):
    """
    A browser agent that uses the browser_use library to control a browser.

    This agent can navigate web pages, interact with elements, fill forms,
    extract content, and perform other browser-based actions to accomplish tasks.
    """

    name: str = "browser"
    description: str = "A browser agent that can control a browser to accomplish tasks"

    system_prompt: str = SYSTEM_PROMPT
    next_step_prompt: str = NEXT_STEP_PROMPT

    # Cap on observed tool-output length and on agent iterations.
    max_observe: int = 10000
    max_steps: int = 20

    # Configure the available tools
    available_tools: ToolCollection = Field(
        default_factory=lambda: ToolCollection(BrowserUseTool(), Terminate())
    )

    # Use Auto for tool choice to allow both tool usage and free-form responses
    tool_choices: ToolChoice = ToolChoice.AUTO
    special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])

    # Populated by initialize_helper below; Optional only so the field can be
    # declared before validation runs.
    browser_context_helper: Optional[BrowserContextHelper] = None

    @model_validator(mode="after")
    def initialize_helper(self) -> "BrowserAgent":
        """Attach a BrowserContextHelper once the model has been constructed."""
        self.browser_context_helper = BrowserContextHelper(self)
        return self

    async def think(self) -> bool:
        """Process current state and decide next actions using tools, with browser state info added"""
        self.next_step_prompt = (
            await self.browser_context_helper.format_next_step_prompt()
        )
        return await super().think()

    async def cleanup(self):
        """Release browser resources via the context helper.

        NOTE(review): this does NOT call super().cleanup() -- it only cleans
        up the browser tool; confirm whether parent cleanup is needed.
        """
        await self.browser_context_helper.cleanup_browser()
125 |
--------------------------------------------------------------------------------
/app/agent/data_analysis.py:
--------------------------------------------------------------------------------
1 | from pydantic import Field
2 |
3 | from app.agent.toolcall import ToolCallAgent
4 | from app.config import config
5 | from app.prompt.visualization import NEXT_STEP_PROMPT, SYSTEM_PROMPT
6 | from app.tool import Terminate, ToolCollection
7 | from app.tool.chart_visualization.chart_prepare import VisualizationPrepare
8 | from app.tool.chart_visualization.data_visualization import DataVisualization
9 | from app.tool.chart_visualization.python_execute import NormalPythonExecute
10 |
11 |
class DataAnalysis(ToolCallAgent):
    """
    A data analysis agent that uses planning to solve various data analysis tasks.

    This agent extends ToolCallAgent with a comprehensive set of tools and capabilities,
    including Data Analysis, Chart Visualization, Data Report.
    """

    name: str = "Data_Analysis"
    description: str = "An analytical agent that utilizes python and data visualization tools to solve diverse data analysis tasks"

    # Inject the workspace path into the system prompt at class-definition time.
    system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)
    next_step_prompt: str = NEXT_STEP_PROMPT

    # Larger observation cap than other agents -- data outputs can be long.
    max_observe: int = 15000
    max_steps: int = 20

    # Add general-purpose tools to the tool collection
    available_tools: ToolCollection = Field(
        default_factory=lambda: ToolCollection(
            NormalPythonExecute(),
            VisualizationPrepare(),
            DataVisualization(),
            Terminate(),
        )
    )
38 |
--------------------------------------------------------------------------------
/app/agent/manus.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional
2 |
3 | from pydantic import Field, model_validator
4 |
5 | from app.agent.browser import BrowserContextHelper
6 | from app.agent.toolcall import ToolCallAgent
7 | from app.config import config
8 | from app.logger import logger
9 | from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT
10 | from app.tool import Terminate, ToolCollection
11 | from app.tool.ask_human import AskHuman
12 | from app.tool.browser_use_tool import BrowserUseTool
13 | from app.tool.mcp import MCPClients, MCPClientTool
14 | from app.tool.python_execute import PythonExecute
15 | from app.tool.str_replace_editor import StrReplaceEditor
16 |
17 |
class Manus(ToolCallAgent):
    """A versatile general-purpose agent with support for both local and MCP tools."""

    name: str = "Manus"
    description: str = "A versatile agent that can solve various tasks using multiple tools including MCP-based tools"

    # Inject the workspace path into the system prompt at class-definition time.
    system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)
    next_step_prompt: str = NEXT_STEP_PROMPT

    max_observe: int = 10000
    max_steps: int = 20

    # MCP clients for remote tool access
    mcp_clients: MCPClients = Field(default_factory=MCPClients)

    # Add general-purpose tools to the tool collection
    available_tools: ToolCollection = Field(
        default_factory=lambda: ToolCollection(
            PythonExecute(),
            BrowserUseTool(),
            StrReplaceEditor(),
            AskHuman(),
            Terminate(),
        )
    )

    special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])
    browser_context_helper: Optional[BrowserContextHelper] = None

    # Track connected MCP servers
    connected_servers: Dict[str, str] = Field(
        default_factory=dict
    )  # server_id -> url/command
    # True once MCP servers have been connected (via create() or lazily in think()).
    _initialized: bool = False

    @model_validator(mode="after")
    def initialize_helper(self) -> "Manus":
        """Initialize basic components synchronously."""
        self.browser_context_helper = BrowserContextHelper(self)
        return self

    @classmethod
    async def create(cls, **kwargs) -> "Manus":
        """Factory method to create and properly initialize a Manus instance."""
        instance = cls(**kwargs)
        await instance.initialize_mcp_servers()
        instance._initialized = True
        return instance

    async def initialize_mcp_servers(self) -> None:
        """Initialize connections to configured MCP servers.

        Each failure is logged and skipped so one bad server does not prevent
        the remaining servers from connecting.
        """
        for server_id, server_config in config.mcp_config.servers.items():
            try:
                if server_config.type == "sse":
                    if server_config.url:
                        await self.connect_mcp_server(server_config.url, server_id)
                        logger.info(
                            f"Connected to MCP server {server_id} at {server_config.url}"
                        )
                elif server_config.type == "stdio":
                    if server_config.command:
                        await self.connect_mcp_server(
                            server_config.command,
                            server_id,
                            use_stdio=True,
                            stdio_args=server_config.args,
                        )
                        logger.info(
                            f"Connected to MCP server {server_id} using command {server_config.command}"
                        )
            except Exception as e:
                logger.error(f"Failed to connect to MCP server {server_id}: {e}")

    async def connect_mcp_server(
        self,
        server_url: str,
        server_id: str = "",
        use_stdio: bool = False,
        stdio_args: Optional[List[str]] = None,
    ) -> None:
        """Connect to an MCP server and add its tools.

        Args:
            server_url: SSE URL, or the command to launch when use_stdio is True.
            server_id: Identifier for the server; falls back to server_url as key.
            use_stdio: Connect over stdio instead of SSE.
            stdio_args: Extra command-line arguments for stdio servers.
        """
        if use_stdio:
            await self.mcp_clients.connect_stdio(
                server_url, stdio_args or [], server_id
            )
            self.connected_servers[server_id or server_url] = server_url
        else:
            await self.mcp_clients.connect_sse(server_url, server_id)
            self.connected_servers[server_id or server_url] = server_url

        # Update available tools with only the new tools from this server
        new_tools = [
            tool for tool in self.mcp_clients.tools if tool.server_id == server_id
        ]
        self.available_tools.add_tools(*new_tools)

    async def disconnect_mcp_server(self, server_id: str = "") -> None:
        """Disconnect from an MCP server and remove its tools.

        An empty server_id disconnects from all servers.
        """
        await self.mcp_clients.disconnect(server_id)
        if server_id:
            self.connected_servers.pop(server_id, None)
        else:
            self.connected_servers.clear()

        # Rebuild available tools without the disconnected server's tools
        base_tools = [
            tool
            for tool in self.available_tools.tools
            if not isinstance(tool, MCPClientTool)
        ]
        self.available_tools = ToolCollection(*base_tools)
        self.available_tools.add_tools(*self.mcp_clients.tools)

    async def cleanup(self):
        """Clean up Manus agent resources."""
        if self.browser_context_helper:
            await self.browser_context_helper.cleanup_browser()
        # Disconnect from all MCP servers only if we were initialized
        if self._initialized:
            await self.disconnect_mcp_server()
            self._initialized = False

    async def think(self) -> bool:
        """Process current state and decide next actions with appropriate context."""
        # Lazily connect MCP servers on the first think() call.
        if not self._initialized:
            await self.initialize_mcp_servers()
            self._initialized = True

        original_prompt = self.next_step_prompt
        recent_messages = self.memory.messages[-3:] if self.memory.messages else []
        # Swap in the browser-specific prompt while the browser tool is in use.
        browser_in_use = any(
            tc.function.name == BrowserUseTool().name
            for msg in recent_messages
            if msg.tool_calls
            for tc in msg.tool_calls
        )

        if browser_in_use:
            self.next_step_prompt = (
                await self.browser_context_helper.format_next_step_prompt()
            )

        result = await super().think()

        # Restore original prompt
        self.next_step_prompt = original_prompt

        return result
166 |
--------------------------------------------------------------------------------
/app/agent/react.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Optional
3 |
4 | from pydantic import Field
5 |
6 | from app.agent.base import BaseAgent
7 | from app.llm import LLM
8 | from app.schema import AgentState, Memory
9 |
10 |
class ReActAgent(BaseAgent, ABC):
    """Abstract ReAct-style agent: each step first thinks, then (optionally) acts."""

    name: str
    description: Optional[str] = None

    system_prompt: Optional[str] = None
    next_step_prompt: Optional[str] = None

    llm: Optional[LLM] = Field(default_factory=LLM)
    memory: Memory = Field(default_factory=Memory)
    state: AgentState = AgentState.IDLE

    # Iteration budget and progress counter.
    max_steps: int = 10
    current_step: int = 0

    @abstractmethod
    async def think(self) -> bool:
        """Process current state and decide next action; return True to act."""

    @abstractmethod
    async def act(self) -> str:
        """Execute decided actions and return a result summary."""

    async def step(self) -> str:
        """Execute a single step: think and act."""
        should_act = await self.think()
        if not should_act:
            return "Thinking complete - no action needed"
        return await self.act()
39 |
--------------------------------------------------------------------------------
/app/agent/swe.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from pydantic import Field
4 |
5 | from app.agent.toolcall import ToolCallAgent
6 | from app.prompt.swe import SYSTEM_PROMPT
7 | from app.tool import Bash, StrReplaceEditor, Terminate, ToolCollection
8 |
9 |
class SWEAgent(ToolCallAgent):
    """An agent that implements the SWEAgent paradigm for executing code and natural conversations."""

    name: str = "swe"
    description: str = "an autonomous AI programmer that interacts directly with the computer to solve tasks."

    system_prompt: str = SYSTEM_PROMPT
    next_step_prompt: str = ""

    # Use default_factory so each instance gets its own ToolCollection instead
    # of sharing one mutable class-level instance; this also matches how the
    # other agents (Manus, BrowserAgent, DataAnalysis) declare their tools.
    available_tools: ToolCollection = Field(
        default_factory=lambda: ToolCollection(Bash(), StrReplaceEditor(), Terminate())
    )
    special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])

    max_steps: int = 20
25 |
--------------------------------------------------------------------------------
/app/exceptions.py:
--------------------------------------------------------------------------------
class ToolError(Exception):
    """Raised when a tool encounters an error."""

    def __init__(self, message):
        # Forward the message to Exception so str(err), repr(err) and
        # err.args carry it (the original left args empty and str(err) == "").
        super().__init__(message)
        self.message = message
6 |
7 |
# Root of the OpenManus exception hierarchy; catch this to handle any
# framework-specific error in one place.
class OpenManusError(Exception):
    """Base exception for all OpenManus errors"""
10 |
11 |
# Raised when an LLM request would exceed the configured token budget.
class TokenLimitExceeded(OpenManusError):
    """Exception raised when the token limit is exceeded"""
14 |
--------------------------------------------------------------------------------
/app/flow/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationAgents/OpenManus/7cd3057ddab94989ec02f17060b1c7ed13b0bf92/app/flow/__init__.py
--------------------------------------------------------------------------------
/app/flow/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Dict, List, Optional, Union
3 |
4 | from pydantic import BaseModel
5 |
6 | from app.agent.base import BaseAgent
7 |
8 |
class BaseFlow(BaseModel, ABC):
    """Base class for execution flows supporting multiple agents"""

    # Mapping of agent key -> agent; normalized by __init__ from flexible input.
    agents: Dict[str, BaseAgent]
    tools: Optional[List] = None
    primary_agent_key: Optional[str] = None

    class Config:
        # BaseAgent is not a pydantic model, so arbitrary types must be allowed.
        arbitrary_types_allowed = True

    def __init__(
        self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data
    ):
        """Normalize *agents* (single agent, list, or dict) into a keyed dict
        and choose a primary agent when none was specified."""
        # Handle different ways of providing agents
        if isinstance(agents, BaseAgent):
            agents_dict = {"default": agents}
        elif isinstance(agents, list):
            agents_dict = {f"agent_{i}": agent for i, agent in enumerate(agents)}
        else:
            agents_dict = agents

        # If primary agent not specified, use first agent
        primary_key = data.get("primary_agent_key")
        if not primary_key and agents_dict:
            primary_key = next(iter(agents_dict))
            data["primary_agent_key"] = primary_key

        # Set the agents dictionary
        data["agents"] = agents_dict

        # Initialize using BaseModel's init
        super().__init__(**data)

    @property
    def primary_agent(self) -> Optional[BaseAgent]:
        """Get the primary agent for the flow"""
        return self.agents.get(self.primary_agent_key)

    def get_agent(self, key: str) -> Optional[BaseAgent]:
        """Get a specific agent by key"""
        return self.agents.get(key)

    def add_agent(self, key: str, agent: BaseAgent) -> None:
        """Add a new agent to the flow"""
        self.agents[key] = agent

    @abstractmethod
    async def execute(self, input_text: str) -> str:
        """Execute the flow with given input"""
58 |
--------------------------------------------------------------------------------
/app/flow/flow_factory.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import Dict, List, Union
3 |
4 | from app.agent.base import BaseAgent
5 | from app.flow.base import BaseFlow
6 | from app.flow.planning import PlanningFlow
7 |
8 |
# Known flow implementations; the str value doubles as the flow's wire name.
class FlowType(str, Enum):
    PLANNING = "planning"
11 |
12 |
class FlowFactory:
    """Factory for creating different types of flows with support for multiple agents"""

    @staticmethod
    def create_flow(
        flow_type: FlowType,
        agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]],
        **kwargs,
    ) -> BaseFlow:
        """Instantiate the flow class registered for *flow_type*.

        Raises:
            ValueError: if *flow_type* has no registered implementation.
        """
        registry = {
            FlowType.PLANNING: PlanningFlow,
        }

        flow_cls = registry.get(flow_type)
        if flow_cls is None:
            raise ValueError(f"Unknown flow type: {flow_type}")

        return flow_cls(agents, **kwargs)
31 |
--------------------------------------------------------------------------------
/app/logger.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from datetime import datetime
3 |
4 | from loguru import logger as _logger
5 |
6 | from app.config import PROJECT_ROOT
7 |
8 |
9 | _print_level = "INFO"
10 |
11 |
def define_log_level(print_level="INFO", logfile_level="DEBUG", name: str | None = None):
    """Reconfigure loguru sinks: console at *print_level*, file at *logfile_level*.

    A timestamped log file is created under PROJECT_ROOT/logs; *name*, when
    given, is used as a filename prefix. Returns the configured logger.
    """
    global _print_level
    _print_level = print_level

    current_date = datetime.now()
    formatted_date = current_date.strftime("%Y%m%d%H%M%S")
    log_name = (
        f"{name}_{formatted_date}" if name else formatted_date
    )  # name a log with prefix name

    # Drop any previously installed handlers before adding ours.
    _logger.remove()
    _logger.add(sys.stderr, level=print_level)
    _logger.add(PROJECT_ROOT / f"logs/{log_name}.log", level=logfile_level)
    return _logger
27 |
28 |
# Module-level logger configured with the defaults at import time.
logger = define_log_level()


# Smoke test when run directly: exercises every level plus exception logging.
if __name__ == "__main__":
    logger.info("Starting application")
    logger.debug("Debug message")
    logger.warning("Warning message")
    logger.error("Error message")
    logger.critical("Critical message")

    try:
        raise ValueError("Test error")
    except Exception as e:
        logger.exception(f"An error occurred: {e}")
43 |
--------------------------------------------------------------------------------
/app/mcp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationAgents/OpenManus/7cd3057ddab94989ec02f17060b1c7ed13b0bf92/app/mcp/__init__.py
--------------------------------------------------------------------------------
/app/mcp/server.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 |
4 |
5 | logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stderr)])
6 |
7 | import argparse
8 | import asyncio
9 | import atexit
10 | import json
11 | from inspect import Parameter, Signature
12 | from typing import Any, Dict, Optional
13 |
14 | from mcp.server.fastmcp import FastMCP
15 |
16 | from app.logger import logger
17 | from app.tool.base import BaseTool
18 | from app.tool.bash import Bash
19 | from app.tool.browser_use_tool import BrowserUseTool
20 | from app.tool.str_replace_editor import StrReplaceEditor
21 | from app.tool.terminate import Terminate
22 |
23 |
24 | class MCPServer:
25 | """MCP Server implementation with tool registration and management."""
26 |
27 | def __init__(self, name: str = "openmanus"):
28 | self.server = FastMCP(name)
29 | self.tools: Dict[str, BaseTool] = {}
30 |
31 | # Initialize standard tools
32 | self.tools["bash"] = Bash()
33 | self.tools["browser"] = BrowserUseTool()
34 | self.tools["editor"] = StrReplaceEditor()
35 | self.tools["terminate"] = Terminate()
36 |
37 | def register_tool(self, tool: BaseTool, method_name: Optional[str] = None) -> None:
38 | """Register a tool with parameter validation and documentation."""
39 | tool_name = method_name or tool.name
40 | tool_param = tool.to_param()
41 | tool_function = tool_param["function"]
42 |
43 | # Define the async function to be registered
44 | async def tool_method(**kwargs):
45 | logger.info(f"Executing {tool_name}: {kwargs}")
46 | result = await tool.execute(**kwargs)
47 |
48 | logger.info(f"Result of {tool_name}: {result}")
49 |
50 | # Handle different types of results (match original logic)
51 | if hasattr(result, "model_dump"):
52 | return json.dumps(result.model_dump())
53 | elif isinstance(result, dict):
54 | return json.dumps(result)
55 | return result
56 |
57 | # Set method metadata
58 | tool_method.__name__ = tool_name
59 | tool_method.__doc__ = self._build_docstring(tool_function)
60 | tool_method.__signature__ = self._build_signature(tool_function)
61 |
62 | # Store parameter schema (important for tools that access it programmatically)
63 | param_props = tool_function.get("parameters", {}).get("properties", {})
64 | required_params = tool_function.get("parameters", {}).get("required", [])
65 | tool_method._parameter_schema = {
66 | param_name: {
67 | "description": param_details.get("description", ""),
68 | "type": param_details.get("type", "any"),
69 | "required": param_name in required_params,
70 | }
71 | for param_name, param_details in param_props.items()
72 | }
73 |
74 | # Register with server
75 | self.server.tool()(tool_method)
76 | logger.info(f"Registered tool: {tool_name}")
77 |
78 | def _build_docstring(self, tool_function: dict) -> str:
79 | """Build a formatted docstring from tool function metadata."""
80 | description = tool_function.get("description", "")
81 | param_props = tool_function.get("parameters", {}).get("properties", {})
82 | required_params = tool_function.get("parameters", {}).get("required", [])
83 |
84 | # Build docstring (match original format)
85 | docstring = description
86 | if param_props:
87 | docstring += "\n\nParameters:\n"
88 | for param_name, param_details in param_props.items():
89 | required_str = (
90 | "(required)" if param_name in required_params else "(optional)"
91 | )
92 | param_type = param_details.get("type", "any")
93 | param_desc = param_details.get("description", "")
94 | docstring += (
95 | f" {param_name} ({param_type}) {required_str}: {param_desc}\n"
96 | )
97 |
98 | return docstring
99 |
100 | def _build_signature(self, tool_function: dict) -> Signature:
101 | """Build a function signature from tool function metadata."""
102 | param_props = tool_function.get("parameters", {}).get("properties", {})
103 | required_params = tool_function.get("parameters", {}).get("required", [])
104 |
105 | parameters = []
106 |
107 | # Follow original type mapping
108 | for param_name, param_details in param_props.items():
109 | param_type = param_details.get("type", "")
110 | default = Parameter.empty if param_name in required_params else None
111 |
112 | # Map JSON Schema types to Python types (same as original)
113 | annotation = Any
114 | if param_type == "string":
115 | annotation = str
116 | elif param_type == "integer":
117 | annotation = int
118 | elif param_type == "number":
119 | annotation = float
120 | elif param_type == "boolean":
121 | annotation = bool
122 | elif param_type == "object":
123 | annotation = dict
124 | elif param_type == "array":
125 | annotation = list
126 |
127 | # Create parameter with same structure as original
128 | param = Parameter(
129 | name=param_name,
130 | kind=Parameter.KEYWORD_ONLY,
131 | default=default,
132 | annotation=annotation,
133 | )
134 | parameters.append(param)
135 |
136 | return Signature(parameters=parameters)
137 |
138 | async def cleanup(self) -> None:
139 | """Clean up server resources."""
140 | logger.info("Cleaning up resources")
141 | # Follow original cleanup logic - only clean browser tool
142 | if "browser" in self.tools and hasattr(self.tools["browser"], "cleanup"):
143 | await self.tools["browser"].cleanup()
144 |
145 | def register_all_tools(self) -> None:
146 | """Register all tools with the server."""
147 | for tool in self.tools.values():
148 | self.register_tool(tool)
149 |
    def run(self, transport: str = "stdio") -> None:
        """Run the MCP server until it exits.

        Args:
            transport: Communication transport passed to FastMCP; only
                "stdio" is offered by the CLI below.
        """
        # Register all tools
        self.register_all_tools()

        # Register cleanup function (match original behavior).
        # NOTE(review): asyncio.run() in an atexit hook assumes no event loop
        # is running at interpreter shutdown — confirm this holds for all
        # transports.
        atexit.register(lambda: asyncio.run(self.cleanup()))

        # Start server (with same logging as original)
        logger.info(f"Starting OpenManus server ({transport} mode)")
        self.server.run(transport=transport)
161 |
162 |
def parse_args() -> argparse.Namespace:
    """Parse command line arguments.

    Returns:
        Namespace with a ``transport`` attribute (currently always "stdio").
    """
    parser = argparse.ArgumentParser(description="OpenManus MCP Server")
    parser.add_argument(
        "--transport",
        choices=["stdio"],
        default="stdio",
        # Help text previously advertised "http", which is not an accepted
        # choice; keep the help string in sync with `choices`.
        help="Communication method: stdio (default: stdio)",
    )
    return parser.parse_args()
173 |
174 |
# Script entry point: parse CLI options, then start the MCP server.
if __name__ == "__main__":
    args = parse_args()

    # Create and run server (maintaining original flow)
    server = MCPServer()
    server.run(transport=args.transport)
181 |
--------------------------------------------------------------------------------
/app/prompt/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationAgents/OpenManus/7cd3057ddab94989ec02f17060b1c7ed13b0bf92/app/prompt/__init__.py
--------------------------------------------------------------------------------
/app/prompt/browser.py:
--------------------------------------------------------------------------------
# System prompt for the browser-automation agent.
# NOTE(review): JSON braces are doubled ({{ }}) and {{max_actions}} is also
# doubled, which suggests this template goes through str.format — confirm
# at the call site that {{max_actions}} is meant to survive as a literal.
SYSTEM_PROMPT = """\
You are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules.

# Input Format
Task
Previous steps
Current URL
Open Tabs
Interactive Elements
[index]text
- index: Numeric identifier for interaction
- type: HTML element type (button, input, etc.)
- text: Element description
Example:
[33]Submit Form

- Only elements with numeric indexes in [] are interactive
- elements without [] provide only context

# Response Rules
1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:
{{"current_state": {{"evaluation_previous_goal": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not",
"memory": "Description of what has been done and what you need to remember. Be very specific. Count here ALWAYS how many times you have done something and how many remain. E.g. 0 out of 10 websites analyzed. Continue with abc and xyz",
"next_goal": "What needs to be done with the next immediate action"}},
"action":[{{"one_action_name": {{// action-specific parameter}}}}, // ... more actions in sequence]}}

2. ACTIONS: You can specify multiple actions in the list to be executed in sequence. But always specify only one action name per item. Use maximum {{max_actions}} actions per sequence.
Common action sequences:
- Form filling: [{{"input_text": {{"index": 1, "text": "username"}}}}, {{"input_text": {{"index": 2, "text": "password"}}}}, {{"click_element": {{"index": 3}}}}]
- Navigation and extraction: [{{"go_to_url": {{"url": "https://example.com"}}}}, {{"extract_content": {{"goal": "extract the names"}}}}]
- Actions are executed in the given order
- If the page changes after an action, the sequence is interrupted and you get the new state.
- Only provide the action sequence until an action which changes the page state significantly.
- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page
- only use multiple actions if it makes sense.

3. ELEMENT INTERACTION:
- Only use indexes of the interactive elements
- Elements marked with "[]Non-interactive text" are non-interactive

4. NAVIGATION & ERROR HANDLING:
- If no suitable elements exist, use other functions to complete the task
- If stuck, try alternative approaches - like going back to a previous page, new search, new tab etc.
- Handle popups/cookies by accepting or closing them
- Use scroll to find elements you are looking for
- If you want to research something, open a new tab instead of using the current tab
- If captcha pops up, try to solve it - else try a different approach
- If the page is not fully loaded, use wait action

5. TASK COMPLETION:
- Use the done action as the last action as soon as the ultimate task is complete
- Dont use "done" before you are done with everything the user asked you, except you reach the last step of max_steps.
- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completly finished set success to true. If not everything the user asked for is completed set success in done to false!
- If you have to do something repeatedly for example the task says for "each", or "for all", or "x times", count always inside "memory" how many times you have done it and how many remain. Don't stop until you have completed like the task asked you. Only call done after the last step.
- Don't hallucinate actions
- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task.

6. VISUAL CONTEXT:
- When an image is provided, use it to understand the page layout
- Bounding boxes with labels on their top right corner correspond to element indexes

7. Form filling:
- If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field.

8. Long tasks:
- Keep track of the status and subresults in the memory.

9. Extraction:
- If your task is to find information - call extract_content on the specific pages to get and store the information.
Your responses must be always JSON with the specified format.
"""
72 |
# Per-step prompt for the browser agent. The single-brace placeholders
# ({url_placeholder}, {tabs_placeholder}, ...) are filled in by the agent
# before the prompt is sent.
NEXT_STEP_PROMPT = """
What should I do next to achieve my goal?

When you see [Current state starts here], focus on the following:
- Current URL and page title{url_placeholder}
- Available tabs{tabs_placeholder}
- Interactive elements and their indices
- Content above{content_above_placeholder} or below{content_below_placeholder} the viewport (if indicated)
- Any action results or errors{results_placeholder}

For browser interactions:
- To navigate: browser_use with action="go_to_url", url="..."
- To click: browser_use with action="click_element", index=N
- To type: browser_use with action="input_text", index=N, text="..."
- To extract: browser_use with action="extract_content", goal="..."
- To scroll: browser_use with action="scroll_down" or "scroll_up"

Consider both what's visible and what might be beyond the current viewport.
Be methodical - remember your progress and what you've learned so far.

If you want to stop the interaction at any point, use the `terminate` tool/function call.
"""
95 |
--------------------------------------------------------------------------------
/app/prompt/manus.py:
--------------------------------------------------------------------------------
# Prompts for the general-purpose Manus agent; {directory} is a format
# placeholder filled by the agent.
# NOTE(review): the two adjacent string literals concatenate with no
# separator, so "...handle it all." runs directly into "The initial
# directory is:" — confirm whether a space/newline is intended.
SYSTEM_PROMPT = (
    "You are OpenManus, an all-capable AI assistant, aimed at solving any task presented by the user. You have various tools at your disposal that you can call upon to efficiently complete complex requests. Whether it's programming, information retrieval, file processing, web browsing, or human interaction (only for extreme cases), you can handle it all."
    "The initial directory is: {directory}"
)

NEXT_STEP_PROMPT = """
Based on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. After using each tool, clearly explain the execution results and suggest the next steps.

If you want to stop the interaction at any point, use the `terminate` tool/function call.
"""
11 |
--------------------------------------------------------------------------------
/app/prompt/mcp.py:
--------------------------------------------------------------------------------
"""Prompts for the MCP Agent."""

SYSTEM_PROMPT = """You are an AI assistant with access to a Model Context Protocol (MCP) server.
You can use the tools provided by the MCP server to complete tasks.
The MCP server will dynamically expose tools that you can use - always check the available tools first.

When using an MCP tool:
1. Choose the appropriate tool based on your task requirements
2. Provide properly formatted arguments as required by the tool
3. Observe the results and use them to determine next steps
4. Tools may change during operation - new tools might appear or existing ones might disappear

Follow these guidelines:
- Call tools with valid parameters as documented in their schemas
- Handle errors gracefully by understanding what went wrong and trying again with corrected parameters
- For multimedia responses (like images), you'll receive a description of the content
- Complete user requests step by step, using the most appropriate tools
- If multiple tools need to be called in sequence, make one call at a time and wait for results

Remember to clearly explain your reasoning and actions to the user.
"""

NEXT_STEP_PROMPT = """Based on the current state and available tools, what should be done next?
Think step by step about the problem and identify which MCP tool would be most helpful for the current stage.
If you've already made progress, consider what additional information you need or what actions would move you closer to completing the task.
"""

# Additional specialized prompts.
# {tool_name} is presumably filled via str.format by the agent — confirm at
# the call site.
TOOL_ERROR_PROMPT = """You encountered an error with the tool '{tool_name}'.
Try to understand what went wrong and correct your approach.
Common issues include:
- Missing or incorrect parameters
- Invalid parameter formats
- Using a tool that's no longer available
- Attempting an operation that's not supported

Please check the tool specifications and try again with corrected parameters.
"""

MULTIMEDIA_RESPONSE_PROMPT = """You've received a multimedia response (image, audio, etc.) from the tool '{tool_name}'.
This content has been processed and described for you.
Use this information to continue the task or provide insights to the user.
"""
44 |
--------------------------------------------------------------------------------
/app/prompt/planning.py:
--------------------------------------------------------------------------------
# Prompts for the planning agent (used with the `planning` tool).
PLANNING_SYSTEM_PROMPT = """
You are an expert Planning Agent tasked with solving problems efficiently through structured plans.
Your job is:
1. Analyze requests to understand the task scope
2. Create a clear, actionable plan that makes meaningful progress with the `planning` tool
3. Execute steps using available tools as needed
4. Track progress and adapt plans when necessary
5. Use `finish` to conclude immediately when the task is complete


Available tools will vary by task but may include:
- `planning`: Create, update, and track plans (commands: create, update, mark_step, etc.)
- `finish`: End the task when complete
Break tasks into logical steps with clear outcomes. Avoid excessive detail or sub-steps.
Think about dependencies and verification methods.
Know when to conclude - don't continue thinking once objectives are met.
"""

NEXT_STEP_PROMPT = """
Based on the current state, what's your next action?
Choose the most efficient path forward:
1. Is the plan sufficient, or does it need refinement?
2. Can you execute the next step immediately?
3. Is the task complete? If so, use `finish` right away.

Be concise in your reasoning, then select the appropriate tool or action.
"""
28 |
--------------------------------------------------------------------------------
/app/prompt/swe.py:
--------------------------------------------------------------------------------
# System prompt for the software-engineering (SWE) agent.
# NOTE(review): {{WINDOW}} is double-braced, implying the template is run
# through str.format and the braces are meant to survive — confirm, since a
# literal "{WINDOW}" in the final prompt looks unintended.
SYSTEM_PROMPT = """SETTING: You are an autonomous programmer, and you're working directly in the command line with a special interface.

The special interface consists of a file editor that shows you {{WINDOW}} lines of a file at a time.
In addition to typical bash commands, you can also use specific commands to help you navigate and edit files.
To call a command, you need to invoke it with a function call/tool call.

Please note that THE EDIT COMMAND REQUIRES PROPER INDENTATION.
If you'd like to add the line '        print(x)' you must fully write that out, with all those spaces before the code! Indentation is important and code that is not indented correctly will fail and require fixing before it can be run.

RESPONSE FORMAT:
Your shell prompt is formatted as follows:
(Open file: )
(Current directory: )
bash-$

First, you should _always_ include a general thought about what you're going to do next.
Then, for every response, you must include exactly _ONE_ tool call/function call.

Remember, you should always include a _SINGLE_ tool call/function call and then wait for a response from the shell before continuing with more discussion and commands. Everything you include in the DISCUSSION section will be saved for future reference.
If you'd like to issue two commands at once, PLEASE DO NOT DO THAT! Please instead first submit just the first tool call, and then after receiving a response you'll be able to issue the second tool call.
Note that the environment does NOT support interactive session commands (e.g. python, vim), so please do not invoke them.
"""
23 |
--------------------------------------------------------------------------------
/app/prompt/toolcall.py:
--------------------------------------------------------------------------------
# Minimal prompts for the generic tool-calling agent.
SYSTEM_PROMPT = "You are an agent that can execute tool calls"

NEXT_STEP_PROMPT = (
    "If you want to stop interaction, use `terminate` tool/function call."
)
6 |
--------------------------------------------------------------------------------
/app/prompt/visualization.py:
--------------------------------------------------------------------------------
# Prompts for the data-analysis / visualization agent; {directory} is a
# format placeholder filled by the agent.
SYSTEM_PROMPT = """You are an AI agent designed to data analysis / visualization task. You have various tools at your disposal that you can call upon to efficiently complete complex requests.
# Note:
1. The workspace directory is: {directory}; Read / write file in workspace
2. Generate analysis conclusion report in the end"""

NEXT_STEP_PROMPT = """Based on user needs, break down the problem and use different tools step by step to solve it.
# Note
1. Each step select the most appropriate tool proactively (ONLY ONE).
2. After using each tool, clearly explain the execution results and suggest the next steps.
3. When observation with Error, review and fix it."""
11 |
--------------------------------------------------------------------------------
/app/sandbox/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Docker Sandbox Module
3 |
4 | Provides secure containerized execution environment with resource limits
5 | and isolation for running untrusted code.
6 | """
7 | from app.sandbox.client import (
8 | BaseSandboxClient,
9 | LocalSandboxClient,
10 | create_sandbox_client,
11 | )
12 | from app.sandbox.core.exceptions import (
13 | SandboxError,
14 | SandboxResourceError,
15 | SandboxTimeoutError,
16 | )
17 | from app.sandbox.core.manager import SandboxManager
18 | from app.sandbox.core.sandbox import DockerSandbox
19 |
20 |
# Public API of the sandbox package.
__all__ = [
    "DockerSandbox",
    "SandboxManager",
    "BaseSandboxClient",
    "LocalSandboxClient",
    "create_sandbox_client",
    "SandboxError",
    "SandboxTimeoutError",
    "SandboxResourceError",
]
31 |
--------------------------------------------------------------------------------
/app/sandbox/client.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Dict, Optional, Protocol
3 |
4 | from app.config import SandboxSettings
5 | from app.sandbox.core.sandbox import DockerSandbox
6 |
7 |
class SandboxFileOperations(Protocol):
    """Structural protocol for sandbox file operations.

    Any object providing these four async file-transfer methods satisfies
    the protocol; no inheritance is required.
    """

    async def copy_from(self, container_path: str, local_path: str) -> None:
        """Copies file from container to local.

        Args:
            container_path: File path in container.
            local_path: Local destination path.
        """
        ...

    async def copy_to(self, local_path: str, container_path: str) -> None:
        """Copies file from local to container.

        Args:
            local_path: Local source file path.
            container_path: Destination path in container.
        """
        ...

    async def read_file(self, path: str) -> str:
        """Reads file content from container.

        Args:
            path: File path in container.

        Returns:
            str: File content.
        """
        ...

    async def write_file(self, path: str, content: str) -> None:
        """Writes content to file in container.

        Args:
            path: File path in container.
            content: Content to write.
        """
        ...
48 |
49 |
class BaseSandboxClient(ABC):
    """Base sandbox client interface.

    Concrete implementations (e.g. LocalSandboxClient below) provide
    lifecycle management plus command execution and file transfer.
    """

    @abstractmethod
    async def create(
        self,
        config: Optional[SandboxSettings] = None,
        volume_bindings: Optional[Dict[str, str]] = None,
    ) -> None:
        """Creates sandbox."""

    @abstractmethod
    async def run_command(self, command: str, timeout: Optional[int] = None) -> str:
        """Executes command."""

    @abstractmethod
    async def copy_from(self, container_path: str, local_path: str) -> None:
        """Copies file from container."""

    @abstractmethod
    async def copy_to(self, local_path: str, container_path: str) -> None:
        """Copies file to container."""

    @abstractmethod
    async def read_file(self, path: str) -> str:
        """Reads file."""

    @abstractmethod
    async def write_file(self, path: str, content: str) -> None:
        """Writes file."""

    @abstractmethod
    async def cleanup(self) -> None:
        """Cleans up resources."""
84 |
85 |
class LocalSandboxClient(BaseSandboxClient):
    """Local sandbox client implementation.

    Wraps a single DockerSandbox instance; every operation other than
    `create()` requires the sandbox to have been created first.
    """

    def __init__(self):
        """Initializes local sandbox client with no active sandbox."""
        self.sandbox: Optional[DockerSandbox] = None

    def _active_sandbox(self) -> DockerSandbox:
        """Return the current sandbox, enforcing the initialization guard.

        Raises:
            RuntimeError: If `create()` has not been called yet.
        """
        if not self.sandbox:
            raise RuntimeError("Sandbox not initialized")
        return self.sandbox

    async def create(
        self,
        config: Optional[SandboxSettings] = None,
        volume_bindings: Optional[Dict[str, str]] = None,
    ) -> None:
        """Creates a sandbox.

        Args:
            config: Sandbox configuration.
            volume_bindings: Volume mappings.

        Raises:
            RuntimeError: If sandbox creation fails.
        """
        self.sandbox = DockerSandbox(config, volume_bindings)
        await self.sandbox.create()

    async def run_command(self, command: str, timeout: Optional[int] = None) -> str:
        """Runs command in sandbox.

        Args:
            command: Command to execute.
            timeout: Execution timeout in seconds.

        Returns:
            Command output.

        Raises:
            RuntimeError: If sandbox not initialized.
        """
        return await self._active_sandbox().run_command(command, timeout)

    async def copy_from(self, container_path: str, local_path: str) -> None:
        """Copies file from container to local.

        Args:
            container_path: File path in container.
            local_path: Local destination path.

        Raises:
            RuntimeError: If sandbox not initialized.
        """
        await self._active_sandbox().copy_from(container_path, local_path)

    async def copy_to(self, local_path: str, container_path: str) -> None:
        """Copies file from local to container.

        Args:
            local_path: Local source file path.
            container_path: Destination path in container.

        Raises:
            RuntimeError: If sandbox not initialized.
        """
        await self._active_sandbox().copy_to(local_path, container_path)

    async def read_file(self, path: str) -> str:
        """Reads file from container.

        Args:
            path: File path in container.

        Returns:
            File content.

        Raises:
            RuntimeError: If sandbox not initialized.
        """
        return await self._active_sandbox().read_file(path)

    async def write_file(self, path: str, content: str) -> None:
        """Writes file to container.

        Args:
            path: File path in container.
            content: File content.

        Raises:
            RuntimeError: If sandbox not initialized.
        """
        await self._active_sandbox().write_file(path, content)

    async def cleanup(self) -> None:
        """Cleans up resources and resets the client for reuse."""
        if self.sandbox:
            await self.sandbox.cleanup()
            self.sandbox = None
190 |
191 |
def create_sandbox_client() -> LocalSandboxClient:
    """Creates a sandbox client.

    Returns:
        LocalSandboxClient: Sandbox client instance (no sandbox is created
        until `create()` is awaited on it).
    """
    return LocalSandboxClient()


# Module-level singleton; imported and shared across the application.
SANDBOX_CLIENT = create_sandbox_client()
202 |
--------------------------------------------------------------------------------
/app/sandbox/core/exceptions.py:
--------------------------------------------------------------------------------
1 | """Exception classes for the sandbox system.
2 |
3 | This module defines custom exceptions used throughout the sandbox system to
4 | handle various error conditions in a structured way.
5 | """
6 |
7 |
class SandboxError(Exception):
    """Base exception for sandbox-related errors.

    Catching this type also covers all sandbox-specific subclasses.
    """
10 |
11 |
class SandboxTimeoutError(SandboxError):
    """Exception raised when a sandbox operation times out."""
14 |
15 |
class SandboxResourceError(SandboxError):
    """Exception raised for resource-related errors (e.g. limits exceeded)."""
18 |
--------------------------------------------------------------------------------
/app/schema.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import Any, List, Literal, Optional, Union
3 |
4 | from pydantic import BaseModel, Field
5 |
6 |
class Role(str, Enum):
    """Message role options; the values are used for `Message.role`."""

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    TOOL = "tool"
15 |
# Runtime tuple of the role strings, plus a Literal alias used to validate
# Message.role via pydantic.
ROLE_VALUES = tuple(role.value for role in Role)
ROLE_TYPE = Literal[ROLE_VALUES]  # type: ignore
18 |
19 |
class ToolChoice(str, Enum):
    """Tool choice options ("none" | "auto" | "required")."""

    NONE = "none"
    AUTO = "auto"
    REQUIRED = "required"
26 |
27 |
# Runtime tuple of the tool-choice strings, plus a Literal alias for
# validation, mirroring ROLE_VALUES/ROLE_TYPE above.
TOOL_CHOICE_VALUES = tuple(choice.value for choice in ToolChoice)
TOOL_CHOICE_TYPE = Literal[TOOL_CHOICE_VALUES]  # type: ignore
30 |
31 |
class AgentState(str, Enum):
    """Agent execution states."""

    # Values are uppercase strings, unlike Role/ToolChoice above.
    IDLE = "IDLE"
    RUNNING = "RUNNING"
    FINISHED = "FINISHED"
    ERROR = "ERROR"
39 |
40 |
class Function(BaseModel):
    """Name and raw arguments of a function/tool invocation."""

    name: str
    # Arguments as a single string — presumably JSON-encoded by the LLM;
    # confirm against the client that populates this.
    arguments: str
44 |
45 |
class ToolCall(BaseModel):
    """Represents a tool/function call in a message"""

    id: str
    type: str = "function"  # only "function" is used in this file
    function: Function
52 |
53 |
class Message(BaseModel):
    """Represents a chat message in the conversation."""

    role: ROLE_TYPE = Field(...)  # type: ignore
    content: Optional[str] = Field(default=None)
    tool_calls: Optional[List[ToolCall]] = Field(default=None)
    name: Optional[str] = Field(default=None)
    tool_call_id: Optional[str] = Field(default=None)
    base64_image: Optional[str] = Field(default=None)

    def __add__(self, other) -> List["Message"]:
        """Support `Message + list` and `Message + Message` operations."""
        if isinstance(other, list):
            return [self] + other
        elif isinstance(other, Message):
            return [self, other]
        else:
            raise TypeError(
                f"unsupported operand type(s) for +: '{type(self).__name__}' and '{type(other).__name__}'"
            )

    def __radd__(self, other) -> List["Message"]:
        """Support `list + Message` operations."""
        if isinstance(other, list):
            return other + [self]
        else:
            raise TypeError(
                f"unsupported operand type(s) for +: '{type(other).__name__}' and '{type(self).__name__}'"
            )

    def to_dict(self) -> dict:
        """Convert message to dictionary format, omitting unset fields."""
        message = {"role": self.role}
        if self.content is not None:
            message["content"] = self.content
        if self.tool_calls is not None:
            # model_dump() is the pydantic v2 API; keeps this consistent with
            # from_tool_calls below (.dict() is deprecated in v2).
            message["tool_calls"] = [
                tool_call.model_dump() for tool_call in self.tool_calls
            ]
        if self.name is not None:
            message["name"] = self.name
        if self.tool_call_id is not None:
            message["tool_call_id"] = self.tool_call_id
        if self.base64_image is not None:
            message["base64_image"] = self.base64_image
        return message

    @classmethod
    def user_message(
        cls, content: str, base64_image: Optional[str] = None
    ) -> "Message":
        """Create a user message"""
        return cls(role=Role.USER, content=content, base64_image=base64_image)

    @classmethod
    def system_message(cls, content: str) -> "Message":
        """Create a system message"""
        return cls(role=Role.SYSTEM, content=content)

    @classmethod
    def assistant_message(
        cls, content: Optional[str] = None, base64_image: Optional[str] = None
    ) -> "Message":
        """Create an assistant message"""
        return cls(role=Role.ASSISTANT, content=content, base64_image=base64_image)

    @classmethod
    def tool_message(
        cls,
        content: str,
        name: str,
        tool_call_id: str,
        base64_image: Optional[str] = None,
    ) -> "Message":
        """Create a tool message"""
        return cls(
            role=Role.TOOL,
            content=content,
            name=name,
            tool_call_id=tool_call_id,
            base64_image=base64_image,
        )

    @classmethod
    def from_tool_calls(
        cls,
        tool_calls: List[Any],
        content: Union[str, List[str]] = "",
        base64_image: Optional[str] = None,
        **kwargs,
    ):
        """Create ToolCallsMessage from raw tool calls.

        Args:
            tool_calls: Raw tool calls from LLM
            content: Optional message content
            base64_image: Optional base64 encoded image
        """
        formatted_calls = [
            {"id": call.id, "function": call.function.model_dump(), "type": "function"}
            for call in tool_calls
        ]
        return cls(
            role=Role.ASSISTANT,
            content=content,
            tool_calls=formatted_calls,
            base64_image=base64_image,
            **kwargs,
        )
157 |
158 |
class Memory(BaseModel):
    """Bounded list of conversation messages (oldest dropped first)."""

    messages: List[Message] = Field(default_factory=list)
    max_messages: int = Field(default=100)

    def _enforce_limit(self) -> None:
        """Drop the oldest messages so at most `max_messages` remain."""
        if len(self.messages) > self.max_messages:
            self.messages = self.messages[-self.max_messages :]

    def add_message(self, message: Message) -> None:
        """Add a message to memory"""
        self.messages.append(message)
        self._enforce_limit()

    def add_messages(self, messages: List[Message]) -> None:
        """Add multiple messages to memory"""
        self.messages.extend(messages)
        self._enforce_limit()

    def clear(self) -> None:
        """Clear all messages"""
        self.messages.clear()

    def get_recent_messages(self, n: int) -> List[Message]:
        """Get the n most recent messages (empty list for n <= 0)."""
        # Guard: messages[-0:] would return ALL messages, not zero.
        if n <= 0:
            return []
        return self.messages[-n:]

    def to_dict_list(self) -> List[dict]:
        """Convert messages to list of dicts"""
        return [msg.to_dict() for msg in self.messages]
188 |
--------------------------------------------------------------------------------
/app/tool/__init__.py:
--------------------------------------------------------------------------------
1 | from app.tool.base import BaseTool
2 | from app.tool.bash import Bash
3 | from app.tool.browser_use_tool import BrowserUseTool
4 | from app.tool.create_chat_completion import CreateChatCompletion
5 | from app.tool.planning import PlanningTool
6 | from app.tool.str_replace_editor import StrReplaceEditor
7 | from app.tool.terminate import Terminate
8 | from app.tool.tool_collection import ToolCollection
9 | from app.tool.web_search import WebSearch
10 |
11 |
# Public tool API re-exported at the package level.
__all__ = [
    "BaseTool",
    "Bash",
    "BrowserUseTool",
    "Terminate",
    "StrReplaceEditor",
    "WebSearch",
    "ToolCollection",
    "CreateChatCompletion",
    "PlanningTool",
]
23 |
--------------------------------------------------------------------------------
/app/tool/ask_human.py:
--------------------------------------------------------------------------------
1 | from app.tool import BaseTool
2 |
3 |
class AskHuman(BaseTool):
    """Tool that pauses execution to ask the human user a question via stdin."""

    name: str = "ask_human"
    description: str = "Use this tool to ask human for help."
    # Annotation fixed: the value is a JSON-Schema dict, not a str
    # (matches BaseTool.parameters: Optional[dict]).
    parameters: dict = {
        "type": "object",
        "properties": {
            "inquire": {
                "type": "string",
                "description": "The question you want to ask human.",
            }
        },
        "required": ["inquire"],
    }

    async def execute(self, inquire: str) -> str:
        """Prompt on stdin and return the user's reply, stripped."""
        return input(f"""Bot: {inquire}\n\nYou: """).strip()
22 |
--------------------------------------------------------------------------------
/app/tool/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Any, Dict, Optional
3 |
4 | from pydantic import BaseModel, Field
5 |
6 |
class BaseTool(ABC, BaseModel):
    """Abstract base for all tools: a named, described, schema'd async callable."""

    name: str
    description: str
    parameters: Optional[dict] = None  # JSON Schema describing execute() kwargs

    class Config:
        arbitrary_types_allowed = True

    async def __call__(self, **kwargs) -> Any:
        """Execute the tool with given parameters."""
        return await self.execute(**kwargs)

    @abstractmethod
    async def execute(self, **kwargs) -> Any:
        """Execute the tool with given parameters."""

    def to_param(self) -> Dict:
        """Convert tool to the function-call parameter format sent to the LLM."""
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": self.parameters,
            },
        }
33 |
34 |
class ToolResult(BaseModel):
    """Represents the result of a tool execution.

    Any subset of the fields may be populated; a result is truthy iff at
    least one field holds a truthy value.
    """

    output: Any = Field(default=None)  # primary payload (often, but not necessarily, a str)
    error: Optional[str] = Field(default=None)  # error text; takes priority in __str__
    base64_image: Optional[str] = Field(default=None)  # screenshot; not concatenable
    system: Optional[str] = Field(default=None)  # out-of-band message to the agent

    class Config:
        arbitrary_types_allowed = True

    def __bool__(self):
        return any(getattr(self, field) for field in self.__fields__)

    def __add__(self, other: "ToolResult"):
        """Merge two results: string-like fields are concatenated; a
        base64 image present on both sides raises ValueError."""

        def combine_fields(
            field: Optional[str], other_field: Optional[str], concatenate: bool = True
        ):
            if field and other_field:
                if concatenate:
                    return field + other_field
                raise ValueError("Cannot combine tool results")
            return field or other_field

        return ToolResult(
            output=combine_fields(self.output, other.output),
            error=combine_fields(self.error, other.error),
            base64_image=combine_fields(self.base64_image, other.base64_image, False),
            system=combine_fields(self.system, other.system),
        )

    def __str__(self):
        # Fix: `output` is typed Any, so coerce to str. Previously a
        # non-string output (e.g. None or a dict) made str(result) raise
        # "TypeError: __str__ returned non-string".
        return f"Error: {self.error}" if self.error else str(self.output)

    def replace(self, **kwargs):
        """Returns a new ToolResult with the given fields replaced."""
        return type(self)(**{**self.dict(), **kwargs})
73 |
74 |
class CLIResult(ToolResult):
    """A ToolResult that can be rendered as a CLI output.

    Marker subclass: adds no fields or behavior beyond ToolResult.
    """
77 |
78 |
class ToolFailure(ToolResult):
    """A ToolResult that represents a failure.

    Marker subclass: adds no fields or behavior beyond ToolResult.
    """
81 |
--------------------------------------------------------------------------------
/app/tool/bash.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | from typing import Optional
4 |
5 | from app.exceptions import ToolError
6 | from app.tool.base import BaseTool, CLIResult
7 |
8 |
# LLM-facing tool description. The exit-code and `ctrl+c` conventions below
# are part of the prompt contract with the agent — do not reword casually.
_BASH_DESCRIPTION = """Execute a bash command in the terminal.
* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`.
* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.
* Timeout: If a command execution result says "Command timed out. Sending SIGINT to the process", the assistant should retry running the command in the background.
"""
14 |
15 |
class _BashSession:
    """A session of a bash shell.

    Wraps one long-lived ``/bin/bash`` subprocess. Each command is written to
    its stdin followed by an echoed sentinel; output is then polled from the
    StreamReader buffers until the sentinel shows up. A session that times
    out is poisoned (``_timed_out``) and must be replaced.
    """

    _started: bool
    # Set by start(); methods assume start() was called first.
    _process: asyncio.subprocess.Process
    # Once True, every run() raises until the session is restarted.
    _timed_out: bool

    command: str = "/bin/bash"
    _output_delay: float = 0.2  # seconds between buffer polls
    _timeout: float = 120.0  # seconds before a command is declared hung
    _sentinel: str = "<>"  # marker echoed after each command to detect completion

    def __init__(self) -> None:
        self._started = False
        self._timed_out = False

    async def start(self) -> None:
        """Spawn the bash subprocess (idempotent)."""
        if self._started:
            return

        self._process = await asyncio.create_subprocess_shell(
            self.command,
            # Run bash in its own session/process group, detached from ours.
            preexec_fn=os.setsid,
            shell=True,
            bufsize=0,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )

        self._started = True

    def stop(self) -> None:
        """Terminate the bash shell.

        Raises ToolError if start() was never called; no-op if the process
        has already exited.
        """
        if not self._started:
            raise ToolError("Session has not started.")
        if self._process.returncode is not None:
            return
        self._process.terminate()

    async def run(self, command: str):
        """Execute a command in the bash shell and return a CLIResult.

        Raises ToolError if the session was never started or has previously
        timed out; returns an error CLIResult if bash itself has exited.
        """
        if not self._started:
            raise ToolError("Session has not started.")
        if self._process.returncode is not None:
            return CLIResult(
                system="tool must be restarted",
                error=f"bash has exited with returncode {self._process.returncode}",
            )
        if self._timed_out:
            raise ToolError(
                f"timed out: bash has not returned in {self._timeout} seconds and must be restarted",
            )

        # we know these are not None because we created the process with PIPEs
        assert self._process.stdin
        assert self._process.stdout
        assert self._process.stderr

        # Send the command plus an echoed sentinel so we can tell when it is
        # done. NOTE(review): the appended `;` breaks commands ending in `&`
        # or a comment — relies on the LLM following _BASH_DESCRIPTION.
        self._process.stdin.write(
            command.encode() + f"; echo '{self._sentinel}'\n".encode()
        )
        await self._process.stdin.drain()

        # Poll output until the sentinel is found. asyncio.timeout requires
        # Python 3.11+.
        try:
            async with asyncio.timeout(self._timeout):
                while True:
                    await asyncio.sleep(self._output_delay)
                    # if we read directly from stdout/stderr, it will wait forever for
                    # EOF. use the StreamReader buffer directly instead.
                    # HACK: `_buffer` is a private StreamReader attribute.
                    output = (
                        self._process.stdout._buffer.decode()
                    )  # pyright: ignore[reportAttributeAccessIssue]
                    if self._sentinel in output:
                        # strip the sentinel and break
                        output = output[: output.index(self._sentinel)]
                        break
        except asyncio.TimeoutError:
            # Poison the session: partial output may still be buffered, so a
            # retry on this shell would interleave results.
            self._timed_out = True
            raise ToolError(
                f"timed out: bash has not returned in {self._timeout} seconds and must be restarted",
            ) from None

        # Drop the single trailing newline produced by the command/echo.
        if output.endswith("\n"):
            output = output[:-1]

        error = (
            self._process.stderr._buffer.decode()
        )  # pyright: ignore[reportAttributeAccessIssue]
        if error.endswith("\n"):
            error = error[:-1]

        # clear the buffers so that the next output can be read correctly
        self._process.stdout._buffer.clear()  # pyright: ignore[reportAttributeAccessIssue]
        self._process.stderr._buffer.clear()  # pyright: ignore[reportAttributeAccessIssue]

        return CLIResult(output=output, error=error)
114 |
115 |
class Bash(BaseTool):
    """Tool exposing a single persistent bash session to the agent."""

    name: str = "bash"
    description: str = _BASH_DESCRIPTION
    parameters: dict = {
        "type": "object",
        "properties": {
            "command": {
                "type": "string",
                "description": "The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. Can be `ctrl+c` to interrupt the currently running process.",
            },
        },
        "required": ["command"],
    }

    # Lazily created on first execute(); replaced wholesale on restart.
    _session: Optional[_BashSession] = None

    async def execute(
        self, command: str | None = None, restart: bool = False, **kwargs
    ) -> CLIResult:
        """Run `command` in the session; `restart=True` replaces the shell."""
        if restart:
            # Tear down the old shell (if any) and spin up a clean one.
            if self._session:
                self._session.stop()
            self._session = _BashSession()
            await self._session.start()

            return CLIResult(system="tool has been restarted.")

        if self._session is None:
            # First use: create the session on demand.
            self._session = _BashSession()
            await self._session.start()

        if command is None:
            raise ToolError("no command provided.")

        return await self._session.run(command)
153 |
154 |
if __name__ == "__main__":
    # Manual smoke test: run one command and print its CLIResult.
    result = asyncio.run(Bash().execute("ls -l"))
    print(result)
159 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Chart Visualization Tool
4 |
5 | The chart visualization tool generates data processing code through Python and ultimately invokes [@visactor/vmind](https://github.com/VisActor/VMind) to obtain chart specifications. Chart rendering is implemented using [@visactor/vchart](https://github.com/VisActor/VChart).
6 |
7 | ## Installation (Mac / Linux)
8 |
9 | 1. Install node >= 18
10 |
11 | ```bash
12 | curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
13 | # Activate nvm, for example in Bash
14 | source ~/.bashrc
15 | # Then install the latest stable release of Node
16 | nvm install node
17 | # Activate usage, for example if the latest stable release is 22, then use 22
18 | nvm use 22
19 | ```
20 |
21 | 2. Install dependencies
22 |
23 | ```bash
24 | # Navigate to the appropriate location in the current repository
25 | cd app/tool/chart_visualization
26 | npm install
27 | ```
28 |
29 | ## Installation (Windows)
30 | 1. Install nvm-windows
31 |
32 | Download the latest version `nvm-setup.exe` from the [official GitHub page](https://github.com/coreybutler/nvm-windows?tab=readme-ov-file#readme) and install it.
33 |
34 | 2. Use nvm to install node
35 |
36 | ```powershell
37 | # Then install the latest stable release of Node
38 | nvm install node
39 | # Activate usage, for example if the latest stable release is 22, then use 22
40 | nvm use 22
41 | ```
42 |
43 | 3. Install dependencies
44 |
45 | ```bash
46 | # Navigate to the appropriate location in the current repository
47 | cd app/tool/chart_visualization
48 | npm install
49 | ```
50 |
51 | ## Tool
52 | ### python_execute
53 |
54 | Execute the necessary parts of data analysis (excluding data visualization) using Python code, including data processing, data summary, report generation, and some general Python script code.
55 |
56 | #### Input
57 | ```typescript
58 | {
59 | // Code type: data processing/data report/other general tasks
60 | code_type: "process" | "report" | "others"
61 | // Final execution code
62 | code: string;
63 | }
64 | ```
65 |
66 | #### Output
67 | Python execution results, including the saving of intermediate files and print output results.
68 |
69 | ### visualization_preparation
70 |
A pre-tool for data visualization with two purposes:
72 |
73 | #### Data -> Chart
74 | Used to extract the data needed for analysis (.csv) and the corresponding visualization description from the data, ultimately outputting a JSON configuration file.
75 |
76 | #### Chart + Insight -> Chart
77 | Select existing charts and corresponding data insights, choose data insights to add to the chart in the form of data annotations, and finally generate a JSON configuration file.
78 |
79 | #### Input
80 | ```typescript
81 | {
82 | // Code type: data visualization or data insight addition
83 | code_type: "visualization" | "insight"
84 | // Python code used to produce the final JSON file
85 | code: string;
86 | }
87 | ```
88 |
89 | #### Output
90 | A configuration file for data visualization, used for the `data_visualization tool`.
91 |
### data_visualization

Generate specific data visualizations based on the content of `visualization_preparation`.

#### Input
97 | ```typescript
98 | {
99 | // Configuration file path
100 | json_path: string;
101 | // Current purpose, data visualization or insight annotation addition
102 | tool_type: "visualization" | "insight";
103 | // Final product png or html; html supports vchart rendering and interaction
104 | output_type: 'png' | 'html'
105 | // Language, currently supports Chinese and English
106 | language: "zh" | "en"
107 | }
108 | ```
109 |
110 | ## VMind Configuration
111 |
112 | ### LLM
113 |
114 | VMind requires LLM invocation for intelligent chart generation. By default, it uses the `config.llm["default"]` configuration.
115 |
116 | ### Generation Settings
117 |
118 | Main configurations include chart dimensions, theme, and generation method:
119 | ### Generation Method
120 | Default: png. Currently supports automatic selection of `output_type` by LLM based on context.
121 |
122 | ### Dimensions
123 | Default dimensions are unspecified. For HTML output, charts fill the entire page by default. For PNG output, defaults to `1000*1000`.
124 |
125 | ### Theme
126 | Default theme: `'light'`. VChart supports multiple themes. See [Themes](https://www.visactor.io/vchart/guide/tutorial_docs/Theme/Theme_Extension).
127 |
128 | ## Test
129 |
130 | Currently, three tasks of different difficulty levels are set for testing.
131 |
132 | ### Simple Chart Generation Task
133 |
134 | Provide data and specific chart generation requirements, test results, execute the command:
135 | ```bash
136 | python -m app.tool.chart_visualization.test.chart_demo
137 | ```
The results should be located under `workspace/visualization`, involving 9 different chart results.
139 |
140 | ### Simple Data Report Task
141 |
142 | Provide simple raw data analysis requirements, requiring simple processing of the data, execute the command:
143 | ```bash
144 | python -m app.tool.chart_visualization.test.report_demo
145 | ```
The results are also located under `workspace/visualization`.
147 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/README_ja.md:
--------------------------------------------------------------------------------
1 | # グラフ可視化ツール
2 |
3 | グラフ可視化ツールは、Pythonを使用してデータ処理コードを生成し、最終的に[@visactor/vmind](https://github.com/VisActor/VMind)を呼び出してグラフのspec結果を得ます。グラフのレンダリングには[@visactor/vchart](https://github.com/VisActor/VChart)を使用します。
4 |
5 | ## インストール (Mac / Linux)
6 |
7 | 1. Node >= 18をインストール
8 |
9 | ```bash
10 | curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
11 | # nvmを有効化、例としてBashを使用
12 | source ~/.bashrc
13 | # その後、最新の安定版Nodeをインストール
14 | nvm install node
15 | # 使用を有効化、例えば最新の安定版が22の場合、use 22
16 | nvm use 22
17 | ```
18 |
19 | 2. 依存関係をインストール
20 |
21 | ```bash
22 | cd app/tool/chart_visualization
23 | npm install
24 | ```
25 |
26 | ## インストール (Windows)
27 | 1. nvm-windowsをインストール
28 |
29 | [GitHub公式サイト](https://github.com/coreybutler/nvm-windows?tab=readme-ov-file#readme)から最新バージョンの`nvm-setup.exe`をダウンロードしてインストール
30 |
31 | 2. nvmを使用してNodeをインストール
32 |
33 | ```powershell
34 | # その後、最新の安定版Nodeをインストール
35 | nvm install node
36 | # 使用を有効化、例えば最新の安定版が22の場合、use 22
37 | nvm use 22
38 | ```
39 |
40 | 3. 依存関係をインストール
41 |
42 | ```bash
43 | # 現在のリポジトリで適切な位置に移動
44 | cd app/tool/chart_visualization
45 | npm install
46 | ```
47 |
48 | ## ツール
49 | ### python_execute
50 |
51 | Pythonコードを使用してデータ分析(データ可視化を除く)に必要な部分を実行します。これにはデータ処理、データ要約、レポート生成、および一般的なPythonスクリプトコードが含まれます。
52 |
53 | #### 入力
54 | ```typescript
55 | {
56 | // コードタイプ:データ処理/データレポート/その他の一般的なタスク
57 | code_type: "process" | "report" | "others"
58 | // 最終実行コード
59 | code: string;
60 | }
61 | ```
62 |
63 | #### 出力
64 | Python実行結果、中間ファイルの保存とprint出力結果を含む
65 |
66 | ### visualization_preparation
67 |
68 | データ可視化の準備ツールで、2つの用途があります。
69 |
70 | #### Data -> Chart
71 | データから分析に必要なデータ(.csv)と対応する可視化の説明を抽出し、最終的にJSON設定ファイルを出力します。
72 |
73 | #### Chart + Insight -> Chart
74 | 既存のグラフと対応するデータインサイトを選択し、データインサイトをデータ注釈の形式でグラフに追加し、最終的にJSON設定ファイルを生成します。
75 |
76 | #### 入力
77 | ```typescript
78 | {
79 | // コードタイプ:データ可視化またはデータインサイト追加
80 | code_type: "visualization" | "insight"
81 | // 最終的なJSONファイルを生成するためのPythonコード
82 | code: string;
83 | }
84 | ```
85 |
86 | #### 出力
87 | データ可視化の設定ファイル、`data_visualization tool`で使用
88 |
89 | ## data_visualization
90 |
91 | `visualization_preparation`の内容に基づいて具体的なデータ可視化を生成
92 |
93 | ### 入力
94 | ```typescript
95 | {
96 | // 設定ファイルのパス
97 | json_path: string;
98 | // 現在の用途、データ可視化またはインサイト注釈追加
99 | tool_type: "visualization" | "insight";
100 | // 最終成果物pngまたはhtml;htmlではvchartのレンダリングとインタラクションをサポート
101 | output_type: 'png' | 'html'
102 | // 言語、現在は中国語と英語をサポート
103 | language: "zh" | "en"
104 | }
105 | ```
106 |
107 | ## 出力
108 | 最終的に'png'または'html'の形式でローカルに保存され、保存されたグラフのパスとグラフ内で発見されたデータインサイトを出力
109 |
110 | ## VMind設定
111 |
112 | ### LLM
113 |
114 | VMind自体
115 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/README_ko.md:
--------------------------------------------------------------------------------
1 | # 차트 시각화 도구
2 |
3 | 차트 시각화 도구는 Python을 통해 데이터 처리 코드를 생성하고, 최종적으로 [@visactor/vmind](https://github.com/VisActor/VMind)를 호출하여 차트 사양을 얻습니다. 차트 렌더링은 [@visactor/vchart](https://github.com/VisActor/VChart)를 사용하여 구현됩니다.
4 |
5 | ## 설치 (Mac / Linux)
6 |
7 | 1. Node.js 18 이상 설치
8 |
9 | ```bash
10 | curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
11 | # nvm 활성화, 예를 들어 Bash
12 | source ~/.bashrc
13 | # 그런 다음 최신 안정 버전의 Node 설치
14 | nvm install node
15 | # 사용 활성화, 예를 들어 최신 안정 버전이 22인 경우 use 22
16 | nvm use 22
17 | ```
18 |
19 | 2. 의존성 설치
20 |
21 | ```bash
22 | # 현재 저장소에서 해당 위치로 이동
23 | cd app/tool/chart_visualization
24 | npm install
25 | ```
26 |
27 | ## 설치 (Windows)
28 | 1. nvm-windows 설치
29 |
30 | [공식 GitHub 페이지](https://github.com/coreybutler/nvm-windows?tab=readme-ov-file#readme)에서 최신 버전의 `nvm-setup.exe`를 다운로드하고 설치합니다.
31 |
32 | 2. nvm을 사용하여 Node.js 설치
33 |
34 | ```powershell
35 | # 그런 다음 최신 안정 버전의 Node 설치
36 | nvm install node
37 | # 사용 활성화, 예를 들어 최신 안정 버전이 22인 경우 use 22
38 | nvm use 22
39 | ```
40 |
41 | 3. 의존성 설치
42 |
43 | ```bash
44 | # 현재 저장소에서 해당 위치로 이동
45 | cd app/tool/chart_visualization
46 | npm install
47 | ```
48 |
49 | ## 도구
50 | ### python_execute
51 |
52 | Python 코드를 사용하여 데이터 분석의 필요한 부분(데이터 시각화 제외)을 실행합니다. 여기에는 데이터 처리, 데이터 요약, 보고서 생성 및 일부 일반적인 Python 스크립트 코드가 포함됩니다.
53 |
54 | #### 입력
55 | ```typescript
56 | {
57 | // 코드 유형: 데이터 처리/데이터 보고서/기타 일반 작업
58 | code_type: "process" | "report" | "others"
59 | // 최종 실행 코드
60 | code: string;
61 | }
62 | ```
63 |
64 | #### 출력
65 | Python 실행 결과, 중간 파일 저장 및 출력 결과 포함.
66 |
67 | ### visualization_preparation
68 |
69 | 데이터 시각화를 위한 사전 도구로 두 가지 목적이 있습니다.
70 |
71 | #### 데이터 -> 차트
72 | 분석에 필요한 데이터(.csv)와 해당 시각화 설명을 데이터에서 추출하여 최종적으로 JSON 구성 파일을 출력합니다.
73 |
74 | #### 차트 + 인사이트 -> 차트
75 | 기존 차트와 해당 데이터 인사이트를 선택하고, 데이터 주석 형태로 차트에 추가할 데이터 인사이트를 선택하여 최종적으로 JSON 구성 파일을 생성합니다.
76 |
77 | #### 입력
78 | ```typescript
79 | {
80 | // 코드 유형: 데이터 시각화 또는 데이터 인사이트 추가
81 | code_type: "visualization" | "insight"
82 | // 최종 JSON 파일을 생성하는 데 사용되는 Python 코드
83 | code: string;
84 | }
85 | ```
86 |
87 | #### 출력
88 | `data_visualization tool`에 사용되는 데이터 시각화를 위한 구성 파일.
89 |
90 | ## data_visualization
91 |
92 | `visualization_preparation`의 내용을 기반으로 특정 데이터 시각화를 생성합니다.
93 |
94 | ### 입력
95 | ```typescript
96 | {
97 | // 구성 파일 경로
98 | json_path: string;
99 | // 현재 목적, 데이터 시각화 또는 인사이트 주석 추가
100 | tool_type: "visualization" | "insight";
101 | // 최종 제품 png 또는 html; html은 vchart 렌더링 및 상호작용 지원
102 | output_type: 'png' | 'html'
103 | // 언어, 현재 중국어 및 영어 지원
104 | language: "zh" | "en"
105 | }
106 | ```
107 |
108 | ## VMind 구성
109 |
110 | ### LLM
111 |
112 | VMind는 지능형 차트 생성을 위해 LLM 호출이 필요합니다. 기본적으로 `config.llm["default"]` 구성을 사용합니다.
113 |
114 | ### 생성 설정
115 |
116 | 주요 구성에는 차트 크기, 테마 및 생성 방법이 포함됩니다.
117 | ### 생성 방법
118 | 기본값: png. 현재 LLM이 컨텍스트에 따라 `output_type`을 자동으로 선택하는 것을 지원합니다.
119 |
120 | ### 크기
121 | 기본 크기는 지정되지 않았습니다. HTML 출력의 경우 차트는 기본적으로 전체 페이지를 채웁니다. PNG 출력의 경우 기본값은 `1000*1000`입니다.
122 |
123 | ### 테마
124 | 기본 테마: `'light'`. VChart는 여러 테마를 지원합니다. [테마](https://www.visactor.io/vchart/guide/tutorial_docs/Theme/Theme_Extension)를 참조하세요.
125 |
126 | ## 테스트
127 |
128 | 현재, 서로 다른 난이도의
129 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/README_zh.md:
--------------------------------------------------------------------------------
1 | # 图表可视化工具
2 |
3 | 图表可视化工具,通过python生成数据处理代码,最终调用[@visactor/vmind](https://github.com/VisActor/VMind)得到图表的spec结果,图表渲染使用[@visactor/vchart](https://github.com/VisActor/VChart)
4 |
5 | ## 安装(Mac / Linux)
6 |
7 | 1. 安装node >= 18
8 |
9 | ```bash
10 | curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
11 | # 激活nvm,以Bash为例
12 | source ~/.bashrc
# 然后安装 Node 最近一个稳定版本
nvm install node
# 激活使用,例如最新一个稳定版本为22,则use 22
16 | nvm use 22
17 | ```
18 |
19 | 2. 安装依赖
20 |
21 | ```bash
22 | cd app/tool/chart_visualization
23 | npm install
24 | ```
25 |
26 | ## 安装(Windows)
27 | 1. 安装nvm-windows
28 |
29 | 从[github官网](https://github.com/coreybutler/nvm-windows?tab=readme-ov-file#readme)上下载最新版本`nvm-setup.exe`并且安装
30 |
31 | 2. 使用nvm安装node
32 |
33 | ```powershell
# 然后安装 Node 最近一个稳定版本
nvm install node
# 激活使用,例如最新一个稳定版本为22,则use 22
37 | nvm use 22
38 | ```
39 |
40 | 3. 安装依赖
41 |
42 | ```bash
43 | # 在当前仓库下定位到相应位置
44 | cd app/tool/chart_visualization
45 | npm install
46 | ```
47 | ## Tool
48 | ### python_execute
49 |
50 | 用python代码执行数据分析(除数据可视化以外)中需要的部分,包括数据处理,数据总结摘要,报告生成以及一些通用python脚本代码
51 |
52 | #### 输入
53 | ```typescript
54 | {
55 | // 代码类型:数据处理/数据报告/其他通用任务
56 | code_type: "process" | "report" | "others"
57 | // 最终执行代码
58 | code: string;
59 | }
60 | ```
61 |
62 | #### 输出
63 | python执行结果,带有中间文件的保存和print输出结果
64 |
65 | ### visualization_preparation
66 |
67 | 数据可视化前置工具,有两种用途,
68 |
#### Data -> Chart
70 | 用于从数据中提取需要分析的数据(.csv)和对应可视化的描述,最终输出一份json配置文件。
71 |
72 | #### Chart + Insight -> Chart
73 | 选取已有的图表和对应的数据洞察,挑选数据洞察以数据标注的形式增加到图表中,最终生成一份json配置文件。
74 |
75 | #### 输入
76 | ```typescript
77 | {
78 | // 代码类型:数据可视化 或者 数据洞察添加
79 | code_type: "visualization" | "insight"
80 | // 用于生产最终json文件的python代码
81 | code: string;
82 | }
83 | ```
84 |
85 | #### 输出
86 | 数据可视化的配置文件,用于`data_visualization tool`
87 |
88 |
89 | ## data_visualization
90 |
91 | 根据`visualization_preparation`的内容,生成具体的数据可视化
92 |
93 | ### 输入
94 | ```typescript
95 | {
96 | // 配置文件路径
97 | json_path: string;
98 | // 当前用途,数据可视化或者洞察标注添加
99 | tool_type: "visualization" | "insight";
100 | // 最终产物png或者html;html下支持vchart渲染和交互
101 | output_type: 'png' | 'html'
102 | // 语言,目前支持中文和英文
103 | language: "zh" | "en"
104 | }
105 | ```
106 |
107 | ## 输出
108 | 最终以'png'或者'html'的形式保存在本地,输出保存的图表路径以及图表中发现的数据洞察
109 |
110 | ## VMind配置
111 |
112 | ### LLM
113 |
114 | VMind本身也需要通过调用大模型得到智能图表生成结果,目前默认会使用`config.llm["default"]`配置
115 |
116 | ### 生成配置
117 |
118 | 主要生成配置包括图表的宽高、主题以及生成方式;
119 | ### 生成方式
120 | 默认为png,目前支持大模型根据上下文自己选择`output_type`
121 |
122 | ### 宽高
123 | 目前默认不指定宽高,`html`下默认占满整个页面,'png'下默认为`1000 * 1000`
124 |
125 | ### 主题
126 | 目前默认主题为`'light'`,VChart图表支持多种主题,详见[主题](https://www.visactor.io/vchart/guide/tutorial_docs/Theme/Theme_Extension)
127 |
128 |
129 | ## 测试
130 |
131 | 当前设置了三种不同难度的任务用于测试
132 |
133 | ### 简单图表生成任务
134 |
135 | 给予数据和具体的图表生成需求,测试结果,执行命令:
136 | ```bash
137 | python -m app.tool.chart_visualization.test.chart_demo
138 | ```
结果应位于`workspace/visualization`下,涉及到9种不同的图表结果
140 |
141 | ### 简单数据报表任务
142 |
143 | 给予简单原始数据可分析需求,需要对数据进行简单加工处理,执行命令:
144 | ```bash
145 | python -m app.tool.chart_visualization.test.report_demo
146 | ```
结果同样位于`workspace/visualization`下
148 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/__init__.py:
--------------------------------------------------------------------------------
from app.tool.chart_visualization.chart_prepare import VisualizationPrepare
from app.tool.chart_visualization.data_visualization import DataVisualization
from app.tool.chart_visualization.python_execute import NormalPythonExecute


# Public API of the chart_visualization package.
__all__ = ["DataVisualization", "VisualizationPrepare", "NormalPythonExecute"]
7 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/chart_prepare.py:
--------------------------------------------------------------------------------
1 | from app.tool.chart_visualization.python_execute import NormalPythonExecute
2 |
3 |
class VisualizationPrepare(NormalPythonExecute):
    """A tool for Chart Generation Preparation.

    Runs Python that emits the JSON metadata consumed by the
    data_visualization tool (plus optional cleaned CSV files).
    Fixes typos in the LLM-facing strings ("generates" -> "generate",
    "visulazation" -> "visualize", "esay" -> "easy") so the prompt reads
    correctly.
    """

    name: str = "visualization_preparation"
    description: str = "Using Python code to generate metadata of data_visualization tool. Outputs: 1) JSON Information. 2) Cleaned CSV data files (Optional)."
    parameters: dict = {
        "type": "object",
        "properties": {
            "code_type": {
                "description": "code type, visualization: csv -> chart; insight: choose insight into chart",
                "type": "string",
                "default": "visualization",
                "enum": ["visualization", "insight"],
            },
            "code": {
                "type": "string",
                "description": """Python code for data_visualization prepare.
## Visualization Type
1. Data loading logic
2. Csv Data and chart description generate
2.1 Csv data (The data you want to visualize, cleaning / transform from origin data, saved in .csv)
2.2 Chart description of csv data (The chart title or description should be concise and clear. Examples: 'Product sales distribution', 'Monthly revenue trend'.)
3. Save information in json file.( format: {"csvFilePath": string, "chartTitle": string}[])
## Insight Type
1. Select the insights from the data_visualization results that you want to add to the chart.
2. Save information in json file.( format: {"chartPath": string, "insights_id": number[]}[])
# Note
1. You can generate one or multiple csv data with different visualization needs.
2. Make each chart data easy, clean and different.
3. Json file saving in utf-8 with path print: print(json_path)
""",
            },
        },
        "required": ["code", "code_type"],
    }
39 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chart_visualization",
3 | "version": "1.0.0",
  "main": "src/chartVisualize.ts",
5 | "devDependencies": {
6 | "@types/node": "^22.10.1",
7 | "ts-node": "^10.9.2",
8 | "typescript": "^5.7.2"
9 | },
10 | "dependencies": {
11 | "@visactor/vchart": "^1.13.7",
12 | "@visactor/vmind": "2.0.5",
13 | "get-stdin": "^9.0.0",
14 | "puppeteer": "^24.9.0"
15 | },
16 | "scripts": {
17 | "test": "echo \"Error: no test specified\" && exit 1"
18 | },
19 | "author": "",
20 | "license": "ISC",
21 | "description": ""
22 | }
23 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/python_execute.py:
--------------------------------------------------------------------------------
1 | from app.config import config
2 | from app.tool.python_execute import PythonExecute
3 |
4 |
class NormalPythonExecute(PythonExecute):
    """General Python execution tool for data processing / report steps.

    Reuses PythonExecute's sandboxed runner; only the LLM-facing schema and
    the (advisory) ``code_type`` hint differ. Fixes typos in the prompt
    string ("worksapce" -> "workspace", "invode" -> "invoke").
    """

    name: str = "python_execute"
    description: str = """Execute Python code for in-depth data analysis / data report(task conclusion) / other normal task without direct visualization."""
    parameters: dict = {
        "type": "object",
        "properties": {
            "code_type": {
                "description": "code type, data process / data report / others",
                "type": "string",
                "default": "process",
                "enum": ["process", "report", "others"],
            },
            "code": {
                "type": "string",
                "description": """Python code to execute.
# Note
1. The code should generate a comprehensive text-based report containing dataset overview, column details, basic statistics, derived metrics, timeseries comparisons, outliers, and key insights.
2. Use print() for all outputs so the analysis (including sections like 'Dataset Overview' or 'Preprocessing Results') is clearly visible and save it also
3. Save any report / processed files / each analysis result in workspace directory: {directory}
4. Data reports need to be content-rich, including your overall analysis process and corresponding data visualization.
5. You can invoke this tool step-by-step to do data analysis from summary to in-depth with data report saved also""".format(
                directory=config.workspace_root
            ),
            },
        },
        "required": ["code"],
    }

    async def execute(self, code: str, code_type: str | None = None, timeout=5):
        """Run *code* via the parent executor; ``code_type`` is advisory only
        (it steers the LLM's schema, not the execution path)."""
        return await super().execute(code, timeout)
37 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/test/chart_demo.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from app.agent.data_analysis import DataAnalysis
4 | from app.logger import logger
5 |
6 |
# Shared instruction prepended to every demo prompt below.
prefix = "Help me generate charts and save them locally, specifically:"
# Demo corpus: each task pairs a natural-language chart request ("prompt")
# with inline CSV data ("data"). The nine tasks exercise different chart
# families (grouped bar, share/pie, multi-series line, word popularity,
# scatter, funnel, comparison bar, radar, flow).
tasks = [
    {
        "prompt": "Help me show the sales of different products in different regions",
        "data": """Product Name,Region,Sales
Coke,South,2350
Coke,East,1027
Coke,West,1027
Coke,North,1027
Sprite,South,215
Sprite,East,654
Sprite,West,159
Sprite,North,28
Fanta,South,345
Fanta,East,654
Fanta,West,2100
Fanta,North,1679
Xingmu,South,1476
Xingmu,East,830
Xingmu,West,532
Xingmu,North,498
""",
    },
    {
        "prompt": "Show market share of each brand",
        "data": """Brand Name,Market Share,Average Price,Net Profit
Apple,0.5,7068,314531
Samsung,0.2,6059,362345
Vivo,0.05,3406,234512
Nokia,0.01,1064,-1345
Xiaomi,0.1,4087,131345""",
    },
    {
        "prompt": "Please help me show the sales trend of each product",
        "data": """Date,Type,Value
2023-01-01,Product A,52.9
2023-01-01,Product B,63.6
2023-01-01,Product C,11.2
2023-01-02,Product A,45.7
2023-01-02,Product B,89.1
2023-01-02,Product C,21.4
2023-01-03,Product A,67.2
2023-01-03,Product B,82.4
2023-01-03,Product C,31.7
2023-01-04,Product A,80.7
2023-01-04,Product B,55.1
2023-01-04,Product C,21.1
2023-01-05,Product A,65.6
2023-01-05,Product B,78
2023-01-05,Product C,31.3
2023-01-06,Product A,75.6
2023-01-06,Product B,89.1
2023-01-06,Product C,63.5
2023-01-07,Product A,67.3
2023-01-07,Product B,77.2
2023-01-07,Product C,43.7
2023-01-08,Product A,96.1
2023-01-08,Product B,97.6
2023-01-08,Product C,59.9
2023-01-09,Product A,96.1
2023-01-09,Product B,100.6
2023-01-09,Product C,66.8
2023-01-10,Product A,101.6
2023-01-10,Product B,108.3
2023-01-10,Product C,56.9""",
    },
    {
        "prompt": "Show the popularity of search keywords",
        "data": """Keyword,Popularity
Hot Word,1000
Zao Le Wo Men,800
Rao Jian Huo,400
My Wish is World Peace,400
Xiu Xiu Xiu,400
Shenzhou 11,400
Hundred Birds Facing the Wind,400
China Women's Volleyball Team,400
My Guan Na,400
Leg Dong,400
Hot Pot Hero,400
Baby's Heart is Bitter,400
Olympics,400
Awesome My Brother,400
Poetry and Distance,400
Song Joong-ki,400
PPAP,400
Blue Thin Mushroom,400
Rain Dew Evenly,400
Friendship's Little Boat Says It Flips,400
Beijing Slump,400
Dedication,200
Apple,200
Dog Belt,200
Old Driver,200
Melon-Eating Crowd,200
Zootopia,200
City Will Play,200
Routine,200
Water Reverse,200
Why Don't You Go to Heaven,200
Snake Spirit Man,200
Why Don't You Go to Heaven,200
Samsung Explosion Gate,200
Little Li Oscar,200
Ugly People Need to Read More,200
Boyfriend Power,200
A Face of Confusion,200
Descendants of the Sun,200""",
    },
    {
        "prompt": "Help me compare the performance of different electric vehicle brands using a scatter plot",
        "data": """Range,Charging Time,Brand Name,Average Price
2904,46,Brand1,2350
1231,146,Brand2,1027
5675,324,Brand3,1242
543,57,Brand4,6754
326,234,Brand5,215
1124,67,Brand6,654
3426,81,Brand7,159
2134,24,Brand8,28
1234,52,Brand9,345
2345,27,Brand10,654
526,145,Brand11,2100
234,93,Brand12,1679
567,94,Brand13,1476
789,45,Brand14,830
469,75,Brand15,532
5689,54,Brand16,498
""",
    },
    {
        "prompt": "Show conversion rates for each process",
        "data": """Process,Conversion Rate,Month
Step1,100,1
Step2,80,1
Step3,60,1
Step4,40,1""",
    },
    {
        "prompt": "Show the difference in breakfast consumption between men and women",
        "data": """Day,Men-Breakfast,Women-Breakfast
Monday,15,22
Tuesday,12,10
Wednesday,15,20
Thursday,10,12
Friday,13,15
Saturday,10,15
Sunday,12,14""",
    },
    {
        "prompt": "Help me show this person's performance in different aspects, is he a hexagonal warrior",
        "data": """dimension,performance
Strength,5
Speed,5
Shooting,3
Endurance,5
Precision,5
Growth,5""",
    },
    {
        "prompt": "Show data flow",
        "data": """Origin,Destination,value
Node A,Node 1,10
Node A,Node 2,5
Node B,Node 2,8
Node B,Node 3,2
Node C,Node 2,4
Node A,Node C,2
Node C,Node 1,2""",
    },
]
178 |
179 |
async def main():
    """Run every visualization demo task sequentially, one fresh agent per task."""
    total = len(tasks)
    # start=1 so the progress log reads "task 1 / N" instead of the
    # misleading 0-based "task 0 / N" the previous version printed.
    for index, item in enumerate(tasks, start=1):
        logger.info(f"Begin task {index} / {total}!")
        # A fresh agent per task keeps conversation state from leaking between runs.
        agent = DataAnalysis()
        await agent.run(
            f"{prefix},chart_description:{item['prompt']},Data:{item['data']}"
        )
        logger.info(f"Finish with {item['prompt']}")
188 |
189 |
# Script entry point: run all demo tasks on the asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
192 |
--------------------------------------------------------------------------------
/app/tool/chart_visualization/test/report_demo.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from app.agent.data_analysis import DataAnalysis
4 |
5 |
6 | # from app.agent.manus import Manus
7 |
8 |
async def main():
    """Ask the data-analysis agent to turn the hard-coded table into an HTML report."""
    request = """Requirement:
1. Analyze the following data and generate a graphical data report in HTML format. The final product should be a data report.
Data:
Month | Team A | Team B | Team C
January | 1200 hours | 1350 hours | 1100 hours
February | 1250 hours | 1400 hours | 1150 hours
March | 1180 hours | 1300 hours | 1300 hours
April | 1220 hours | 1280 hours | 1400 hours
May | 1230 hours | 1320 hours | 1450 hours
June | 1200 hours | 1250 hours | 1500 hours """
    # Swap in Manus() here to exercise the general-purpose agent instead.
    agent = DataAnalysis()
    await agent.run(request)
24 |
25 |
# Script entry point: run the report demo on the asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
28 |
--------------------------------------------------------------------------------
/app/tool/create_chat_completion.py:
--------------------------------------------------------------------------------
1 | from typing import Any, List, Optional, Type, Union, get_args, get_origin
2 |
3 | from pydantic import BaseModel, Field
4 |
5 | from app.tool import BaseTool
6 |
7 |
class CreateChatCompletion(BaseTool):
    """Tool that requests a structured completion and converts it to `response_type`.

    The JSON schema advertised via `parameters` is derived from `response_type`,
    which may be `str` (default), a pydantic model, a primitive type, a
    `list`/`dict` generic, or a `Union` of those.
    """

    name: str = "create_chat_completion"
    description: str = (
        "Creates a structured completion with specified output formatting."
    )

    # Maps Python primitive types to their JSON-schema type names.
    type_mapping: dict = {
        str: "string",
        int: "integer",
        float: "number",
        bool: "boolean",
        dict: "object",
        list: "array",
    }
    # Desired Python type of the final converted result.
    response_type: Optional[Type] = None
    # Field names the model must supply; defaults to a single "response" field.
    required: List[str] = Field(default_factory=lambda: ["response"])

    def __init__(self, response_type: Optional[Type] = str):
        """Initialize with a specific response type."""
        super().__init__()
        self.response_type = response_type
        self.parameters = self._build_parameters()

    def _build_parameters(self) -> dict:
        """Build the JSON-schema parameters dict for `response_type`."""
        if self.response_type is str:
            return {
                "type": "object",
                "properties": {
                    "response": {
                        "type": "string",
                        "description": "The response text that should be delivered to the user.",
                    },
                },
                "required": self.required,
            }

        if isinstance(self.response_type, type) and issubclass(
            self.response_type, BaseModel
        ):
            # Pydantic models carry their own JSON schema.
            schema = self.response_type.model_json_schema()
            return {
                "type": "object",
                "properties": schema["properties"],
                "required": schema.get("required", self.required),
            }

        return self._create_type_schema(self.response_type)

    def _create_type_schema(self, type_hint: Type) -> dict:
        """Create a JSON schema for the given type hint (primitive, generic, or Union)."""
        origin = get_origin(type_hint)
        args = get_args(type_hint)

        # Handle primitive (non-generic) types.
        if origin is None:
            # getattr guard mirrors _get_type_info: typing constructs without a
            # __name__ previously raised AttributeError here.
            return {
                "type": "object",
                "properties": {
                    "response": {
                        "type": self.type_mapping.get(type_hint, "string"),
                        "description": f"Response of type {getattr(type_hint, '__name__', 'any')}",
                    }
                },
                "required": self.required,
            }

        # Handle List[...] types.
        if origin is list:
            item_type = args[0] if args else Any
            return {
                "type": "object",
                "properties": {
                    "response": {
                        "type": "array",
                        "items": self._get_type_info(item_type),
                    }
                },
                "required": self.required,
            }

        # Handle Dict[...] types (only the value type matters; JSON keys are strings).
        if origin is dict:
            value_type = args[1] if len(args) > 1 else Any
            return {
                "type": "object",
                "properties": {
                    "response": {
                        "type": "object",
                        "additionalProperties": self._get_type_info(value_type),
                    }
                },
                "required": self.required,
            }

        # Handle Union[...] types.
        if origin is Union:
            return self._create_union_schema(args)

        # Unknown generic: fall back to the default string schema.
        return self._build_parameters()

    def _get_type_info(self, type_hint: Type) -> dict:
        """Get JSON-schema info for a single (possibly nested) type."""
        if isinstance(type_hint, type) and issubclass(type_hint, BaseModel):
            return type_hint.model_json_schema()

        return {
            "type": self.type_mapping.get(type_hint, "string"),
            "description": f"Value of type {getattr(type_hint, '__name__', 'any')}",
        }

    def _create_union_schema(self, types: tuple) -> dict:
        """Create an anyOf schema for Union member types."""
        return {
            "type": "object",
            "properties": {
                "response": {"anyOf": [self._get_type_info(t) for t in types]}
            },
            "required": self.required,
        }

    async def execute(self, required: Optional[list] = None, **kwargs) -> Any:
        """Execute the chat completion with type conversion.

        Args:
            required: List of required field names or None (uses self.required)
            **kwargs: Response data supplied by the model

        Returns:
            Converted response based on response_type; a dict of raw values when
            multiple required fields are requested.
        """
        required = required or self.required

        # Handle case when required is a list
        if isinstance(required, list) and len(required) > 0:
            if len(required) == 1:
                required_field = required[0]
                result = kwargs.get(required_field, "")
            else:
                # Multiple fields: return them as a dict without type conversion.
                return {field: kwargs.get(field, "") for field in required}
        else:
            required_field = "response"
            result = kwargs.get(required_field, "")

        # Type conversion logic
        if self.response_type is str:
            return result

        if isinstance(self.response_type, type) and issubclass(
            self.response_type, BaseModel
        ):
            # Pydantic models validate against the full kwargs payload.
            return self.response_type(**kwargs)

        if get_origin(self.response_type) in (list, dict):
            return result  # Assume result already has the correct container shape.

        # Best-effort cast for other primitives; fall back to the raw value.
        try:
            return self.response_type(result)
        except (ValueError, TypeError):
            return result
170 |
--------------------------------------------------------------------------------
/app/tool/file_operators.py:
--------------------------------------------------------------------------------
1 | """File operation interfaces and implementations for local and sandbox environments."""
2 |
import asyncio
import shlex
from pathlib import Path
from typing import Optional, Protocol, Tuple, Union, runtime_checkable

from app.config import SandboxSettings
from app.exceptions import ToolError
from app.sandbox.client import SANDBOX_CLIENT
10 |
11 |
# Accepted path argument type: plain string or pathlib.Path.
PathLike = Union[str, Path]
13 |
14 |
@runtime_checkable
class FileOperator(Protocol):
    """Interface for file operations in different environments.

    Implemented below by LocalFileOperator (host filesystem) and
    SandboxFileOperator (sandbox container); runtime_checkable allows
    isinstance() checks against this protocol.
    """

    async def read_file(self, path: PathLike) -> str:
        """Read content from a file."""
        ...

    async def write_file(self, path: PathLike, content: str) -> None:
        """Write content to a file."""
        ...

    async def is_directory(self, path: PathLike) -> bool:
        """Check if path points to a directory."""
        ...

    async def exists(self, path: PathLike) -> bool:
        """Check if path exists."""
        ...

    async def run_command(
        self, cmd: str, timeout: Optional[float] = 120.0
    ) -> Tuple[int, str, str]:
        """Run a shell command and return (return_code, stdout, stderr)."""
        ...
40 |
41 |
class LocalFileOperator(FileOperator):
    """File operations implementation for the local filesystem."""

    # Text encoding used for all read/write operations.
    encoding: str = "utf-8"

    async def read_file(self, path: PathLike) -> str:
        """Read content from a local file.

        Raises:
            ToolError: If the file cannot be read.
        """
        try:
            return Path(path).read_text(encoding=self.encoding)
        except Exception as e:
            raise ToolError(f"Failed to read {path}: {str(e)}") from None

    async def write_file(self, path: PathLike, content: str) -> None:
        """Write content to a local file.

        Raises:
            ToolError: If the file cannot be written.
        """
        try:
            Path(path).write_text(content, encoding=self.encoding)
        except Exception as e:
            raise ToolError(f"Failed to write to {path}: {str(e)}") from None

    async def is_directory(self, path: PathLike) -> bool:
        """Check if path points to a directory."""
        return Path(path).is_dir()

    async def exists(self, path: PathLike) -> bool:
        """Check if path exists."""
        return Path(path).exists()

    async def run_command(
        self, cmd: str, timeout: Optional[float] = 120.0
    ) -> Tuple[int, str, str]:
        """Run a shell command locally.

        Args:
            cmd: Shell command line to execute.
            timeout: Seconds to wait before killing the process (None = no limit).

        Returns:
            Tuple of (return_code, stdout, stderr) with both streams decoded as text.

        Raises:
            TimeoutError: If the command does not finish within `timeout`.
        """
        process = await asyncio.create_subprocess_shell(
            cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )

        try:
            stdout, stderr = await asyncio.wait_for(
                process.communicate(), timeout=timeout
            )
            return (
                process.returncode or 0,
                stdout.decode(),
                stderr.decode(),
            )
        except asyncio.TimeoutError as exc:
            try:
                process.kill()
            except ProcessLookupError:
                # Process exited on its own between the timeout and the kill.
                pass
            else:
                # Reap the killed process so it is not left as a zombie
                # (the previous version killed it but never waited on it).
                await process.wait()
            raise TimeoutError(
                f"Command '{cmd}' timed out after {timeout} seconds"
            ) from exc
94 |
95 |
class SandboxFileOperator(FileOperator):
    """File operations implementation for the sandbox environment."""

    def __init__(self):
        self.sandbox_client = SANDBOX_CLIENT

    async def _ensure_sandbox_initialized(self):
        """Ensure the sandbox exists, creating it lazily on first use."""
        if not self.sandbox_client.sandbox:
            await self.sandbox_client.create(config=SandboxSettings())

    async def read_file(self, path: PathLike) -> str:
        """Read content from a file in sandbox.

        Raises:
            ToolError: If the file cannot be read.
        """
        await self._ensure_sandbox_initialized()
        try:
            return await self.sandbox_client.read_file(str(path))
        except Exception as e:
            raise ToolError(f"Failed to read {path} in sandbox: {str(e)}") from None

    async def write_file(self, path: PathLike, content: str) -> None:
        """Write content to a file in sandbox.

        Raises:
            ToolError: If the file cannot be written.
        """
        await self._ensure_sandbox_initialized()
        try:
            await self.sandbox_client.write_file(str(path), content)
        except Exception as e:
            raise ToolError(f"Failed to write to {path} in sandbox: {str(e)}") from None

    async def is_directory(self, path: PathLike) -> bool:
        """Check if path points to a directory in sandbox."""
        await self._ensure_sandbox_initialized()
        # shlex.quote keeps paths containing spaces or shell metacharacters
        # from breaking (or injecting into) the shell test command.
        result = await self.sandbox_client.run_command(
            f"test -d {shlex.quote(str(path))} && echo 'true' || echo 'false'"
        )
        return result.strip() == "true"

    async def exists(self, path: PathLike) -> bool:
        """Check if path exists in sandbox."""
        await self._ensure_sandbox_initialized()
        # Quoted for the same reason as is_directory above.
        result = await self.sandbox_client.run_command(
            f"test -e {shlex.quote(str(path))} && echo 'true' || echo 'false'"
        )
        return result.strip() == "true"

    async def run_command(
        self, cmd: str, timeout: Optional[float] = 120.0
    ) -> Tuple[int, str, str]:
        """Run a command in the sandbox environment.

        Returns:
            Tuple of (return_code, stdout, stderr). The sandbox client exposes
            neither the exit code nor stderr, so success reports (0, out, "")
            and failures report (1, "", error message).
        """
        await self._ensure_sandbox_initialized()
        try:
            stdout = await self.sandbox_client.run_command(
                cmd, timeout=int(timeout) if timeout else None
            )
            return (
                0,  # Always return 0 since we don't have explicit return code from sandbox
                stdout,
                "",  # No stderr capture in the current sandbox implementation
            )
        except TimeoutError as exc:
            raise TimeoutError(
                f"Command '{cmd}' timed out after {timeout} seconds in sandbox"
            ) from exc
        except Exception as exc:
            return 1, "", f"Error executing command in sandbox: {str(exc)}"
159 |
--------------------------------------------------------------------------------
/app/tool/python_execute.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import sys
3 | from io import StringIO
4 | from typing import Dict
5 |
6 | from app.tool.base import BaseTool
7 |
8 |
class PythonExecute(BaseTool):
    """A tool for executing Python code with timeout and safety restrictions."""

    name: str = "python_execute"
    description: str = "Executes Python code string. Note: Only print outputs are visible, function return values are not captured. Use print statements to see results."
    parameters: dict = {
        "type": "object",
        "properties": {
            "code": {
                "type": "string",
                "description": "The Python code to execute.",
            },
        },
        "required": ["code"],
    }

    def _run_code(self, code: str, result_dict: dict, safe_globals: dict) -> None:
        """Exec `code` in the child process, capturing stdout into result_dict.

        result_dict is a multiprocessing.Manager proxy shared with the parent.
        NOTE(review): exec with the full builtins exposed is isolation by
        process boundary only, not a real sandbox — the code can still touch
        the filesystem, network, etc.
        """
        original_stdout = sys.stdout
        try:
            output_buffer = StringIO()
            sys.stdout = output_buffer
            exec(code, safe_globals, safe_globals)
            result_dict["observation"] = output_buffer.getvalue()
            result_dict["success"] = True
        except Exception as e:
            result_dict["observation"] = str(e)
            result_dict["success"] = False
        finally:
            # Always restore stdout, even if exec raised.
            sys.stdout = original_stdout

    async def execute(
        self,
        code: str,
        timeout: int = 5,
    ) -> Dict:
        """
        Executes the provided Python code with a timeout.

        Args:
            code (str): The Python code to execute.
            timeout (int): Execution timeout in seconds.

        Returns:
            Dict: Contains 'observation' with the captured output or error
                message, and 'success' status. (The previous docstring
                incorrectly named the key 'output'.)
        """

        with multiprocessing.Manager() as manager:
            result = manager.dict({"observation": "", "success": False})
            # __builtins__ is a dict in the __main__ module but a module object
            # elsewhere; normalize to a dict for the exec globals.
            if isinstance(__builtins__, dict):
                safe_globals = {"__builtins__": __builtins__}
            else:
                safe_globals = {"__builtins__": __builtins__.__dict__.copy()}
            proc = multiprocessing.Process(
                target=self._run_code, args=(code, result, safe_globals)
            )
            proc.start()
            proc.join(timeout)

            # timeout process
            if proc.is_alive():
                proc.terminate()
                proc.join(1)
                if proc.is_alive():
                    # terminate() was ignored (e.g. stuck in native code):
                    # escalate to an unblockable kill so we never hang here.
                    proc.kill()
                    proc.join()
                return {
                    "observation": f"Execution timeout after {timeout} seconds",
                    "success": False,
                }
            # Copy out of the Manager proxy before the context closes it.
            return dict(result)
76 |
--------------------------------------------------------------------------------
/app/tool/search/__init__.py:
--------------------------------------------------------------------------------
1 | from app.tool.search.baidu_search import BaiduSearchEngine
2 | from app.tool.search.base import WebSearchEngine
3 | from app.tool.search.bing_search import BingSearchEngine
4 | from app.tool.search.duckduckgo_search import DuckDuckGoSearchEngine
5 | from app.tool.search.google_search import GoogleSearchEngine
6 |
7 |
# Public API of the search package: the engine base class plus all
# concrete engine implementations.
__all__ = [
    "WebSearchEngine",
    "BaiduSearchEngine",
    "DuckDuckGoSearchEngine",
    "GoogleSearchEngine",
    "BingSearchEngine",
]
15 |
--------------------------------------------------------------------------------
/app/tool/search/baidu_search.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from baidusearch.baidusearch import search
4 |
5 | from app.tool.search.base import SearchItem, WebSearchEngine
6 |
7 |
class BaiduSearchEngine(WebSearchEngine):
    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """Run a Baidu search and normalize each raw hit into a SearchItem."""
        raw_results = search(query, num_results=num_results)

        items: List[SearchItem] = []
        for position, entry in enumerate(raw_results, start=1):
            default_title = f"Baidu Result {position}"

            if isinstance(entry, str):
                # Bare URL string.
                items.append(
                    SearchItem(title=default_title, url=entry, description=None)
                )
                continue

            if isinstance(entry, dict):
                # Dict payload carrying title/url/abstract keys.
                items.append(
                    SearchItem(
                        title=entry.get("title", default_title),
                        url=entry.get("url", ""),
                        description=entry.get("abstract", None),
                    )
                )
                continue

            try:
                # Arbitrary object: probe the expected attributes.
                items.append(
                    SearchItem(
                        title=getattr(entry, "title", default_title),
                        url=getattr(entry, "url", ""),
                        description=getattr(entry, "abstract", None),
                    )
                )
            except Exception:
                # Last resort: stringify the whole entry as the URL.
                items.append(
                    SearchItem(title=default_title, url=str(entry), description=None)
                )

        return items
55 |
--------------------------------------------------------------------------------
/app/tool/search/base.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from pydantic import BaseModel, Field
4 |
5 |
class SearchItem(BaseModel):
    """Represents a single search result item returned by a WebSearchEngine."""

    title: str = Field(description="The title of the search result")
    url: str = Field(description="The URL of the search result")
    description: Optional[str] = Field(
        default=None, description="A description or snippet of the search result"
    )

    def __str__(self) -> str:
        """String representation of a search result item."""
        # Description is intentionally omitted to keep the summary compact.
        return f"{self.title} - {self.url}"
18 |
19 |
class WebSearchEngine(BaseModel):
    """Base class for web search engines; concrete engines override perform_search."""

    # Allow non-pydantic attribute types on subclasses (e.g. a requests.Session
    # held by BingSearchEngine).
    model_config = {"arbitrary_types_allowed": True}

    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """
        Perform a web search and return a list of search items.

        Args:
            query (str): The search query to submit to the search engine.
            num_results (int, optional): The number of search results to return. Default is 10.
            args: Additional arguments.
            kwargs: Additional keyword arguments.

        Returns:
            List[SearchItem]: A list of SearchItem objects matching the search query.

        Raises:
            NotImplementedError: Always; subclasses must override this method.
        """
        raise NotImplementedError
41 |
--------------------------------------------------------------------------------
/app/tool/search/bing_search.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Tuple
2 |
3 | import requests
4 | from bs4 import BeautifulSoup
5 |
6 | from app.logger import logger
7 | from app.tool.search.base import SearchItem, WebSearchEngine
8 |
9 |
# Maximum snippet length (characters) before truncation in _parse_html.
ABSTRACT_MAX_LENGTH = 300

# Pool of browser User-Agent strings; only the first entry is referenced here
# (in HEADERS below).
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
    "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR) AppleWebKit/533.3 (KHTML, like Gecko) QtWeb Internet Browser/3.7 http://www.QtWeb.net",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.4pre) Gecko/20070404 K-Ninja/2.1.3",
    "Mozilla/5.0 (Future Star Technologies Corp.; Star-Blade OS; x86_64; U; en-US) iNet Browser 4.7",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080414 Firefox/2.0.0.13 Pogo/2.0.0.13.6866",
]

# Default HTTP headers sent with every Bing request by the session below.
HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Content-Type": "application/x-www-form-urlencoded",
    "User-Agent": USER_AGENTS[0],
    "Referer": "https://www.bing.com/",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
}

# Base host (used to resolve relative "next page" links) and search endpoint.
BING_HOST_URL = "https://www.bing.com"
BING_SEARCH_URL = "https://www.bing.com/search?q="
36 |
37 |
class BingSearchEngine(WebSearchEngine):
    # Lazily replaced in __init__ with a configured requests session.
    session: Optional[requests.Session] = None

    def __init__(self, **data):
        """Initialize the BingSearch tool with a requests session."""
        super().__init__(**data)
        self.session = requests.Session()
        self.session.headers.update(HEADERS)

    def _search_sync(self, query: str, num_results: int = 10) -> List[SearchItem]:
        """
        Synchronous Bing search implementation to retrieve search results.

        Args:
            query (str): The search query to submit to Bing.
            num_results (int, optional): Maximum number of results to return. Defaults to 10.

        Returns:
            List[SearchItem]: A list of search items with title, URL, and description.
        """
        if not query:
            return []

        list_result = []
        first = 1
        next_url = BING_SEARCH_URL + query

        # Follow "next page" links until enough results are collected or
        # no further page is available.
        while len(list_result) < num_results:
            data, next_url = self._parse_html(
                next_url, rank_start=len(list_result), first=first
            )
            if data:
                list_result.extend(data)
            if not next_url:
                break
            first += 10

        return list_result[:num_results]

    def _parse_html(
        self, url: str, rank_start: int = 0, first: int = 1
    ) -> Tuple[List[SearchItem], Optional[str]]:
        """
        Parse Bing search result HTML to extract search results and the next page URL.

        Returns:
            tuple: (List of SearchItem objects, next page URL or None).
            The return annotation is Optional[str] because every terminal
            path without a "Next page" link returns None.
        """
        try:
            res = self.session.get(url=url)
            res.encoding = "utf-8"
            root = BeautifulSoup(res.text, "lxml")

            list_data = []
            ol_results = root.find("ol", id="b_results")
            if not ol_results:
                return [], None

            for li in ol_results.find_all("li", class_="b_algo"):
                title = ""
                # Named `link` (not `url`) to avoid shadowing the parameter.
                link = ""
                abstract = ""
                try:
                    h2 = li.find("h2")
                    if h2:
                        title = h2.text.strip()
                        link = h2.a["href"].strip()

                    p = li.find("p")
                    if p:
                        abstract = p.text.strip()

                    # Truncate over-long snippets.
                    if ABSTRACT_MAX_LENGTH and len(abstract) > ABSTRACT_MAX_LENGTH:
                        abstract = abstract[:ABSTRACT_MAX_LENGTH]

                    rank_start += 1

                    # Create a SearchItem object
                    list_data.append(
                        SearchItem(
                            title=title or f"Bing Result {rank_start}",
                            url=link,
                            description=abstract,
                        )
                    )
                except Exception:
                    # Skip malformed result entries rather than failing the page.
                    continue

            next_btn = root.find("a", title="Next page")
            if not next_btn:
                return list_data, None

            next_url = BING_HOST_URL + next_btn["href"]
            return list_data, next_url
        except Exception as e:
            logger.warning(f"Error parsing HTML: {e}")
            return [], None

    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """
        Bing search engine.

        Returns results formatted according to SearchItem model.
        """
        return self._search_sync(query, num_results=num_results)
145 |
--------------------------------------------------------------------------------
/app/tool/search/duckduckgo_search.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from duckduckgo_search import DDGS
4 |
5 | from app.tool.search.base import SearchItem, WebSearchEngine
6 |
7 |
class DuckDuckGoSearchEngine(WebSearchEngine):
    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """Run a DuckDuckGo search and normalize each raw hit into a SearchItem."""
        raw_results = DDGS().text(query, max_results=num_results)

        items: List[SearchItem] = []
        for position, entry in enumerate(raw_results, start=1):
            fallback_title = f"DuckDuckGo Result {position}"

            if isinstance(entry, str):
                # Bare URL string.
                items.append(
                    SearchItem(title=fallback_title, url=entry, description=None)
                )
                continue

            if isinstance(entry, dict):
                # Standard DDGS dict payload with title/href/body keys.
                items.append(
                    SearchItem(
                        title=entry.get("title", fallback_title),
                        url=entry.get("href", ""),
                        description=entry.get("body", None),
                    )
                )
                continue

            try:
                # Unknown object: probe for the usual attributes.
                items.append(
                    SearchItem(
                        title=getattr(entry, "title", fallback_title),
                        url=getattr(entry, "href", ""),
                        description=getattr(entry, "body", None),
                    )
                )
            except Exception:
                # Last resort: stringify the whole entry as the URL.
                items.append(
                    SearchItem(title=fallback_title, url=str(entry), description=None)
                )

        return items
58 |
--------------------------------------------------------------------------------
/app/tool/search/google_search.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from googlesearch import search
4 |
5 | from app.tool.search.base import SearchItem, WebSearchEngine
6 |
7 |
class GoogleSearchEngine(WebSearchEngine):
    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """
        Google search engine.

        Returns results formatted according to SearchItem model.
        """
        # advanced=True yields result objects with title/url/description attributes.
        raw_results = search(query, num_results=num_results, advanced=True)

        results = []
        for i, item in enumerate(raw_results):
            if isinstance(item, str):
                # Bare URL result: wrap it in a SearchItem. (The previous code
                # appended a plain dict here, violating the declared
                # List[SearchItem] return type and diverging from the other
                # engine implementations.)
                results.append(
                    SearchItem(
                        title=f"Google Result {i+1}", url=item, description=""
                    )
                )
            else:
                results.append(
                    SearchItem(
                        title=item.title, url=item.url, description=item.description
                    )
                )

        return results
34 |
--------------------------------------------------------------------------------
/app/tool/terminate.py:
--------------------------------------------------------------------------------
1 | from app.tool.base import BaseTool
2 |
3 |
# Tool description shown to the LLM, explaining when to invoke terminate.
_TERMINATE_DESCRIPTION = """Terminate the interaction when the request is met OR if the assistant cannot proceed further with the task.
When you have finished all the tasks, call this tool to end the work."""


class Terminate(BaseTool):
    """Tool that signals the interaction is finished, with a success/failure status."""

    name: str = "terminate"
    description: str = _TERMINATE_DESCRIPTION
    # JSON schema: one required "status" field restricted to success/failure.
    parameters: dict = {
        "type": "object",
        "properties": {
            "status": {
                "type": "string",
                "description": "The finish status of the interaction.",
                "enum": ["success", "failure"],
            }
        },
        "required": ["status"],
    }

    async def execute(self, status: str) -> str:
        """Finish the current execution, reporting the given status."""
        return f"The interaction has been completed with status: {status}"
26 |
--------------------------------------------------------------------------------
/app/tool/tool_collection.py:
--------------------------------------------------------------------------------
1 | """Collection classes for managing multiple tools."""
2 | from typing import Any, Dict, List
3 |
4 | from app.exceptions import ToolError
5 | from app.logger import logger
6 | from app.tool.base import BaseTool, ToolFailure, ToolResult
7 |
8 |
class ToolCollection:
    """A collection of defined tools, addressable by name."""

    # NOTE(review): vestigial pydantic-style config on a plain class; kept for
    # compatibility in case something introspects it, but it has no effect here.
    class Config:
        arbitrary_types_allowed = True

    def __init__(self, *tools: BaseTool):
        self.tools = tools
        self.tool_map = {tool.name: tool for tool in tools}

    def __iter__(self):
        return iter(self.tools)

    def to_params(self) -> List[Dict[str, Any]]:
        """Return each tool's parameter schema for the LLM tool-call API."""
        return [tool.to_param() for tool in self.tools]

    async def execute(
        self, *, name: str, tool_input: Dict[str, Any] = None
    ) -> ToolResult:
        """Execute a single tool by name.

        Args:
            name: Registered tool name.
            tool_input: Keyword arguments for the tool; may be None for
                tools that take no arguments.

        Returns:
            The tool's ToolResult, or a ToolFailure for unknown tools and
            ToolError raised during execution.
        """
        tool = self.tool_map.get(name)
        if not tool:
            return ToolFailure(error=f"Tool {name} is invalid")
        try:
            # Guard against tool_input=None: the previous code expanded
            # **None, raising a TypeError that escaped the ToolError handler.
            result = await tool(**(tool_input or {}))
            return result
        except ToolError as e:
            return ToolFailure(error=e.message)

    async def execute_all(self) -> List[ToolResult]:
        """Execute all tools in the collection sequentially."""
        results = []
        for tool in self.tools:
            try:
                result = await tool()
                results.append(result)
            except ToolError as e:
                results.append(ToolFailure(error=e.message))
        return results

    def get_tool(self, name: str) -> BaseTool:
        """Look up a tool by name; returns None when no such tool is registered."""
        return self.tool_map.get(name)

    def add_tool(self, tool: BaseTool):
        """Add a single tool to the collection.

        If a tool with the same name already exists, it will be skipped and a warning will be logged.
        """
        if tool.name in self.tool_map:
            logger.warning(f"Tool {tool.name} already exists in collection, skipping")
            return self

        self.tools += (tool,)
        self.tool_map[tool.name] = tool
        return self

    def add_tools(self, *tools: BaseTool):
        """Add multiple tools to the collection.

        If any tool has a name conflict with an existing tool, it will be skipped and a warning will be logged.
        """
        for tool in tools:
            self.add_tool(tool)
        return self
72 |
--------------------------------------------------------------------------------
/assets/community_group.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationAgents/OpenManus/7cd3057ddab94989ec02f17060b1c7ed13b0bf92/assets/community_group.jpg
--------------------------------------------------------------------------------
/assets/logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationAgents/OpenManus/7cd3057ddab94989ec02f17060b1c7ed13b0bf92/assets/logo.jpg
--------------------------------------------------------------------------------
/config/.gitignore:
--------------------------------------------------------------------------------
1 | # prevent the local config file from being uploaded to the remote repository
2 | config.toml
3 |
--------------------------------------------------------------------------------
/config/config.example-model-anthropic.toml:
--------------------------------------------------------------------------------
1 | # Global LLM configuration
2 | [llm]
3 | model = "claude-3-7-sonnet-latest" # The LLM model to use
4 | base_url = "https://api.anthropic.com/v1/" # API endpoint URL
5 | api_key = "YOUR_API_KEY" # Your API key
6 | max_tokens = 8192 # Maximum number of tokens in the response
7 | temperature = 0.0 # Controls randomness
8 |
9 |
10 | # Optional configuration for specific LLM models
11 | [llm.vision]
12 | model = "claude-3-7-sonnet-20250219" # The vision model to use
13 | base_url = "https://api.anthropic.com/v1/" # API endpoint URL for vision model
14 | api_key = "YOUR_API_KEY" # Your API key for vision model
15 | max_tokens = 8192 # Maximum number of tokens in the response
16 | temperature = 0.0 # Controls randomness for vision model
17 |
--------------------------------------------------------------------------------
/config/config.example-model-azure.toml:
--------------------------------------------------------------------------------
1 | # Global LLM configuration
2 | [llm] #AZURE OPENAI:
3 | api_type= 'azure'
4 | model = "gpt-4o-mini" # The LLM model to use
5 | base_url = "{YOUR_AZURE_ENDPOINT.rstrip('/')}/openai/deployments/{AZURE_DEPLOYMENT_ID}" # API endpoint URL
6 | api_key = "YOUR_API_KEY" # Your API key
7 | max_tokens = 8096 # Maximum number of tokens in the response
8 | temperature = 0.0 # Controls randomness
9 | api_version="AZURE API VERSION" #"2024-08-01-preview" # Azure Openai version if AzureOpenai
10 |
11 |
12 | # Optional configuration for specific LLM models
13 | [llm.vision]
14 | model = "gpt-4o" # The vision model to use
15 | base_url = "{YOUR_AZURE_ENDPOINT.rstrip('/')}/openai/deployments/{AZURE_DEPLOYMENT_ID}"
16 | api_key = "YOUR_API_KEY" # Your API key for vision model
17 | max_tokens = 8192 # Maximum number of tokens in the response
18 | temperature = 0.0 # Controls randomness for vision model
19 |
--------------------------------------------------------------------------------
/config/config.example-model-google.toml:
--------------------------------------------------------------------------------
1 | # Global LLM configuration
2 | [llm]
3 | model = "gemini-2.0-flash" # The LLM model to use
4 | base_url = "https://generativelanguage.googleapis.com/v1beta/openai/" # API endpoint URL
5 | api_key = "YOUR_API_KEY" # Your API key
6 | temperature = 0.0 # Controls randomness
7 | max_tokens = 8096 # Maximum number of tokens in the response
8 |
9 |
10 | # Optional configuration for specific LLM models for Google
11 | [llm.vision]
12 | model = "gemini-2.0-flash-exp" # The vision model to use
13 | base_url = "https://generativelanguage.googleapis.com/v1beta/openai/" # API endpoint URL for vision model
14 | api_key = "YOUR_API_KEY" # Your API key for vision model
15 | max_tokens = 8192 # Maximum number of tokens in the response
16 | temperature = 0.0 # Controls randomness for vision model
17 |
--------------------------------------------------------------------------------
/config/config.example-model-ollama.toml:
--------------------------------------------------------------------------------
1 | # Global LLM configuration
2 | [llm] #OLLAMA:
3 | api_type = 'ollama'
4 | model = "llama3.2" # The LLM model to use
5 | base_url = "http://localhost:11434/v1" # API endpoint URL
6 | api_key = "ollama" # Your API key
7 | max_tokens = 4096 # Maximum number of tokens in the response
8 | temperature = 0.0 # Controls randomness
9 |
10 |
11 | [llm.vision] #OLLAMA VISION:
12 | api_type = 'ollama'
13 | model = "llama3.2-vision" # The vision model to use
14 | base_url = "http://localhost:11434/v1" # API endpoint URL for vision model
15 | api_key = "ollama" # Your API key for vision model
16 | max_tokens = 4096 # Maximum number of tokens in the response
17 | temperature = 0.0 # Controls randomness for vision model
18 |
--------------------------------------------------------------------------------
/config/config.example-model-ppio.toml:
--------------------------------------------------------------------------------
1 | # Global LLM configuration
2 | [llm] #PPIO:
3 | api_type = 'ppio'
4 | model = "deepseek/deepseek-v3-0324" # The LLM model to use
5 | base_url = "https://api.ppinfra.com/v3/openai" # API endpoint URL
6 | api_key = "your ppio api key" # Your API key
7 | max_tokens = 16000 # Maximum number of tokens in the response
8 | temperature = 0.0 # Controls randomness
9 |
10 |
11 | [llm.vision] #PPIO VISION:
12 | api_type = 'ppio'
13 | model = "qwen/qwen2.5-vl-72b-instruct" # The vision model to use
14 | base_url = "https://api.ppinfra.com/v3/openai" # API endpoint URL for vision model
15 | api_key = "your ppio api key" # Your API key for vision model
16 | max_tokens = 96000 # Maximum number of tokens in the response
17 | temperature = 0.0 # Controls randomness for vision model
18 |
--------------------------------------------------------------------------------
/config/config.example.toml:
--------------------------------------------------------------------------------
1 | # Global LLM configuration
2 | [llm]
3 | model = "claude-3-7-sonnet-20250219" # The LLM model to use
4 | base_url = "https://api.anthropic.com/v1/" # API endpoint URL
5 | api_key = "YOUR_API_KEY" # Your API key
6 | max_tokens = 8192 # Maximum number of tokens in the response
7 | temperature = 0.0 # Controls randomness
8 |
9 | # [llm] # Amazon Bedrock
10 | # api_type = "aws" # Required
11 | # model = "us.anthropic.claude-3-7-sonnet-20250219-v1:0" # Bedrock supported modelID
12 | # base_url = "bedrock-runtime.us-west-2.amazonaws.com" # Not used now
13 | # max_tokens = 8192
14 | # temperature = 1.0
15 | # api_key = "bear" # Required but not used for Bedrock
16 |
17 | # [llm] #AZURE OPENAI:
18 | # api_type= 'azure'
19 | # model = "YOUR_MODEL_NAME" #"gpt-4o-mini"
20 | # base_url = "{YOUR_AZURE_ENDPOINT.rstrip('/')}/openai/deployments/{AZURE_DEPLOYMENT_ID}"
21 | # api_key = "AZURE API KEY"
22 | # max_tokens = 8096
23 | # temperature = 0.0
24 | # api_version="AZURE API VERSION" #"2024-08-01-preview"
25 |
26 | # [llm] #OLLAMA:
27 | # api_type = 'ollama'
28 | # model = "llama3.2"
29 | # base_url = "http://localhost:11434/v1"
30 | # api_key = "ollama"
31 | # max_tokens = 4096
32 | # temperature = 0.0
33 |
34 | # Optional configuration for specific LLM models
35 | [llm.vision]
36 | model = "claude-3-7-sonnet-20250219" # The vision model to use
37 | base_url = "https://api.anthropic.com/v1/" # API endpoint URL for vision model
38 | api_key = "YOUR_API_KEY" # Your API key for vision model
39 | max_tokens = 8192 # Maximum number of tokens in the response
40 | temperature = 0.0 # Controls randomness for vision model
41 |
42 | # [llm.vision] #OLLAMA VISION:
43 | # api_type = 'ollama'
44 | # model = "llama3.2-vision"
45 | # base_url = "http://localhost:11434/v1"
46 | # api_key = "ollama"
47 | # max_tokens = 4096
48 | # temperature = 0.0
49 |
50 | # Optional configuration for specific browser configuration
51 | # [browser]
52 | # Whether to run browser in headless mode (default: false)
53 | #headless = false
54 | # Disable browser security features (default: true)
55 | #disable_security = true
56 | # Extra arguments to pass to the browser
57 | #extra_chromium_args = []
58 | # Path to a Chrome instance to use to connect to your normal browser
59 | # e.g. '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
60 | #chrome_instance_path = ""
61 | # Connect to a browser instance via WebSocket
62 | #wss_url = ""
63 | # Connect to a browser instance via CDP
64 | #cdp_url = ""
65 |
66 | # Optional configuration, Proxy settings for the browser
67 | # [browser.proxy]
68 | # server = "http://proxy-server:port"
69 | # username = "proxy-username"
70 | # password = "proxy-password"
71 |
72 | # Optional configuration, Search settings.
73 | # [search]
74 | # Search engine for agent to use. Default is "Google", can be set to "Baidu" or "DuckDuckGo" or "Bing".
75 | #engine = "Google"
76 | # Fallback engine order. Default is ["DuckDuckGo", "Baidu", "Bing"] - will try in this order after primary engine fails.
77 | #fallback_engines = ["DuckDuckGo", "Baidu", "Bing"]
78 | # Seconds to wait before retrying all engines again when they all fail due to rate limits. Default is 60.
79 | #retry_delay = 60
80 | # Maximum number of times to retry all engines when all fail. Default is 3.
81 | #max_retries = 3
82 | # Language code for search results. Options: "en" (English), "zh" (Chinese), etc.
83 | #lang = "en"
84 | # Country code for search results. Options: "us" (United States), "cn" (China), etc.
85 | #country = "us"
86 |
87 |
88 | ## Sandbox configuration
89 | #[sandbox]
90 | #use_sandbox = false
91 | #image = "python:3.12-slim"
92 | #work_dir = "/workspace"
93 | #memory_limit = "1g" # 512m
94 | #cpu_limit = 2.0
95 | #timeout = 300
96 | #network_enabled = true
97 |
98 | # MCP (Model Context Protocol) configuration
99 | [mcp]
100 | server_reference = "app.mcp.server" # default server module reference
101 |
102 | # Optional Runflow configuration
103 | # You can add additional agents into the run-flow workflow to solve different types of tasks.
104 | [runflow]
105 | use_data_analysis_agent = false # The Data Analysis Agent solves various data analysis tasks
106 |
--------------------------------------------------------------------------------
/config/mcp.example.json:
--------------------------------------------------------------------------------
1 | {
2 | "mcpServers": {
3 | "server1": {
4 | "type": "sse",
5 | "url": "http://localhost:8000/sse"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/examples/benchmarks/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | OpenManus benchmark system for standardized agent evaluation.
3 | """
4 |
--------------------------------------------------------------------------------
/examples/use_case/japan-travel-plan/japan_travel_guide_instructions.txt:
--------------------------------------------------------------------------------
1 | JAPAN TRAVEL HANDBOOK - GUIDE TO VERSIONS
2 |
3 | Location: D:/OpenManus/
4 |
5 | 1. DETAILED DIGITAL VERSION
6 | File: japan_travel_handbook.html
7 | Best for: Desktop/laptop viewing
8 | Features:
9 | - Complete comprehensive guide
10 | - Detailed itinerary
11 | - Full proposal planning section
12 | - All hotel recommendations
13 | - Comprehensive budget breakdown
14 | Usage: Open in web browser for trip planning and detailed reference
15 |
16 | 2. PRINT-FRIENDLY VERSION
17 | File: japan_travel_handbook_print.html
18 | Best for: Physical reference during travel
19 | Features:
20 | - Condensed essential information
21 | - Optimized for paper printing
22 | - Clear, printer-friendly formatting
23 | - Quick reference tables
24 | Usage: Print and keep in travel documents folder
25 |
26 | 3. MOBILE-OPTIMIZED VERSION
27 | File: japan_travel_handbook_mobile.html
28 | Best for: On-the-go reference during trip
29 | Features:
30 | - Touch-friendly interface
31 | - Collapsible sections
32 | - Quick access emergency buttons
33 | - Dark mode support
34 | - Responsive design
35 | Usage: Save to phone's browser bookmarks for quick access
36 |
37 | RECOMMENDED SETUP:
38 | 1. Before Trip:
39 | - Use detailed version for planning
40 | - Print the print-friendly version
41 | - Save mobile version to phone
42 |
43 | 2. During Trip:
44 | - Keep printed version with travel documents
45 | - Use mobile version for daily reference
46 | - Access detailed version when needed for specific information
47 |
48 | 3. Emergency Access:
49 | - Mobile version has quick-access emergency information
50 | - Keep printed version as backup
51 | - All emergency numbers and contacts in both versions
52 |
53 | Note: All versions contain the same core information but are formatted differently for optimal use in different situations.
54 |
55 | IMPORTANT DATES:
56 | - Trip Duration: April 15-23, 2024
57 | - Proposal Day: April 19, 2024
58 | - Key Reservation Deadlines:
59 | * Flights: Book by January 2024
60 | * Hotels: Book by February 2024
61 | * Restaurant Reservations: Book by January 2024
62 | * JR Pass: Purchase by March 2024
63 |
--------------------------------------------------------------------------------
/examples/use_case/japan-travel-plan/japan_travel_handbook.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Japan Travel Handbook - April 15-23, 2024
7 |
24 |
25 |
26 |
27 | [Previous content remains the same...]
28 |
29 |
30 |
🌸 Proposal Planning Guide 🌸
31 |
32 |
Ring Security & Transport
33 |
34 | Carrying the Ring:
35 |
36 | Always keep the ring in your carry-on luggage, never in checked bags
37 | Use a discrete, non-branded box or case
38 | Consider travel insurance that covers jewelry
39 | Keep receipt/appraisal documentation separate from the ring
40 |
41 |
42 | Airport Security Tips:
43 |
44 | No need to declare the ring unless value exceeds ¥1,000,000 (~$6,700)
45 | If asked, simply state it's "personal jewelry"
46 | Consider requesting private screening to maintain surprise
47 | Keep ring in original box until through security, then transfer to more discrete case
48 |
49 |
50 |
51 |
52 |
Proposal Location Details - Maruyama Park
53 |
54 | Best Timing:
55 |
56 | Date: April 19 (Day 5)
57 | Time: 5:30 PM (30 minutes before sunset)
58 | Park closes at 8:00 PM in April
59 |
60 |
61 | Specific Spot Recommendations:
62 |
63 | Primary Location: Near the famous weeping cherry tree
64 | - Less crowded in early evening
65 | - Beautiful illumination starts at dusk
66 | - Iconic Kyoto backdrop
67 |
68 | Backup Location: Gion Shirakawa area
69 | - Atmospheric stone-paved street
70 | - Traditional buildings and cherry trees
71 | - Beautiful in light rain
72 |
73 |
74 |
75 |
76 |
77 |
Proposal Day Planning
78 |
79 | Morning Preparation:
80 |
81 | Confirm weather forecast
82 | Transfer ring to secure pocket/bag
83 | Have backup indoor location details ready
84 |
85 |
86 | Suggested Timeline:
87 |
88 | 4:00 PM: Start heading to Maruyama Park area
89 | 4:30 PM: Light refreshments at nearby tea house
90 | 5:15 PM: Begin walk through park
91 | 5:30 PM: Arrive at proposal spot
92 | 6:00 PM: Sunset and illumination begins
93 | 7:00 PM: Celebratory dinner reservation
94 |
95 |
96 |
97 |
98 |
Celebration Dinner Options
99 |
100 | Traditional Japanese: Kikunoi Roan
101 | - Intimate 2-star Michelin restaurant
102 | - Advance reservation required (3 months)
103 | - Price: ¥15,000-20,000 per person
104 |
105 | Modern Fusion: The Sodoh
106 | - Beautiful garden views
107 | - Western-style seating available
108 | - Price: ¥12,000-15,000 per person
109 |
110 |
111 |
112 |
113 |
Important Notes:
114 |
115 | Keep proposal plans in separate notes from shared itinerary
116 | Have a backup plan in case of rain (indoor locations listed above)
117 | Consider hiring a local photographer to capture the moment
118 | Save restaurant staff contact info in case of timing changes
119 |
120 |
121 |
122 |
123 |
124 |
125 |
--------------------------------------------------------------------------------
/examples/use_case/japan-travel-plan/japan_travel_handbook_print.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Japan Travel Handbook (Print Version) - April 15-23, 2024
6 |
81 |
82 |
83 | Japan Travel Handbook (Print Version)
84 | Trip Dates: April 15-23, 2024
85 |
86 |
87 |
Emergency Contacts & Important Information
88 |
89 | Emergency in Japan: 119 (Ambulance/Fire) / 110 (Police)
90 | US Embassy Tokyo: +81-3-3224-5000
91 | Tourist Information Hotline: 03-3201-3331
92 | Your Travel Insurance: [Write number here]
93 |
94 |
95 |
96 |
97 |
Daily Itinerary Summary
98 |
99 | Date Location Key Activities
100 | Apr 15 Tokyo Arrival, Shinjuku area exploration
101 | Apr 16 Tokyo Meiji Shrine, Harajuku, Senso-ji, Skytree
102 | Apr 17 Tokyo Tea Ceremony, Budokan, Yanaka Ginza
103 | Apr 18 Kyoto Travel to Kyoto, Kinkaku-ji, Gion
104 | Apr 19 Kyoto Fushimi Inari, Arashiyama, Evening Proposal
105 | Apr 20 Nara/Kyoto Nara Park day trip, deer feeding
106 | Apr 21 Tokyo Return to Tokyo, bay cruise
107 |
108 |
109 |
110 |
111 |
112 |
113 |
Essential Japanese Phrases
114 |
115 | English Japanese When to Use
116 | Arigatou gozaimasu ありがとうございます Thank you (formal)
117 | Sumimasen すみません Excuse me/Sorry
118 | Onegaishimasu お願いします Please
119 | Toire wa doko desu ka? トイレはどこですか? Where is the bathroom?
120 | Eigo ga hanasemasu ka? 英語が話せますか? Do you speak English?
121 |
122 |
123 |
124 |
125 |
Transportation Notes
126 |
127 | JR Pass: Activate on April 15
128 | Tokyo-Kyoto Shinkansen: ~2h15m
129 | Kyoto-Nara Local Train: ~45m
130 | Last trains: Usually around midnight
131 | Keep ¥3000 for unexpected taxi rides
132 |
133 |
134 |
135 |
136 |
137 |
138 |
Proposal Day Timeline (April 19)
139 |
140 | Time Activity Notes
141 | 4:00 PM Head to Maruyama Park Check weather first
142 | 4:30 PM Tea house visit Light refreshments
143 | 5:15 PM Park walk begins Head to weeping cherry tree
144 | 5:30 PM Arrive at spot Find quiet area
145 | 7:00 PM Dinner reservation Kikunoi Roan
146 |
147 |
Backup Location: Gion Shirakawa area (in case of rain)
148 |
149 |
150 |
151 |
Quick Reference Budget
152 |
153 | Item Budget (USD) Notes
154 | Hotels 1500-2000 Pre-booked
155 | Transport 600-800 Including JR Pass
156 | Food 800-1000 ~$60/person/day
157 | Activities 600-800 Including tea ceremony
158 | Shopping 500-400 Souvenirs/gifts
159 |
160 |
161 |
162 |
163 |
--------------------------------------------------------------------------------
/examples/use_case/pictures/japan-travel-plan-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationAgents/OpenManus/7cd3057ddab94989ec02f17060b1c7ed13b0bf92/examples/use_case/pictures/japan-travel-plan-1.png
--------------------------------------------------------------------------------
/examples/use_case/pictures/japan-travel-plan-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationAgents/OpenManus/7cd3057ddab94989ec02f17060b1c7ed13b0bf92/examples/use_case/pictures/japan-travel-plan-2.png
--------------------------------------------------------------------------------
/examples/use_case/readme.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | We put some examples in the `examples` directory. All the examples use the same prompt
4 | as [Manus](https://manus.im/?utm_source=ai-bot.cn).
5 |
6 | The model we use is `claude3.5`.
7 |
8 | ## Japan Travel Plan
9 | **Prompt**:
10 | ```
11 | I need a 7-day Japan itinerary for April 15-23 from Seattle, with a $2500-5000 budget for my fiancée and me. We love historical sites, hidden gems, and Japanese culture (kendo, tea ceremonies, Zen meditation). We want to see Nara's deer and explore cities on foot. I plan to propose during this trip and need a special location recommendation. Please provide a detailed itinerary and a simple HTML travel handbook with maps, attraction descriptions, essential Japanese phrases, and travel tips we can reference throughout our journey.
12 | ```
13 | **preview**:
14 | 
15 |
16 | 
17 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from app.agent.manus import Manus
4 | from app.logger import logger
5 |
6 |
async def main():
    """Run a single interactive Manus session: read a prompt, run the agent, clean up."""
    manus_agent = await Manus.create()
    try:
        user_prompt = input("Enter your prompt: ")
        if user_prompt.strip():
            logger.warning("Processing your request...")
            await manus_agent.run(user_prompt)
            logger.info("Request processing completed.")
        else:
            logger.warning("Empty prompt provided.")
    except KeyboardInterrupt:
        logger.warning("Operation interrupted.")
    finally:
        # Agent resources are released no matter how the session ends.
        await manus_agent.cleanup()
24 |
25 |
26 | if __name__ == "__main__":
27 | asyncio.run(main())
28 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pydantic~=2.10.6
2 | openai~=1.66.3
3 | tenacity~=9.0.0
4 | pyyaml~=6.0.2
5 | loguru~=0.7.3
6 | numpy
7 | datasets~=3.4.1
8 | fastapi~=0.115.11
9 | tiktoken~=0.9.0
10 |
11 | html2text~=2024.2.26
12 | gymnasium~=1.1.1
13 | pillow~=11.1.0
14 | browsergym~=0.13.3
15 | uvicorn~=0.34.0
16 | unidiff~=0.7.5
17 | browser-use~=0.1.40
18 | googlesearch-python~=1.3.0
19 | baidusearch~=1.0.3
20 | duckduckgo_search~=7.5.3
21 |
22 | aiofiles~=24.1.0
23 | pydantic_core~=2.27.2
24 | colorama~=0.4.6
25 | playwright~=1.51.0
26 |
27 | docker~=7.1.0
28 | pytest~=8.3.5
29 | pytest-asyncio~=0.25.3
30 |
31 | mcp~=1.5.0
32 | httpx>=0.27.0
33 | tomli>=2.0.0
34 |
35 | boto3~=1.37.18
36 |
37 | requests~=2.32.3
38 | beautifulsoup4~=4.13.3
39 |
40 | huggingface-hub~=0.29.2
41 | setuptools~=75.8.0
42 |
--------------------------------------------------------------------------------
/run_flow.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import time
3 |
4 | from app.agent.data_analysis import DataAnalysis
5 | from app.agent.manus import Manus
6 | from app.config import config
7 | from app.flow.flow_factory import FlowFactory, FlowType
8 | from app.logger import logger
9 |
10 |
async def run_flow():
    """Execute the PLANNING flow with the configured agent pool on one prompt.

    Builds the agent pool (Manus, plus the optional data-analysis agent when
    enabled in config), reads a prompt from stdin, and runs the flow with a
    one-hour overall timeout.
    """
    agents = {
        "manus": Manus(),
    }
    # The data-analysis agent is opt-in via the [runflow] config section.
    if config.run_flow_config.use_data_analysis_agent:
        agents["data_analysis"] = DataAnalysis()
    try:
        prompt = input("Enter your prompt: ")

        # BUG FIX: the original check was `prompt.strip().isspace() or not prompt`,
        # which never caught whitespace-only input ("".isspace() is False after
        # stripping). This form matches the check used in main.py.
        if not prompt.strip():
            logger.warning("Empty prompt provided.")
            return

        flow = FlowFactory.create_flow(
            flow_type=FlowType.PLANNING,
            agents=agents,
        )
        logger.warning("Processing your request...")

        try:
            start_time = time.time()
            result = await asyncio.wait_for(
                flow.execute(prompt),
                timeout=3600,  # 60 minute timeout for the entire execution
            )
            elapsed_time = time.time() - start_time
            logger.info(f"Request processed in {elapsed_time:.2f} seconds")
            logger.info(result)
        except asyncio.TimeoutError:
            logger.error("Request processing timed out after 1 hour")
            logger.info(
                "Operation terminated due to timeout. Please try a simpler request."
            )

    except KeyboardInterrupt:
        logger.info("Operation cancelled by user.")
    except Exception as e:
        logger.error(f"Error: {str(e)}")
49 |
50 |
51 | if __name__ == "__main__":
52 | asyncio.run(run_flow())
53 |
--------------------------------------------------------------------------------
/run_mcp.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import argparse
3 | import asyncio
4 | import sys
5 |
6 | from app.agent.mcp import MCPAgent
7 | from app.config import config
8 | from app.logger import logger
9 |
10 |
class MCPRunner:
    """Runner class for MCP Agent with proper path handling and configuration."""

    def __init__(self):
        # Resolve paths/server module from global config; agent is created eagerly.
        self.root_path = config.root_path
        self.server_reference = config.mcp_config.server_reference
        self.agent = MCPAgent()

    async def initialize(
        self,
        connection_type: str,
        server_url: str | None = None,
    ) -> None:
        """Initialize the MCP agent with the appropriate connection."""
        logger.info(f"Initializing MCPAgent with {connection_type} connection...")

        if connection_type == "stdio":
            # Spawn the configured server module with the current interpreter.
            await self.agent.initialize(
                connection_type="stdio",
                command=sys.executable,
                args=["-m", self.server_reference],
            )
        else:  # sse
            await self.agent.initialize(connection_type="sse", server_url=server_url)

        logger.info(f"Connected to MCP server via {connection_type}")

    async def run_interactive(self) -> None:
        """Run the agent in an interactive read-eval-print loop."""
        print("\nMCP Agent Interactive Mode (type 'exit' to quit)\n")
        while (user_input := input("\nEnter your request: ")).lower() not in (
            "exit",
            "quit",
            "q",
        ):
            response = await self.agent.run(user_input)
            print(f"\nAgent: {response}")

    async def run_single_prompt(self, prompt: str) -> None:
        """Run the agent with a single prompt."""
        await self.agent.run(prompt)

    async def run_default(self) -> None:
        """Run the agent in default mode: one prompt read from stdin."""
        prompt = input("Enter your prompt: ")
        if prompt.strip():
            logger.warning("Processing your request...")
            await self.agent.run(prompt)
            logger.info("Request processing completed.")
        else:
            logger.warning("Empty prompt provided.")

    async def cleanup(self) -> None:
        """Clean up agent resources."""
        await self.agent.cleanup()
        logger.info("Session ended")
67 |
68 |
def parse_args() -> argparse.Namespace:
    """Define and parse the command-line interface for the MCP runner.

    Options:
        --connection/-c: transport to use ("stdio" or "sse"; default "stdio").
        --server-url:    endpoint used when the SSE transport is selected.
        --interactive/-i: run a REPL session instead of a one-shot prompt.
        --prompt/-p:     single prompt to execute before exiting.
    """
    cli = argparse.ArgumentParser(description="Run the MCP Agent")
    cli.add_argument(
        "--connection",
        "-c",
        choices=["stdio", "sse"],
        default="stdio",
        help="Connection type: stdio or sse",
    )
    cli.add_argument(
        "--server-url",
        default="http://127.0.0.1:8000/sse",
        help="URL for SSE connection",
    )
    cli.add_argument(
        "--interactive", "-i", action="store_true", help="Run in interactive mode"
    )
    cli.add_argument("--prompt", "-p", help="Single prompt to execute and exit")
    return cli.parse_args()
89 |
90 |
async def run_mcp() -> None:
    """Main entry point for the MCP runner.

    Parses CLI options, connects the runner, then dispatches to one of three
    modes: single prompt, interactive REPL, or default stdin prompt.
    """
    cli_args = parse_args()
    mcp_runner = MCPRunner()

    try:
        await mcp_runner.initialize(cli_args.connection, cli_args.server_url)

        if cli_args.prompt:
            await mcp_runner.run_single_prompt(cli_args.prompt)
        elif cli_args.interactive:
            await mcp_runner.run_interactive()
        else:
            await mcp_runner.run_default()

    except KeyboardInterrupt:
        logger.info("Program interrupted by user")
    except Exception as e:
        logger.error(f"Error running MCPAgent: {str(e)}", exc_info=True)
        sys.exit(1)
    finally:
        # Cleanup runs on success, interrupt, and error paths alike.
        await mcp_runner.cleanup()
113 |
114 |
115 | if __name__ == "__main__":
116 | asyncio.run(run_mcp())
117 |
--------------------------------------------------------------------------------
/run_mcp_server.py:
--------------------------------------------------------------------------------
# coding: utf-8
# Shortcut script to launch the OpenManus MCP server. Per the original note,
# importing the server through app.mcp.server also avoids import issues that
# occur when running the module directly.
from app.mcp.server import MCPServer, parse_args


if __name__ == "__main__":
    # Parse CLI options (e.g. transport selection) before starting the server.
    args = parse_args()

    # Create and run server (maintaining original flow)
    server = MCPServer()
    server.run(transport=args.transport)
12 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import find_packages, setup


# The long description shown on PyPI is taken verbatim from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="openmanus",
    version="0.1.0",
    author="mannaandpoem and OpenManus Team",
    author_email="mannaandpoem@gmail.com",
    description="A versatile agent that can solve various tasks using multiple tools",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/FoundationAgents/OpenManus",
    packages=find_packages(),
    # NOTE(review): these pins are a subset of requirements.txt and some
    # versions diverge (e.g. pydantic~=2.10.4 here vs ~=2.10.6 there) —
    # confirm which list is authoritative and keep them in sync.
    install_requires=[
        "pydantic~=2.10.4",
        "openai>=1.58.1,<1.67.0",
        "tenacity~=9.0.0",
        "pyyaml~=6.0.2",
        "loguru~=0.7.3",
        "numpy",
        "datasets>=3.2,<3.5",
        "html2text~=2024.2.26",
        "gymnasium>=1.0,<1.2",
        "pillow>=10.4,<11.2",
        "browsergym~=0.13.3",
        "uvicorn~=0.34.0",
        "unidiff~=0.7.5",
        "browser-use~=0.1.40",
        "googlesearch-python~=1.3.0",
        "aiofiles~=24.1.0",
        "pydantic_core>=2.27.2,<2.28.0",
        "colorama~=0.4.6",
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.12",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.12",
    entry_points={
        "console_scripts": [
            # NOTE(review): main.py defines `main` as an `async def`; a console
            # script calling it directly would create an un-awaited coroutine
            # rather than running the agent. Confirm whether a synchronous
            # wrapper (e.g. one invoking asyncio.run) is intended here.
            "openmanus=main:main",
        ],
    },
)
50 |
--------------------------------------------------------------------------------
/tests/sandbox/test_client.py:
--------------------------------------------------------------------------------
1 | import tempfile
2 | from pathlib import Path
3 | from typing import AsyncGenerator
4 |
5 | import pytest
6 | import pytest_asyncio
7 |
8 | from app.config import SandboxSettings
9 | from app.sandbox.client import LocalSandboxClient, create_sandbox_client
10 |
11 |
@pytest_asyncio.fixture(scope="function")
async def local_client() -> AsyncGenerator[LocalSandboxClient, None]:
    """Yield a fresh local sandbox client and guarantee its cleanup."""
    sandbox_client = create_sandbox_client()
    try:
        yield sandbox_client
    finally:
        # Cleanup must run even if the consuming test fails.
        await sandbox_client.cleanup()
20 |
21 |
@pytest.fixture(scope="function")
def temp_dir() -> Path:
    """Yield a throwaway directory that is removed after the test finishes."""
    with tempfile.TemporaryDirectory() as scratch:
        yield Path(scratch)
27 |
28 |
@pytest.mark.asyncio
async def test_sandbox_creation(local_client: LocalSandboxClient):
    """Tests sandbox creation with specific configuration.

    BUG FIX: the sandbox image is python:3.12-slim, but the assertion
    previously checked for "Python 3.10" and could never pass.
    """
    config = SandboxSettings(
        image="python:3.12-slim",
        work_dir="/workspace",
        memory_limit="512m",
        cpu_limit=0.5,
    )

    await local_client.create(config)
    result = await local_client.run_command("python3 --version")
    assert "Python 3.12" in result
42 |
43 |
@pytest.mark.asyncio
async def test_local_command_execution(local_client: LocalSandboxClient):
    """Verify command output and timeout behavior in the local sandbox."""
    await local_client.create()

    echoed = await local_client.run_command("echo 'test'")
    assert echoed.strip() == "test"

    # A command that exceeds its timeout must raise.
    with pytest.raises(Exception):
        await local_client.run_command("sleep 10", timeout=1)
54 |
55 |
@pytest.mark.asyncio
async def test_local_file_operations(local_client: LocalSandboxClient, temp_dir: Path):
    """Round-trip files between host and sandbox: write/read plus both copy directions."""
    await local_client.create()

    # Write into the sandbox, then read the same path back.
    payload = "Hello, World!"
    await local_client.write_file("/workspace/test.txt", payload)
    round_tripped = await local_client.read_file("/workspace/test.txt")
    assert round_tripped.strip() == payload

    # Host -> container copy.
    host_src = temp_dir / "src.txt"
    host_src.write_text("Copy to container")
    await local_client.copy_to(str(host_src), "/workspace/copied.txt")
    copied = await local_client.read_file("/workspace/copied.txt")
    assert copied.strip() == "Copy to container"

    # Container -> host copy.
    host_dst = temp_dir / "dst.txt"
    await local_client.copy_from("/workspace/test.txt", str(host_dst))
    assert host_dst.read_text().strip() == payload
78 |
79 |
@pytest.mark.asyncio
async def test_local_volume_binding(local_client: LocalSandboxClient, temp_dir: Path):
    """A host directory bound to /data must be readable from inside the sandbox."""
    host_dir = str(temp_dir)
    await local_client.create(volume_bindings={host_dir: "/data"})

    # File created on the host side...
    (temp_dir / "test.txt").write_text("Volume test")

    # ...must be visible through the bound container path.
    mounted = await local_client.read_file("/data/test.txt")
    assert "Volume test" in mounted
93 |
94 |
@pytest.mark.asyncio
async def test_local_error_handling(local_client: LocalSandboxClient):
    """Missing files must surface 'not found' errors for reads and copies."""
    await local_client.create()

    # Both failure modes should raise with a message mentioning the missing file.
    failing_calls = (
        lambda: local_client.read_file("/nonexistent.txt"),
        lambda: local_client.copy_from("/nonexistent.txt", "local.txt"),
    )
    for make_call in failing_calls:
        with pytest.raises(Exception) as exc:
            await make_call()
        assert "not found" in str(exc.value).lower()
107 |
108 |
109 | if __name__ == "__main__":
110 | pytest.main(["-v", __file__])
111 |
--------------------------------------------------------------------------------
/tests/sandbox/test_docker_terminal.py:
--------------------------------------------------------------------------------
1 | """Tests for the AsyncDockerizedTerminal implementation."""
2 |
3 | import docker
4 | import pytest
5 | import pytest_asyncio
6 |
7 | from app.sandbox.core.terminal import AsyncDockerizedTerminal
8 |
9 |
@pytest.fixture(scope="module")
def docker_client():
    """Module-scoped Docker client connected via the local environment."""
    client = docker.from_env()
    return client
14 |
15 |
@pytest_asyncio.fixture(scope="module")
async def docker_container(docker_client):
    """Runs a throwaway container that idles until the module's tests finish."""
    # `tail -f /dev/null` keeps the container alive; remove=True deletes it
    # from the daemon as soon as it is stopped.
    test_container = docker_client.containers.run(
        "python:3.12-slim",
        "tail -f /dev/null",
        detach=True,
        remove=True,
        name="test_container",
    )
    yield test_container
    test_container.stop()
28 |
29 |
@pytest_asyncio.fixture
async def terminal(docker_container):
    """Yields an initialized AsyncDockerizedTerminal bound to the test container."""
    term = AsyncDockerizedTerminal(
        docker_container,
        working_dir="/workspace",
        env_vars={"TEST_VAR": "test_value"},
        default_timeout=30,
    )
    await term.init()
    yield term
    # Release the exec session when the test is done.
    await term.close()
42 |
43 |
class TestAsyncDockerizedTerminal:
    """Test cases for AsyncDockerizedTerminal."""

    @pytest.mark.asyncio
    async def test_basic_command_execution(self, terminal):
        """A simple echo command's output is returned by run_command."""
        result = await terminal.run_command("echo 'Hello World'")
        assert "Hello World" in result

    @pytest.mark.asyncio
    async def test_environment_variables(self, terminal):
        """Env vars configured on the terminal are visible to commands."""
        result = await terminal.run_command("echo $TEST_VAR")
        assert "test_value" in result

    @pytest.mark.asyncio
    async def test_working_directory(self, terminal):
        """Commands run in the configured working directory."""
        result = await terminal.run_command("pwd")
        # Fix: strip trailing whitespace/newline before the exact comparison —
        # shell output normally ends with a newline, and sibling tests
        # (test_sandbox.py, test_sandbox_manager.py) compare stripped output.
        assert result.strip() == "/workspace"

    @pytest.mark.asyncio
    async def test_command_timeout(self, docker_container):
        """A command exceeding default_timeout raises TimeoutError."""
        terminal = AsyncDockerizedTerminal(docker_container, default_timeout=1)
        await terminal.init()
        try:
            with pytest.raises(TimeoutError):
                await terminal.run_command("sleep 5")
        finally:
            # Always release the session, even if the assertion fails.
            await terminal.close()

    @pytest.mark.asyncio
    async def test_multiple_commands(self, terminal):
        """The same terminal can execute several commands in sequence."""
        cmd1 = await terminal.run_command("echo 'First'")
        cmd2 = await terminal.run_command("echo 'Second'")
        assert "First" in cmd1
        assert "Second" in cmd2

    @pytest.mark.asyncio
    async def test_session_cleanup(self, docker_container):
        """close() tears down the connection without dropping the session object."""
        terminal = AsyncDockerizedTerminal(docker_container)
        await terminal.init()
        assert terminal.session is not None
        await terminal.close()
        # Verify session is properly cleaned up
        # Note: session object still exists, but internal connection is closed
        assert terminal.session is not None
94 |
95 |
# Configure pytest-asyncio
def pytest_configure(config):
    """Configure pytest-asyncio mode and default fixture loop scope."""
    # NOTE(review): pytest only calls pytest_configure hooks defined in
    # conftest.py or plugins, not in test modules — confirm this hook is
    # actually invoked, otherwise it is dead code.
    # NOTE(review): addinivalue_line() is intended for line-list ini options
    # such as "markers"; "asyncio_mode" is a string option — verify this has
    # any effect, or move these settings to pytest.ini / pyproject.toml.
    config.addinivalue_line("asyncio_mode", "strict")
    config.addinivalue_line("asyncio_default_fixture_loop_scope", "function")
101 |
102 |
# Allow running this test module directly: invokes pytest in verbose mode.
if __name__ == "__main__":
    pytest.main(["-v", __file__])
105 |
--------------------------------------------------------------------------------
/tests/sandbox/test_sandbox.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import pytest_asyncio
3 |
4 | from app.sandbox.core.sandbox import DockerSandbox, SandboxSettings
5 |
6 |
@pytest.fixture(scope="module")
def sandbox_config():
    """Sandbox settings shared by all tests in this module."""
    settings = SandboxSettings(
        image="python:3.12-slim",
        work_dir="/workspace",
        memory_limit="1g",
        cpu_limit=0.5,
        network_enabled=True,
    )
    return settings
17 |
18 |
@pytest_asyncio.fixture(scope="module")
async def sandbox(sandbox_config):
    """Creates one sandbox for the module and guarantees its cleanup."""
    box = DockerSandbox(sandbox_config)
    await box.create()
    try:
        yield box
    finally:
        # Tear the container down even if a test errored.
        await box.cleanup()
28 |
29 |
@pytest.mark.asyncio
async def test_sandbox_working_directory(sandbox):
    """The sandbox shell starts in the configured work_dir."""
    pwd_output = await sandbox.terminal.run_command("pwd")
    assert pwd_output.strip() == "/workspace"
35 |
36 |
@pytest.mark.asyncio
async def test_sandbox_file_operations(sandbox):
    """A file written into the sandbox reads back with identical content."""
    message = "Hello from sandbox!"

    # Write, then read the same path and compare the round-tripped content.
    await sandbox.write_file("/workspace/test.txt", message)
    round_tripped = await sandbox.read_file("/workspace/test.txt")
    assert round_tripped.strip() == message
47 |
48 |
@pytest.mark.asyncio
async def test_sandbox_python_execution(sandbox):
    """A Python script inside the sandbox can print and read files."""
    # Seed a data file the script will read back.
    await sandbox.write_file("/workspace/test.txt", "Hello from file!")

    # Script prints a greeting and echoes the seeded file's content.
    script = """
print("Hello from Python!")
with open('/workspace/test.txt') as f:
    print(f.read())
"""
    await sandbox.write_file("/workspace/test.py", script)

    output = await sandbox.terminal.run_command("python3 /workspace/test.py")
    for expected in ("Hello from Python!", "Hello from file!"):
        assert expected in output
67 |
68 |
@pytest.mark.asyncio
async def test_sandbox_file_persistence(sandbox):
    """Multiple files, including a nested path, persist with their contents."""
    expected_files = {
        "file1.txt": "Content 1",
        "file2.txt": "Content 2",
        "nested/file3.txt": "Content 3",
    }

    # Write everything first, then verify in a second pass so persistence
    # across operations (not just the latest write) is exercised.
    for relative_path, body in expected_files.items():
        await sandbox.write_file(f"/workspace/{relative_path}", body)

    for relative_path, body in expected_files.items():
        stored = await sandbox.read_file(f"/workspace/{relative_path}")
        assert stored.strip() == body
87 |
88 |
@pytest.mark.asyncio
async def test_sandbox_python_environment(sandbox):
    """Tests Python environment configuration.

    Checks the interpreter version and that basic stdlib imports work.
    """
    # Fix: the module's sandbox_config fixture uses the python:3.12-slim
    # image, so the interpreter must report 3.12 — the previous expectation
    # of "Python 3.10" could never pass against that image.
    result = await sandbox.terminal.run_command("python3 --version")
    assert "Python 3.12" in result

    # Test basic module imports
    python_code = """
import sys
import os
import json
print("Python is working!")
"""
    await sandbox.write_file("/workspace/env_test.py", python_code)
    result = await sandbox.terminal.run_command("python3 /workspace/env_test.py")
    assert "Python is working!" in result
106 |
107 |
@pytest.mark.asyncio
async def test_sandbox_network_access(sandbox):
    """Tests sandbox network access by fetching response headers over HTTPS."""
    if not sandbox.config.network_enabled:
        pytest.skip("Network access is disabled")

    # Install curl first — the slim image does not ship it.
    await sandbox.terminal.run_command("apt update && apt install curl -y")
    result = await sandbox.terminal.run_command("curl -I https://www.example.com")
    # Fix: don't pin the protocol version. Depending on curl's build and the
    # TLS handshake, the status line may be "HTTP/1.1 200" or "HTTP/2 200";
    # assert only on the 200 status code.
    assert "200" in result
118 |
119 |
@pytest.mark.asyncio
async def test_sandbox_cleanup(sandbox_config):
    """cleanup() removes the sandbox container from the Docker daemon."""
    box = DockerSandbox(sandbox_config)
    await box.create()

    # Touch the sandbox and remember which container backs it.
    await box.write_file("/workspace/test.txt", "test")
    container_id = box.terminal.container.id

    await box.cleanup()

    # Ask the Docker daemon directly: that container id must be gone.
    import docker

    remaining_ids = {c.id for c in docker.from_env().containers.list(all=True)}
    assert container_id not in remaining_ids
138 |
139 |
@pytest.mark.asyncio
async def test_sandbox_error_handling():
    """Creating a sandbox from a nonexistent image raises an exception."""
    bad_settings = SandboxSettings(image="nonexistent:latest", work_dir="/invalid")
    bad_sandbox = DockerSandbox(bad_settings)

    with pytest.raises(Exception):
        await bad_sandbox.create()
149 |
150 |
# Allow running this test module directly: invokes pytest in verbose mode.
if __name__ == "__main__":
    pytest.main(["-v", __file__])
153 |
--------------------------------------------------------------------------------
/tests/sandbox/test_sandbox_manager.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | import tempfile
4 | from typing import AsyncGenerator
5 |
6 | import pytest
7 | import pytest_asyncio
8 |
9 | from app.sandbox.core.manager import SandboxManager
10 |
11 |
@pytest_asyncio.fixture(scope="function")
async def manager() -> AsyncGenerator[SandboxManager, None]:
    """Yields a fresh SandboxManager per test and always cleans it up.

    Function scope guarantees each test case gets its own isolated manager.
    """
    instance = SandboxManager(max_sandboxes=2, idle_timeout=60, cleanup_interval=30)
    try:
        yield instance
    finally:
        # Release every sandbox the test may have left behind.
        await instance.cleanup()
24 |
25 |
@pytest.fixture
def temp_file():
    """Yields the path of a temp file with known content; removes it afterwards."""
    with tempfile.NamedTemporaryFile(mode="w+", delete=False) as handle:
        handle.write("test content")
        temp_path = handle.name
    try:
        yield temp_path
    finally:
        # delete=False means we own the cleanup.
        if os.path.exists(temp_path):
            os.unlink(temp_path)
37 |
38 |
@pytest.mark.asyncio
async def test_create_sandbox(manager):
    """A newly created sandbox is tracked by the manager and can run commands."""
    sandbox_id = await manager.create_sandbox()

    # The manager must track both the sandbox and its last-used timestamp.
    assert sandbox_id in manager._sandboxes
    assert sandbox_id in manager._last_used

    # The sandbox must be retrievable and functional.
    sandbox = await manager.get_sandbox(sandbox_id)
    echo_output = await sandbox.run_command("echo 'test'")
    assert echo_output.strip() == "test"
51 |
52 |
@pytest.mark.asyncio
async def test_max_sandboxes_limit(manager):
    """Creating more than max_sandboxes sandboxes raises RuntimeError."""
    created = []
    try:
        # Fill the manager up to its configured capacity.
        while len(created) < manager.max_sandboxes:
            created.append(await manager.create_sandbox())

        assert len(manager._sandboxes) == manager.max_sandboxes

        # One more creation must be rejected with the exact error message.
        with pytest.raises(RuntimeError) as exc_info:
            await manager.create_sandbox()
        assert str(exc_info.value) == (
            f"Maximum number of sandboxes ({manager.max_sandboxes}) reached"
        )
    finally:
        # Best-effort teardown of everything this test created.
        for sandbox_id in created:
            try:
                await manager.delete_sandbox(sandbox_id)
            except Exception as e:
                print(f"Failed to cleanup sandbox {sandbox_id}: {e}")
83 |
84 |
@pytest.mark.asyncio
async def test_get_nonexistent_sandbox(manager):
    """Looking up an unknown id raises KeyError with a descriptive message."""
    with pytest.raises(KeyError, match="Sandbox .* not found"):
        await manager.get_sandbox("nonexistent-id")
90 |
91 |
@pytest.mark.asyncio
async def test_sandbox_cleanup(manager):
    """Deleting a sandbox removes it from all manager bookkeeping."""
    sandbox_id = await manager.create_sandbox()
    assert sandbox_id in manager._sandboxes

    await manager.delete_sandbox(sandbox_id)

    # Both tracking structures must forget the deleted sandbox.
    assert sandbox_id not in manager._sandboxes
    assert sandbox_id not in manager._last_used
101 |
102 |
@pytest.mark.asyncio
async def test_idle_sandbox_cleanup(manager):
    """Sandboxes idle longer than idle_timeout are reaped by the cleanup pass."""
    # Shrink the timeout so the test doesn't have to wait long.
    manager.idle_timeout = 0.1

    sandbox_id = await manager.create_sandbox()
    assert sandbox_id in manager._sandboxes

    # Sleep past the timeout, then run the reaper explicitly.
    await asyncio.sleep(0.2)
    await manager._cleanup_idle_sandboxes()

    assert sandbox_id not in manager._sandboxes
118 |
119 |
@pytest.mark.asyncio
async def test_manager_cleanup(manager):
    """cleanup() tears down every sandbox the manager owns."""
    # Create a couple of sandboxes so there is something to clean up.
    for _ in range(2):
        await manager.create_sandbox()

    await manager.cleanup()

    # No sandboxes or usage records may survive a full cleanup.
    assert not manager._sandboxes
    assert not manager._last_used
135 |
136 |
# Allow running this test module directly: invokes pytest in verbose mode.
if __name__ == "__main__":
    pytest.main(["-v", __file__])
139 |
--------------------------------------------------------------------------------
/workspace/example.txt:
--------------------------------------------------------------------------------
1 | This is a sample file. Files generated by OpenManus are stored in the current folder by default.
2 |
--------------------------------------------------------------------------------