├── .github
└── workflows
│ └── lint-check.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── assets
└── demo.mp4
├── cloudbuild.yaml
├── poetry.lock
├── pyproject.toml
├── pytest.ini
├── scripts
└── lint.py
├── surfkit
├── __init__.py
├── agent.py
├── auth
│ ├── default.py
│ ├── key.py
│ ├── provider.py
│ ├── transport.py
│ └── util.py
├── cli
│ ├── main.py
│ ├── new.py
│ ├── templates
│ │ ├── agent.py
│ │ ├── agents
│ │ │ ├── base.py
│ │ │ ├── surf4v.py
│ │ │ └── surfskelly.py
│ │ └── device.py
│ ├── util.py
│ └── view.py
├── client.py
├── config.py
├── db
│ ├── conn.py
│ └── models.py
├── env.py
├── env_opts.py
├── hub.py
├── learn
│ └── base.py
├── prompt
│ └── annots.py
├── runtime
│ ├── agent
│ │ ├── base.py
│ │ ├── docker.py
│ │ ├── hub.py
│ │ ├── kube.py
│ │ ├── load.py
│ │ ├── process.py
│ │ └── util.py
│ ├── container
│ │ ├── base.py
│ │ ├── docker.py
│ │ ├── kube.py
│ │ └── load.py
│ └── vm
│ │ └── base.py
├── server
│ ├── models.py
│ └── routes.py
├── skill.py
├── types.py
└── util.py
└── ui
└── surfkit
├── .gitignore
├── Dockerfile
├── Makefile
├── README.md
├── package-lock.json
├── package.json
├── public
├── favicon.ico
├── favicon_a2.ico
├── index.html
├── logo.svg
├── logo192.png
├── logo512.png
├── manifest.json
└── robots.txt
├── src
├── App.js
├── api
│ ├── Tasks.js
│ └── agentd.js
├── components
│ ├── Layout.js
│ ├── Nav.js
│ ├── Recording.js
│ ├── RoleThread.js
│ ├── RoleThreads.js
│ └── Task.js
├── index.css
├── index.js
├── pages
│ ├── ContainerDesktopPage.js
│ └── DesktopPage.js
└── server
│ └── Routes.js
└── tailwind.config.js
/.github/workflows/lint-check.yml:
--------------------------------------------------------------------------------
1 | name: Lint Check
2 |
3 | on:
4 | pull_request:
5 | types: [opened, synchronize]
6 |
7 | jobs:
8 | lint:
9 | runs-on: ubuntu-latest
10 |
11 | steps:
12 | - uses: actions/checkout@v4
13 |
14 | - name: Setup Python
15 | uses: actions/setup-python@v5
16 | with:
17 | python-version: "3.11"
18 |
19 | - name: Install poetry
20 | run: |
21 | python -m pip install poetry==1.8.3
22 |
23 | - name: Configure poetry
24 | run: |
25 | python -m poetry config virtualenvs.in-project true
26 |
27 | - name: Cache the virtualenv
28 | uses: actions/cache@v4
29 | with:
30 | path: ./.venv
31 | key: ${{ runner.os }}-venv-${{ hashFiles('**/poetry.lock') }}
32 |
33 | - name: Install dependencies
34 | run: |
35 | python -m poetry install
36 |
37 | - name: Run linter check
38 | run: |
39 | python -m poetry run lint --check
40 |
41 | # - name: Run tests
42 | # run: |
43 | # python -m poetry run python -m pytest -sxv
44 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
162 | scratch/
163 | .data/
164 | scratch.ipynb
165 | cidata.iso
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
6 |
7 | ## Our Standards
8 |
9 | Examples of behavior that contributes to creating a positive environment include:
10 |
11 | - Using welcoming and inclusive language
12 | - Being respectful of differing viewpoints and experiences
13 | - Gracefully accepting constructive criticism
14 | - Focusing on what is best for the community
15 | - Showing empathy towards other community members
16 |
17 | Examples of unacceptable behavior by participants include:
18 |
19 | - The use of sexualized language or imagery and unwelcome sexual attention or advances
20 | - Trolling, insulting/derogatory comments, and personal or political attacks
21 | - Public or private harassment
22 | - Publishing others' private information, such as a physical or email address, without explicit permission
23 | - Other conduct which could reasonably be considered inappropriate in a professional setting
24 |
25 | ## Our Responsibilities
26 |
27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
28 |
29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned with this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
30 |
31 | ## Scope
32 |
33 | This Code of Conduct applies within all project spaces, including GitHub, and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.
34 |
35 | ## Enforcement
36 |
37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at github@kentauros.ai. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality regarding the reporter of an incident. Further details of specific enforcement policies may be posted separately.
38 |
39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
40 |
41 | ## Attribution
42 |
43 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
44 |
45 | Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
46 |
47 | For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
48 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | First off, thank you for considering contributing to this project. It's people like you that make it such a great tool.
4 |
5 | ## Code of Conduct
6 |
7 | This project adheres to a Code of Conduct that we expect project participants to adhere to. Please read [the full text](CODE_OF_CONDUCT.md) so that you can understand what actions will and will not be tolerated.
8 |
9 | ## What we are looking for
10 |
11 | This is an open-source project, and we welcome contributions of all kinds: new features, bug fixes, documentation, examples, or enhancements to existing features. We are always thrilled to receive contributions from the community.
12 |
13 | ## How to contribute
14 |
15 | If you've never contributed to an open-source project before, here are a few steps to get you started:
16 |
17 | ### Reporting Issues
18 |
19 | Before submitting a bug report or feature request, check to make sure it hasn't already been submitted. You can search through existing issues and pull requests to see if someone has reported one similar to yours.
20 |
21 | When you are creating a bug report, please include as much detail as possible.
22 |
23 | ### Pull Requests
24 |
25 | - Fork the repository and create your branch from `main`.
26 | - If you've added code that should be tested, add tests.
27 | - If you've changed APIs, update the documentation.
28 | - Ensure the test suite passes.
29 | - Make sure your code lints.
30 | - Issue that pull request!
31 |
32 | ### Getting started
33 |
34 | For something that is bigger than a one or two-line fix:
35 |
36 | 1. Create your own fork of the code.
37 | 2. Do the changes in your fork.
38 | 3. If you like the change and think the project could use it:
39 | - Be sure you have followed the code style for the project.
40 | - Note the Code of Conduct.
41 | - Send a pull request.
42 |
43 | ## How to report a bug
44 |
45 | If you find a security vulnerability, do NOT open an issue. Email github@kentauros.ai instead.
46 |
47 | In order to help us understand and resolve your issue quickly, please include as much information as possible, including:
48 |
49 | - A quick summary and/or background
50 | - Steps to reproduce
51 | - Be specific!
52 | - Give a sample code if you can.
53 | - What you expected would happen
54 | - What actually happens
55 | - Notes (possibly including why you think this might be happening or stuff you tried that didn't work)
56 |
57 | People *love* thorough bug reports. I'm not even kidding.
58 |
59 | ## How to suggest a feature or enhancement
60 |
61 | If you find yourself wishing for a feature that doesn't exist in the project, you are probably not alone. There are bound to be others out there with similar needs. Open an issue on our issues list on GitHub, which describes the feature you would like to see, why you need it, and how it should work.
62 |
63 | ## Code review process
64 |
65 | The core team looks at Pull Requests on a regular basis in a bi-weekly triage meeting. After feedback has been given, we expect responses within two weeks. After two weeks, we may close the pull request if it isn't showing any activity.
66 |
67 | ## Community
68 |
69 | Discussions about the project take place in this repository's Issues and Pull Requests sections. Anybody is welcome to join these conversations.
70 |
71 | Wherever possible, we use GitHub to discuss changes and keep the decision-making process open.
72 |
73 | ## Thank you!
74 |
75 | Thank you for contributing!
76 |
77 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Kentauros AI
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
7 |
8 |
Surfkit
9 |
10 |
11 | A toolkit for building and sharing AI agents that operate on devices
12 |
13 | Explore the docs »
14 |
15 |
16 | View Demo
17 | ·
18 | Report Bug
19 | ·
20 | Request Feature
21 |
22 |
23 |
24 |
25 | ## Features
26 |
27 | - **Build** multimodal agents that can operate on devices
28 | - **Share** agents with the community
29 | - **Run** agents and devices locally or in the cloud
30 | - **Manage** agent tasks at scale
31 | - **Track** and observe agent actions
32 |
33 | ## Demo
34 |
35 | https://github.com/agentsea/surfkit/assets/5533189/98b7714d-9692-4369-8fbf-88aff61e741c
36 |
37 | ## Installation
38 |
39 | ```sh
40 | pip install surfkit
41 | ```
42 |
43 | ## Quickstart
44 |
45 | ### Prerequisites
46 |
47 | - Docker
48 | - Python >= 3.10
49 | - MacOS or Linux
50 |
51 | ### Python
52 |
53 | Use an agent to solve a task
54 |
55 | ```python
56 | from surfkit import solve
57 |
58 | task = solve(
59 | "Search for the most common variety of french duck",
60 | agent_type="pbarker/SurfPizza",
61 | device_type="desktop",
62 | )
63 |
64 | task.wait_for_done()
65 |
66 | result = task.result
67 | ```
68 |
69 | ### CLI
70 |
71 | #### Create an Agent
72 |
73 | Find available agents on the Hub
74 |
75 | ```
76 | surfkit find
77 | ```
78 |
79 | Create a new agent
80 |
81 | ```
82 | surfkit create agent -t pbarker/SurfPizza -n agent01
83 | ```
84 |
85 | List running agents
86 |
87 | ```
88 | surfkit list agents
89 | ```
90 |
91 | #### Create a Device
92 |
93 | Create an Ubuntu desktop for our agent to use.
94 |
95 | ```
96 | surfkit create device --provider docker -n desktop01
97 | ```
98 |
99 | List running devices
100 |
101 | ```
102 | surfkit list devices
103 | ```
104 |
105 | #### Solve a task
106 |
107 | Use the agent to solve a task on the device
108 |
109 | ```
110 | surfkit solve "Search for the most common variety of french duck" \
111 | --agent agent01 \
112 | --device desktop01
113 | ```
114 |
115 | ## Documentation
116 |
117 | View our [documentation](https://docs.hub.agentsea.ai) for more in depth information.
118 |
119 | ## Usage
120 |
121 | ### Building Agents
122 |
123 | Initialize a new project
124 |
125 | ```sh
126 | surfkit new
127 | ```
128 |
129 | Build a docker container for the agent
130 |
131 | ```sh
132 | surfkit build
133 | ```
134 |
135 | ### Running Agents
136 |
137 | Create an agent locally
138 |
139 | ```sh
140 | surfkit create agent --name foo -t pbarker/SurfPizza
141 | ```
142 |
143 | Create an agent on kubernetes
144 |
145 | ```sh
146 | surfkit create agent --runtime kube -t pbarker/SurfPizza
147 | ```
148 |
149 | List running agents
150 |
151 | ```sh
152 | surfkit list agents
153 | ```
154 |
155 | Get details about a specific agent
156 |
157 | ```sh
158 | surfkit get agent foo
159 | ```
160 |
161 | Fetch logs for a specific agent
162 |
163 | ```sh
164 | surfkit logs foo
165 | ```
166 |
167 | Delete an agent
168 |
169 | ```sh
170 | surfkit delete agent foo
171 | ```
172 |
173 | ### Managing Devices
174 |
175 | Create a device
176 |
177 | ```sh
178 | surfkit create device --type desktop --provicer gce --name bar
179 | ```
180 |
181 | List devices
182 |
183 | ```sh
184 | surfkit list devices
185 | ```
186 |
187 | View device in UI
188 |
189 | ```sh
190 | surfkit view device bar
191 | ```
192 |
193 | Delete a device
194 |
195 | ```sh
196 | surfkit delete device bar
197 | ```
198 |
199 | ### Tracking Tasks
200 |
201 | Create a tracker
202 |
203 | ```sh
204 | surfkit create tracker
205 | ```
206 |
207 | List trackers
208 |
209 | ```sh
210 | surfkit list trackers
211 | ```
212 |
213 | Delete a tracker
214 |
215 | ```sh
216 | surfkit delete tracker foo
217 | ```
218 |
219 | ### Solving Tasks
220 |
221 | Solve a task with an existing setup
222 |
223 | ```sh
224 | surfkit solve "search for common french ducks" --agent foo --device bar
225 | ```
226 |
227 | Solve a task creating the agent ad hoc
228 |
229 | ```sh
230 | surfkit solve "search for alpaca sweaters" \
231 | --device bar --agent-file ./agent.yaml
232 | ```
233 |
234 | List tasks
235 |
236 | ```sh
237 | surfkit list tasks
238 | ```
239 |
240 | ### Publishing Agents
241 |
242 | Login to the hub
243 |
244 | ```sh
245 | surfkit login
246 | ```
247 |
248 | Publish the agent
249 |
250 | ```sh
251 | surfkit publish
252 | ```
253 |
254 | List published agent types
255 |
256 | ```sh
257 | surfkit find
258 | ```
259 |
260 | ## Integrations
261 |
262 | Skillpacks is integrated with:
263 |
264 | - [MLLM](https://github.com/agentsea/mllm) A prompt management, routing, and schema validation library for multimodal LLMs
265 | - [Taskara](https://github.com/agentsea/taskara) A task management library for AI agents
266 | - [Skillpacks](https://github.com/agentsea/skillpacks) A library to fine tune AI agents on tasks.
267 | - [Threadmem](https://github.com/agentsea/threadmem) A thread management library for AI agents
268 |
269 | ## Community
270 |
271 | Come join us on [Discord](https://discord.gg/hhaq7XYPS6).
272 |
273 | ## Developing
274 |
275 | Add the following function to your `~/.zshrc` (or similar)
276 |
277 | ```sh
278 | function sk() {
279 | local project_dir="/path/to/surfkit/repo"
280 | local venv_dir="$project_dir/.venv"
281 | local ssh_auth_sock="$SSH_AUTH_SOCK"
282 | local ssh_agent_pid="$SSH_AGENT_PID"
283 |
284 | export SSH_AUTH_SOCK="$ssh_auth_sock"
285 | export SSH_AGENT_PID="$ssh_agent_pid"
286 |
287 | # Add the Poetry environment's bin directory to the PATH
288 | export PATH="$venv_dir/bin:$PATH"
289 |
290 | # Execute the surfkit.cli.main module using python -m
291 | surfkit "$@"
292 | }
293 | ```
294 |
295 | Replacing `/path/to/surfkit/repo` with the absolute path to your local repo.
296 |
297 | Then calling `sk` will execute the working code in your repo from any location.
298 |
--------------------------------------------------------------------------------
/assets/demo.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agentsea/surfkit/8708abd995d45cad5e0a94a9606d6ada6b8a94dd/assets/demo.mp4
--------------------------------------------------------------------------------
/cloudbuild.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | # Build and push the image using Buildx
3 | - name: "gcr.io/cloud-builders/docker"
4 | entrypoint: "bash"
5 | args:
6 | - "-c"
7 | - |
8 | docker buildx create --name mybuilder --use
9 | docker buildx build \
10 | --platform linux/amd64,linux/arm64 \
11 | -t us-central1-docker.pkg.dev/agentsea-dev/guisurfer/surfkit-ui:$SHORT_SHA \
12 | --push \
13 | --cache-from=type=registry,ref=us-central1-docker.pkg.dev/agentsea-dev/guisurfer/surfkit-ui:cache \
14 | --cache-to=type=registry,ref=us-central1-docker.pkg.dev/agentsea-dev/guisurfer/surfkit-ui:cache,mode=max \
15 | ./ui/surfkit
16 | if [ "$BRANCH_NAME" == "main" ]; then
17 | docker buildx build \
18 | --platform linux/amd64,linux/arm64 \
19 | -t us-central1-docker.pkg.dev/agentsea-dev/guisurfer/surfkit-ui:latest \
20 | --push \
21 | --cache-from=type=registry,ref=us-central1-docker.pkg.dev/agentsea-dev/guisurfer/surfkit-ui:cache \
22 | --cache-to=type=registry,ref=us-central1-docker.pkg.dev/agentsea-dev/guisurfer/surfkit-ui:cache,mode=max \
23 | ./ui/surfkit
24 | fi
25 |
26 | # Removed the images field
27 | timeout: "1200s"
28 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "surfkit"
3 | version = "0.1.423"
4 | description = "A toolkit for building AI agents that use devices"
5 | authors = ["Patrick Barker ", "Jeffrey Huckabay "]
6 | license = "MIT"
7 | readme = "README.md"
8 |
9 | [tool.poetry.dependencies]
10 | python = "^3.10"
11 | pydantic = "^2.6.4"
12 | rootpath = "^0.1.1"
13 | docker = "^7.0.0"
14 | namesgenerator = "^0.3"
15 | pyyaml = "^6.0.1"
16 | toolfuse = "^0.1.15"
17 | devicebay = "^0.1.11"
18 | litellm = "^1.35.8"
19 | rich = "^13.7.1"
20 | tqdm = "^4.66.4"
21 | agentcore = "^0.1.2"
22 | agentdesk = "^0.2.135"
23 | taskara = "^0.1.246"
24 |
25 |
26 | [tool.poetry.group.dev.dependencies]
27 | ipykernel = "^6.29.4"
28 |
29 | black = "^24.4.2"
30 | isort = "^5.13.2"
31 | google-cloud-aiplatform = "^1.53.0"
32 | ruff = "^0.6.5"
33 |
34 | [build-system]
35 | requires = ["poetry-core"]
36 | build-backend = "poetry.core.masonry.api"
37 |
38 | [tool.poetry.scripts]
39 | surfkit = "surfkit.cli.main:app"
40 | lint = "scripts.lint:main"
41 |
42 |
43 | [tool.pyright]
44 | reportUnknownParameterType = false
45 | reportMissingTypeArgument = false
46 | reportUnknownMemberType = false
47 | reportUnknownVariableType = false
48 | reportUnknownArgumentType = false
49 |
50 |
51 | [tool.isort]
52 | line_length = 88
53 | profile = "black"
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | pythonpath = .
3 |
4 | env =
5 | D:DB_TYPE=sqlite
6 | D:AGENTSEA_DB_TEST=true
7 | D:AGENTSEA_HOME=./.agentsea
8 | D:AGENTSEA_DB_DIR=./.agentsea/data/test
9 |
--------------------------------------------------------------------------------
/scripts/lint.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import subprocess
3 |
4 |
def main():
    """Entry point for `poetry run lint`: formats the repo with black.

    With --check, files are left untouched; the formatter prints diffs
    and exits non-zero when reformatting would be required (CI mode).
    """
    parser = argparse.ArgumentParser(description="Run linters.")
    parser.add_argument("--check", action="store_true", help="Run in check mode.")
    args = parser.parse_args()

    # In check mode, both tools get the non-mutating diff flags.
    extra = ["--check", "--diff"] if args.check else []
    black_command = ["black", *extra, "."]
    isort_command = ["isort", *extra, "."]

    subprocess.run(black_command, check=True)
    # subprocess.run(isort_command, check=True) # TODO: this is hard to sync with local editor


if __name__ == "__main__":
    main()
23 |
--------------------------------------------------------------------------------
/surfkit/__init__.py:
--------------------------------------------------------------------------------
import warnings

from cryptography.utils import CryptographyDeprecationWarning

# Suppress specific cryptography deprecation warnings
warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)

# Suppress only UserWarnings coming from Pydantic's _fields.py
# (fix: this filter was previously registered twice with identical arguments)
warnings.filterwarnings(
    "ignore", category=UserWarning, module="pydantic._internal._fields"
)

from surfkit.client import solve
from taskara import Task, TaskStatus
17 |
--------------------------------------------------------------------------------
/surfkit/agent.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Generic, List, Optional, Type, TypeVar
3 |
4 | from agentcore.models import V1UserProfile
5 | from devicebay import Device
6 | from pydantic import BaseModel
7 | from taskara import Task
8 |
9 | from .skill import Skill
10 |
11 | C = TypeVar("C", bound="BaseModel")
12 | T = TypeVar("T", bound="TaskAgent")
13 |
14 |
class TaskAgent(Generic[C, T], ABC):
    """Abstract base for agents that solve tasks on devices.

    Type parameters:
        C: the pydantic config model used to build the agent.
        T: the concrete agent type produced by the factory methods.
    """

    @classmethod
    def name(cls) -> str:
        """Human-readable name of this agent type (the class name)."""
        return cls.__name__

    def learn_task(
        self,
        task: Task,
        skill: Skill,
        user: V1UserProfile,
    ):
        """Learn a task.

        Args:
            task (Task): The task to learn.
            skill (Skill): The skill being trained.
            user (V1UserProfile): The user on whose behalf learning runs.

        Raises:
            NotImplementedError: Always, unless a subclass overrides it.
        """
        raise NotImplementedError("Subclasses must implement this method")

    @abstractmethod
    def solve_task(
        self,
        task: Task,
        device: Optional[Device] = None,
        max_steps: int = 30,
    ) -> Task:
        """Solve a task, optionally on a device.

        Args:
            task (Task): The task to solve.
            device (Device, optional): Device to act on. Defaults to None.
            max_steps (int, optional): Step budget. Defaults to 30.

        Returns:
            Task: The (updated) task.
        """
        ...

    @classmethod
    @abstractmethod
    def supported_devices(cls) -> List[Type[Device]]:
        """Device types this agent knows how to drive.

        Returns:
            List[Type[Device]]: The supported device classes.
        """
        ...

    @classmethod
    def is_supported(cls, device: Device) -> bool:
        """Check whether a device instance is usable by this agent.

        Args:
            device (Device): Device instance to check.

        Returns:
            bool: True when the device's exact type is supported.
        """
        return any(type(device) is d for d in cls.supported_devices())

    @classmethod
    @abstractmethod
    def config_type(cls) -> Type[C]:
        """The configuration model class for this agent.

        Returns:
            Type[C]: The config type.
        """
        ...

    @classmethod
    @abstractmethod
    def from_config(cls, config: C) -> T:
        """Build an agent from a config instance.

        Args:
            config (C): The configuration.

        Returns:
            T: The agent.
        """
        ...

    @classmethod
    @abstractmethod
    def default(cls) -> T:
        """Build an agent with default settings.

        Returns:
            T: The agent.
        """
        ...

    @classmethod
    def init(cls) -> None:
        """One-time initialization hook for the agent type; no-op by default."""
        pass
115 |
--------------------------------------------------------------------------------
/surfkit/auth/default.py:
--------------------------------------------------------------------------------
1 | COMMON_USER = "common"
2 |
--------------------------------------------------------------------------------
/surfkit/auth/key.py:
--------------------------------------------------------------------------------
1 | import os
2 | from abc import ABC, abstractmethod
3 | from typing import Optional
4 |
5 | import requests
6 | from agentcore.models import V1UserProfile
7 | from requests.exceptions import RequestException
8 | from threadmem.db.conn import WithDB
9 |
10 | from surfkit.config import AGENTSEA_AUTH_URL
11 |
12 |
class KeyProvider(ABC):
    """Abstract interface for issuing and validating API keys."""

    @abstractmethod
    def create_key(self) -> str:
        """Mint and return a new API key."""
        ...

    @abstractmethod
    def is_key(self, token: str) -> bool:
        """Report whether the token looks like an API key (vs. another credential)."""
        ...

    @abstractmethod
    def validate(self, token: str) -> V1UserProfile:
        """Resolve the key to a user profile; raise if it is invalid."""
        ...
27 |
28 |
class MockProvider(KeyProvider):
    """Test-only KeyProvider that accepts a single hard-coded key."""

    _key = "k.mock"

    def create_key(self) -> str:
        """Always hand back the fixed mock key."""
        return self._key

    def is_key(self, token: str) -> bool:
        """Anything prefixed with "k." is treated as an API key."""
        return token.startswith("k.")

    def validate(self, token: str) -> V1UserProfile:
        """Return a canned profile for the mock key; reject any other token."""
        if token != self._key:
            raise ValueError("Invalid token")
        return V1UserProfile(
            email="tom@myspace.com",
            display_name="tom",
            picture="https://i.insider.com/4efd9b8b69bedd682c000022?width=750&format=jpeg&auto=webp",
        )
50 |
51 |
class HubKeyProvider(KeyProvider, WithDB):
    """KeyProvider backed by the AgentSea Hub auth service."""

    def __init__(self) -> None:
        # Auth endpoint base URL comes from surfkit.config.
        self.hub_url = AGENTSEA_AUTH_URL

    def create_key(self) -> str:
        """Key creation happens in the hub UI, not through this client."""
        raise NotImplementedError("create_key is not implemented")

    def is_key(self, token: str) -> bool:
        """Anything prefixed with "k." is treated as an API key."""
        return token.startswith("k.")

    def validate(self, token: str) -> V1UserProfile:
        """Validate the key against the hub's /v1/users/me endpoint.

        Raises:
            ValueError: If the HTTP request fails or returns an error status.
        """
        headers = {"Authorization": f"Bearer {token}"}
        try:
            response = requests.get(f"{self.hub_url}/v1/users/me", headers=headers)
            response.raise_for_status()  # Raise an HTTPError if one occurred.
            # NOTE: .json() stays inside the try — JSON decode errors from
            # requests are RequestException subclasses too.
            prof = V1UserProfile(**response.json())
        except RequestException as e:
            raise ValueError(f"Failed to validate token. Error: {e}")
        return prof
78 |
79 |
def get_key() -> Optional[str]:
    """Read the AgentSea API key from the environment, if present."""
    return os.environ.get("AGENTSEA_KEY")


def ensure_key() -> str:
    """Return the AgentSea API key, raising if it is absent or empty."""
    key = get_key()
    if key:
        return key
    raise ValueError("$AGENTSEA_KEY must be provided to use hub components")
89 |
90 |
def default_key_provider() -> KeyProvider:
    """Build the key provider used when none is supplied explicitly."""
    return HubKeyProvider()
93 |
--------------------------------------------------------------------------------
/surfkit/auth/provider.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from abc import ABC, abstractmethod
4 | from typing import Optional
5 |
6 | import requests
7 | from agentcore.models import V1UserProfile
8 |
9 | from .key import KeyProvider, MockProvider, default_key_provider
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 |
class AuthProvider(ABC):
    """Abstract interface for resolving a bearer token to a user profile."""

    @abstractmethod
    def key_provider(self) -> KeyProvider:
        """Return the API-key provider backing this auth provider."""
        ...

    @abstractmethod
    def get_user_auth(self, token: str) -> V1UserProfile:
        """Resolve a token (API key or ID token) to a user profile."""
        ...
22 |
23 |
class HubAuthProvider(AuthProvider):
    """Hub user auth.

    Resolves API keys locally via the key provider, and anything else as an
    ID token verified against the hub's `/v1/users/me` endpoint.
    """

    _key_provider: KeyProvider

    def __init__(self, key_provider: Optional[KeyProvider] = None) -> None:
        if not key_provider:
            key_provider = default_key_provider()
        self.hub_url = os.environ.get("AGENTSEA_HUB_URL")
        if not self.hub_url:
            raise ValueError(
                "$AGENTSEA_HUB_URL must be set to use the Hub key provider"
            )

        self._key_provider = key_provider

    def key_provider(self) -> KeyProvider:
        """Return the API-key provider backing this auth provider."""
        return self._key_provider

    def get_user_auth(self, token: str) -> V1UserProfile:
        """Resolve a token to a user profile.

        Raises:
            Exception: If the token cannot be validated.
        """
        try:
            if self._key_provider.is_key(token):
                user = self._key_provider.validate(token)
                logger.debug(f"found user: {user}")

                return user

            else:
                headers = {"Authorization": f"Bearer {token}"}
                headers.update(
                    {
                        "User-Agent": "My User Agent 1.0",
                    }
                )
                auth_url = f"{self.hub_url}/v1/users/me"
                print(f"authorizing token with: {auth_url}", flush=True)
                response = requests.get(auth_url, headers=headers)
                response.raise_for_status()

                user_data = response.json()
                user_schema = V1UserProfile(**user_data)
                user_schema.token = token
                return user_schema

        except Exception as e:
            # Use the module logger rather than the root logger, and keep
            # the original failure chained on the raised exception.
            logger.error(f"Problem fetching user auth {e}")
            raise Exception(
                "ID token was unauthorized, please log in",
            ) from e
73 |
74 |
class MockAuthProvider(AuthProvider):
    """Mock user auth backed by the mock key provider."""

    _key_provider: KeyProvider = MockProvider()

    def key_provider(self) -> KeyProvider:
        """Return the mock key provider."""
        return self._key_provider

    def get_user_auth(self, token: str) -> V1UserProfile:
        """Resolve a token to a profile; non-key tokens get a dummy user."""
        try:
            if not self._key_provider.is_key(token):
                # Anything that doesn't look like an API key yields the
                # canned test user.
                return V1UserProfile(
                    email="tom@myspace.com",
                    display_name="tom",
                    picture="https://i.insider.com/4efd9b8b69bedd682c000022?width=750&format=jpeg&auto=webp",
                )
            return self._key_provider.validate(token)

        except Exception as e:
            logging.error(f"Problem fetching user auth {e}")
            raise Exception(
                "ID token was unauthorized, please log in",
            )
102 |
103 |
def default_auth_provider() -> AuthProvider:
    """Build the auth provider used when none is supplied explicitly."""
    return HubAuthProvider()
106 |
--------------------------------------------------------------------------------
/surfkit/auth/transport.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from typing import Annotated
4 |
5 | from agentcore.models import V1UserProfile
6 | from fastapi import Depends, HTTPException
7 | from fastapi.security import OAuth2PasswordBearer
8 |
9 | from .provider import default_auth_provider
10 |
11 | logger = logging.getLogger(__name__)
12 |
# When AGENT_NO_AUTH is enabled, skip building a real auth provider at import
# time; the dependency selectors below then hand out the mock-user functions.
if os.getenv("AGENT_NO_AUTH", "false").lower() == "true":
    user_auth = None
else:
    user_auth = default_auth_provider()

# Standard FastAPI bearer-token extraction scheme.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
19 |
20 |
async def get_current_user(
    token: Annotated[str, Depends(oauth2_scheme)],
) -> V1UserProfile:
    """FastAPI dependency that resolves the bearer token to a user profile.

    Raises:
        SystemError: If auth was disabled at import time (AGENT_NO_AUTH).
        HTTPException: 401 if the token cannot be validated.
    """
    if not user_auth:
        raise SystemError("user auth is not configured")
    try:
        # Never echo the token itself: bearer tokens are secrets.
        print("checking user token", flush=True)
        user = user_auth.get_user_auth(token)
        user.token = token
    except Exception as e:
        logger.error(e)
        raise HTTPException(
            status_code=401,
            detail=f"ID token was unauthorized, please log in: {e}",
        )

    return user
38 |
39 |
async def get_user_mock_auth() -> V1UserProfile:
    """Dependency used when authentication is disabled: a fixed dummy user."""
    return V1UserProfile(
        email="tom@myspace.com",
        display_name="tom",
        picture="https://i.insider.com/4efd9b8b69bedd682c000022?width=750&format=jpeg&auto=webp",
    )
47 |
48 |
def get_user_dependency():
    """Pick the async user dependency based on the AGENT_NO_AUTH flag."""
    if os.getenv("AGENT_NO_AUTH", "false").lower() != "true":
        print("using current user", flush=True)
        return get_current_user
    print("using mock auth", flush=True)
    return get_user_mock_auth
56 |
57 |
def get_current_user_sync(
    token: str,
) -> V1UserProfile:
    """Synchronous variant of `get_current_user` for non-async call sites.

    Raises:
        SystemError: If auth was disabled at import time (AGENT_NO_AUTH).
        HTTPException: 401 if the token cannot be validated.
    """
    if not user_auth:
        raise SystemError("user auth is not configured")
    try:
        # Never echo the token itself: bearer tokens are secrets.
        print("checking user token", flush=True)
        user = user_auth.get_user_auth(token)
        user.token = token

    except Exception as e:
        logger.error(e)
        raise HTTPException(
            status_code=401,
            detail=f"ID token was unauthorized, please log in: {e}",
        )

    return user
76 |
77 |
def get_user_mock_auth_sync() -> V1UserProfile:
    """Synchronous dummy-user dependency for when auth is disabled."""
    return V1UserProfile(
        email="tom@myspace.com",
        display_name="tom",
        picture="https://i.insider.com/4efd9b8b69bedd682c000022?width=750&format=jpeg&auto=webp",
    )
85 |
86 |
def get_user_dependency_sync():
    """Pick the sync user dependency based on the AGENT_NO_AUTH flag."""
    if os.getenv("AGENT_NO_AUTH", "false").lower() != "true":
        print("using current user", flush=True)
        return get_current_user_sync
    print("using mock auth", flush=True)
    return get_user_mock_auth_sync
94 |
--------------------------------------------------------------------------------
/surfkit/auth/util.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from agentcore.models import V1UserProfile
3 |
4 | from surfkit.config import AGENTSEA_AUTH_URL
5 |
6 |
def get_user_info(token: str) -> V1UserProfile:
    """Fetch the profile of the user owning `token` from the auth service.

    Raises:
        requests.HTTPError: If the auth service returns an error status.
        requests.RequestException: If the request itself fails or times out.
    """
    response = requests.get(
        f"{AGENTSEA_AUTH_URL}/v1/users/me",
        headers={"Authorization": f"Bearer {token}"},
        timeout=30,
    )
    # Fail loudly on 4xx/5xx instead of trying to parse an error body
    # as a V1UserProfile.
    response.raise_for_status()
    return V1UserProfile.model_validate(response.json())
12 |
--------------------------------------------------------------------------------
/surfkit/cli/new.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 |
4 | from surfkit.cli.templates.agent import (
5 | generate_agent,
6 | generate_agentfile,
7 | generate_dir,
8 | generate_dockerfile,
9 | generate_gitignore,
10 | generate_pyproject,
11 | generate_readme,
12 | generate_server,
13 | )
14 |
15 | from .util import (
16 | is_docker_installed,
17 | is_poetry_installed,
18 | pkg_from_name,
19 | run_poetry_install,
20 | )
21 |
22 |
def create_git_repository(repo_path):
    """
    Create and initialize a local Git repository.

    NOTE(review): this changes the process working directory to `repo_path`
    as a side effect — confirm callers rely on that before refactoring.

    Parameters:
    - repo_path: Path where the Git repository will be created.
    """
    os.makedirs(repo_path, exist_ok=True)
    os.chdir(repo_path)
    # Requires git to be available on PATH; raises CalledProcessError on failure.
    subprocess.run(["git", "init"], check=True)
33 |
34 |
def new_agent(
    name: str,
    description: str,
    git_user_ref: str,
    img_repo: str,
    icon_url: str,
    template: str,
) -> None:
    """Scaffold a new agent project in the current working directory.

    Generates the package directory, Dockerfile, pyproject.toml, agent and
    server modules, .gitignore, agent.yaml and README, then runs
    `poetry install`.

    Args:
        name: Agent name; also used to derive the package name.
        description: Short human-readable description of the agent.
        git_user_ref: Author string written into pyproject.toml.
        img_repo: Container image repository for the agent.
        icon_url: URL of an icon for the agent.
        template: Agent template to render ("surf4v" or "surfskelly").

    Raises:
        SystemError: If poetry or docker is not installed.
    """
    # Both tools are required: poetry to install deps, docker to build images.
    if not is_poetry_installed():
        raise SystemError(
            "Poetry not found on system, please install at https://python-poetry.org/docs/#installation"
        )

    if not is_docker_installed():
        raise SystemError(
            "Docker not found on system, please install at https://docs.docker.com/engine/install/"
        )

    generate_dir(name)
    generate_dockerfile(name)
    generate_pyproject(name, description, git_user_ref)
    generate_agent(name, template)
    generate_server(name)
    generate_gitignore()
    generate_agentfile(
        name, description=description, image_repo=img_repo, icon_url=icon_url
    )
    generate_readme(name, description)

    run_poetry_install()
65 |
--------------------------------------------------------------------------------
/surfkit/cli/templates/agent.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 |
4 | from surfkit.cli.util import pkg_from_name
5 |
6 |
def generate_dockerfile(agent_name: str) -> None:
    """Write a Dockerfile that runs the agent's server module under poetry.

    Args:
        agent_name: Agent name; its package name becomes the server module.
    """
    out = f"""
FROM thehale/python-poetry:1.8.2-py3.10-slim

COPY . /app
WORKDIR /app

RUN apt-get update && apt-get install -y openssh-client ntp
RUN poetry install

EXPOSE 9090

# Run the application
CMD ["poetry", "run", "python", "-m", "{pkg_from_name(agent_name)}.server"]
"""
    # Plain string literal: the filename has no placeholders.
    with open("Dockerfile", "w") as f:
        f.write(out)

    # Report progress like the sibling generators do.
    print("wrote Dockerfile")
24 |
25 |
def generate_server(agent_name: str) -> None:
    """Write the FastAPI server module (<pkg>/server.py) for the agent.

    Args:
        agent_name: Agent name; its package name determines the output path
            and the logger/uvicorn module names embedded in the template.
    """
    # The rendered module configures logging, CORS, and an app lifespan that
    # initializes the Agent class before serving.
    out = f"""
import logging
import os
import sys
from contextlib import asynccontextmanager
from typing import Final

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from surfkit.server.routes import task_router

from .agent import Agent, router

# Configure logging
logger: Final = logging.getLogger("{pkg_from_name(agent_name)}")
logger.setLevel(int(os.getenv("LOG_LEVEL", str(logging.DEBUG))))
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)

# Ensure logs are flushed immediately
handler.flush = sys.stdout.flush
logger.addHandler(handler)
logger.propagate = False

ALLOW_ORIGINS = os.getenv("ALLOW_ORIGINS", "*").split(",")
ALLOW_METHODS = os.getenv("ALLOW_METHODS", "*").split(",")
ALLOW_HEADERS = os.getenv("ALLOW_HEADERS", "*").split(",")


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Initialize the agent type before the server comes live
    Agent.init()
    yield


app = FastAPI(lifespan=lifespan)  # type: ignore

app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOW_ORIGINS,
    allow_credentials=True,
    allow_methods=ALLOW_METHODS,
    allow_headers=ALLOW_HEADERS,
)

app.include_router(task_router(Agent, router))

if __name__ == "__main__":
    port = os.getenv("SERVER_PORT", "9090")
    reload = os.getenv("SERVER_RELOAD", "false") == "true"
    host = os.getenv("SERVER_HOST", "0.0.0.0")

    uvicorn.run(
        "{pkg_from_name(agent_name)}.server:app",
        host=host,
        port=int(port),
        reload=reload,
        reload_excludes=[".data"],
        log_config=None,
    )
"""
    with open(f"{pkg_from_name(agent_name)}/server.py", "w") as f:
        f.write(out)

    print(f"wrote {pkg_from_name(agent_name)}/server.py")
96 |
97 |
def generate_agent(agent_name: str, template: str = "surf4v") -> None:
    """Render the agent.py module for the chosen template and write it.

    Args:
        agent_name: Agent name; interpolated into the rendered module.
        template: Which template class to use ("surf4v" or "surfskelly").

    Raises:
        ValueError: If `template` is not a known template name.
    """
    from .agents.surf4v import Surf4v
    from .agents.surfskelly import SurfSkelly

    # Dispatch table keyed by template name.
    template_classes = {"surf4v": Surf4v, "surfskelly": SurfSkelly}
    template_cls = template_classes.get(template)
    if template_cls is None:
        raise ValueError(f"Unknown template: {template}")

    out = template_cls().template(agent_name)

    with open(f"{pkg_from_name(agent_name)}/agent.py", "w") as f:
        f.write(out)

    print(f"wrote {pkg_from_name(agent_name)}/agent.py")
115 |
116 |
def generate_dir(agent_name: str) -> None:
    """Create the package directory for the agent.

    Raises FileExistsError if the directory already exists.
    """
    os.mkdir(pkg_from_name(agent_name))
119 |
120 |
def generate_pyproject(agent_name: str, description, git_user_ref: str) -> None:
    """Write the pyproject.toml for the generated agent package.

    Args:
        agent_name: Poetry project name; its package name is listed in
            `packages`.
        description: Short description embedded in the description field.
        git_user_ref: Author string for the authors list.
    """
    out = f"""
[tool.poetry]
name = "{agent_name}"
version = "0.1.0"
description = "AI agent for {description}"
authors = ["{git_user_ref}"]
license = "MIT"
readme = "README.md"
packages = [{{include = "{pkg_from_name(agent_name)}"}}]

[tool.poetry.dependencies]
python = "^3.10"
sqlalchemy = "^2.0.27"
pydantic = "^2.6.3"
requests = "^2.31.0"
fastapi = {{version = "^0.109", extras = ["all"]}}
surfkit = "^0.1.195"


[tool.poetry.group.dev.dependencies]
flake8 = "^7.0.0"
black = "^24.2.0"
pytest = "^8.0.2"
ipykernel = "^6.29.3"
pytest-env = "^1.1.3"


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
"""
    with open(f"pyproject.toml", "w") as f:
        f.write(out)

    print("wrote pyproject.toml")
157 |
158 |
def generate_agentfile(
    name: str, description: str, image_repo: str, icon_url: str
) -> None:
    """Write the agent.yaml manifest describing the agent for surfkit.

    Args:
        name: Agent name.
        description: Short description of the agent.
        image_repo: Container image repository; also used for the
            `latest` version tag.
        icon_url: URL of an icon for the agent.
    """
    out = f"""
api_version: v1
kind: TaskAgent
name: "{name}"
description: "{description}"
cmd: "poetry run python -m {pkg_from_name(name)}.server"
img_repo: "{image_repo}"
tags:
  - "gui"
supports:
  - "desktop"
versions:
  latest: "{image_repo}:latest"
runtimes:
  - type: "agent"
    preference:
      - "process"
      - "docker"
      - "kube"
llm_providers:
  preference:
    - "gpt-4o"
    - "gpt-4-turbo"
    - "anthropic/claude-3-opus-20240229"
public: True
icon: {icon_url}
resource_requests:
  cpu: "1"
  memory: "2Gi"
resource_limits:
  cpu: "2"
  memory: "4Gi"
"""
    with open(f"agent.yaml", "w") as f:
        f.write(out)

    print("wrote agent.yaml")
200 |
201 |
def generate_gitignore() -> None:
    """Write (or append to) the project's .gitignore.

    If no .gitignore exists, the full standard Python template below is
    written; if one already exists, only a "data/" entry is appended.
    """

    out = """
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# poetry
#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
#   This is especially recommended for binary packages to ensure reproducibility, and is more
#   commonly ignored for libraries.
#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
#   in version control.
#   https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file.  For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

.data/
cidata.iso
.agentsea
"""
    file_path = Path(".gitignore")

    if file_path.exists():
        # NOTE(review): an existing .gitignore only gets "data/" appended,
        # while the full template ignores ".data/" (dot-prefixed) — confirm
        # this asymmetry is intentional.
        with file_path.open("a") as file:
            file.write("\ndata/\n")
    else:
        with file_path.open("w") as file:
            file.write(out)

    print("wrote .gitignore")
380 |
381 |
def generate_readme(agent_name: str, description: str) -> None:
    """Write a starter README.md, unless one already exists.

    Args:
        agent_name: Agent name used as the README title.
        description: Short description placed under the title.
    """

    out = f"""# {agent_name}

{description}

## Install
```sh
pip install surfkit
```

## Usage

Create an agent
```sh
surfkit create agent -f ./agent.yaml --runtime {{ process | docker | kube }} --name foo
```

List running agents
```sh
surfkit list agents
```

Use the agent to solve a task
```sh
surfkit solve --agent foo --description "Search for french ducks" --device-type desktop
```

Get the agent logs
```sh
surfkit logs --name foo
```

Delete the agent
```sh
surfkit delete agent --name foo
```

"""
    file_path = Path("README.md")

    # Never clobber an existing README.
    if not file_path.exists():
        with file_path.open("w") as file:
            file.write(out)

        print("wrote README.md")
428 |
--------------------------------------------------------------------------------
/surfkit/cli/templates/agents/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 |
class AgentTemplate(ABC):
    """Base class for agent source-code templates."""

    @abstractmethod
    def template(self, agent_name: str) -> str:
        """Render the agent.py module source for an agent named `agent_name`."""
        ...
8 |
--------------------------------------------------------------------------------
/surfkit/cli/templates/agents/surf4v.py:
--------------------------------------------------------------------------------
1 | from .base import AgentTemplate
2 |
3 |
class Surf4v(AgentTemplate):
    """Template that renders a complete GPT-4V desktop agent module."""

    def template(self, agent_name: str) -> str:
        """Render the agent.py source for an agent named `agent_name`.

        The returned string is a full Python module; doubled braces in the
        f-string below are escapes that survive into the generated code,
        while single-brace `{agent_name}` interpolations are filled in here.
        """
        # NOTE(review): the generated code passes `EnvState(images=screenshot)`
        # (a bare screenshot, not a list) while other call sites use
        # `images=[screenshot]` — confirm EnvState accepts both forms.
        return f"""
from typing import List, Type, Tuple, Optional
import logging
from typing import Final
import traceback
import time
import os

from devicebay import Device
from agentdesk.device_v1 import Desktop
from toolfuse.util import AgentUtils
from pydantic import BaseModel
from surfkit.agent import TaskAgent
from taskara import Task, TaskStatus
from mllm import Router
from skillpacks import EnvState
from skillpacks.server.models import V1ActionSelection, V1EnvState
from threadmem import RoleThread, RoleMessage
from tenacity import (
    retry,
    stop_after_attempt,
    before_sleep_log,
)
from rich.json import JSON
from rich.console import Console

logging.basicConfig(level=logging.INFO)
logger: Final = logging.getLogger(__name__)
logger.setLevel(int(os.getenv("LOG_LEVEL", str(logging.DEBUG))))

console = Console(force_terminal=True)

router = Router.from_env()


class {agent_name}Config(BaseModel):
    pass


class {agent_name}(TaskAgent):
    \"""A desktop agent that uses GPT-4V to solve tasks\"""

    def solve_task(
        self,
        task: Task,
        device: Optional[Device] = None,
        max_steps: int = 30,
    ) -> Task:
        \"""Solve a task

        Args:
            task (Task): Task to solve.
            device (Device): Device to perform the task on. Defaults to None.
            max_steps (int, optional): Max steps to try and solve. Defaults to 30.

        Returns:
            Task: The task
        \"""

        # Post a message to the default thread to let the user know the task is in progress
        task.post_message("assistant", f"Starting task '{{task.description}}'")

        # Create threads in the task to update the user
        console.print("creating threads...")
        task.ensure_thread("debug")
        task.post_message("assistant", "I'll post debug messages here", thread="debug")

        # Check that the device we received is one we support
        if not isinstance(device, Desktop):
            raise ValueError("Only desktop devices supported")

        # Add standard agent utils to the device
        device.merge(AgentUtils())

        # Open a site if that is in the parameters
        site = task._parameters.get("site") if task._parameters else None
        if site:
            console.print(f"▶️ opening site url: {{site}}", style="blue")
            task.post_message("assistant", f"opening site url {{site}}...")
            device.open_url(site)
            console.print("waiting for browser to open...", style="blue")
            time.sleep(5)

        # Get the json schema for the tools
        tools = device.json_schema()
        console.print("tools: ", style="purple")
        console.print(JSON.from_data(tools))

        # Get info about the desktop
        info = device.info()
        screen_size = info["screen_size"]
        console.print(f"Screen size: {{screen_size}}")

        # Create our thread and start with a system prompt
        thread = RoleThread()
        thread.post(
            role="user",
            msg=(
                "You are an AI assistant which uses a devices to accomplish tasks. "
                f"Your current task is {{task.description}}, and your available tools are {{device.json_schema()}} "
                "For each screenshot I will send you please return the result chosen action as "
                f"raw JSON adhearing to the schema {{V1ActionSelection.model_json_schema()}} "
                "Let me know when you are ready and I'll send you the first screenshot"
            ),
        )
        response = router.chat(thread, namespace="system")
        console.print(f"system prompt response: {{response}}", style="blue")
        thread.add_msg(response.msg)

        # Loop to run actions
        for i in range(max_steps):
            console.print(f"-------step {{i + 1}}", style="green")

            try:
                thread, done = self.take_action(device, task, thread)
            except Exception as e:
                console.print(f"Error: {{e}}", style="red")
                task.status = TaskStatus.FAILED
                task.error = str(e)
                task.save()
                task.post_message("assistant", f"❗ Error taking action: {{e}}")
                return task

            if done:
                console.print("task is done", style="green")
                # TODO: remove
                time.sleep(10)
                return task

            time.sleep(2)

        task.status = TaskStatus.FAILED
        task.save()
        task.post_message("assistant", "❗ Max steps reached without solving task")
        console.print("Reached max steps without solving task", style="red")

        return task

    @retry(
        stop=stop_after_attempt(5),
        before_sleep=before_sleep_log(logger, logging.INFO),
    )
    def take_action(
        self,
        desktop: Desktop,
        task: Task,
        thread: RoleThread,
    ) -> Tuple[RoleThread, bool]:
        \"""Take an action

        Args:
            desktop (Desktop): Desktop to use
            task (str): Task to accomplish
            thread (RoleThread): Role thread for the task

        Returns:
            bool: Whether the task is complete
        \"""
        try:
            # Check to see if the task has been cancelled
            if task.remote:
                task.refresh()
            if task.status == TaskStatus.CANCELING or task.status == TaskStatus.CANCELED:
                console.print(f"task is {{task.status}}", style="red")
                if task.status == TaskStatus.CANCELING:
                    task.status = TaskStatus.CANCELED
                    task.save()
                return thread, True

            console.print("taking action...", style="white")

            # Create a copy of the thread, and remove old images
            _thread = thread.copy()
            _thread.remove_images()

            # Take a screenshot of the desktop and post a message with it
            screenshot = desktop.take_screenshots()[0]
            task.post_message(
                "assistant",
                "current image",
                images=[screenshot],
                thread="debug",
            )

            # Get the current mouse coordinates
            x, y = desktop.mouse_coordinates()
            console.print(f"mouse coordinates: ({{x}}, {{y}})", style="white")

            # Craft the message asking the MLLM for an action
            msg = RoleMessage(
                role="user",
                text=(
                    f"Here is a screenshot of the current desktop with the mouse coordinates ({{x}}, {{y}}). "
                    "Please select an action from the provided schema."
                ),
                images=[screenshot],
            )
            _thread.add_msg(msg)

            # Make the action selection
            response = router.chat(
                _thread, namespace="action", expect=V1ActionSelection
            )

            try:
                # Post to the user letting them know what the modle selected
                selection = response.parsed
                if not selection:
                    raise ValueError("No action selection parsed")

                task.post_message("assistant", f"👁️ {{selection.observation}}")
                task.post_message("assistant", f"💡 {{selection.reason}}")
                console.print(f"action selection: ", style="white")
                console.print(JSON.from_data(selection.model_dump()))

                task.post_message(
                    "assistant",
                    f"▶️ Taking action '{{selection.action.name}}' with parameters: {{selection.action.parameters}}",
                )

            except Exception as e:
                console.print(f"Response failed to parse: {{e}}", style="red")
                raise

            # The agent will return 'result' if it believes it's finished
            if selection.action.name == "result":
                console.print("final result: ", style="green")
                console.print(JSON.from_data(selection.action.parameters))
                task.post_message(
                    "assistant",
                    f"✅ I think the task is done, please review the result: {{selection.action.parameters['value']}}",
                )
                task.status = TaskStatus.FINISHED
                task.save()
                return _thread, True

            # Find the selected action in the tool
            action = desktop.find_action(selection.action.name)
            console.print(f"found action: {{action}}", style="blue")
            if not action:
                console.print(f"action returned not found: {{selection.action.name}}")
                raise SystemError("action not found")

            # Take the selected action
            try:
                action_response = desktop.use(action, **selection.action.parameters)
            except Exception as e:
                raise ValueError(f"Trouble using action: {{e}}")

            console.print(f"action output: {{action_response}}", style="blue")
            if action_response:
                task.post_message(
                    "assistant", f"👁️ Result from taking action: {{action_response}}"
                )

            # Record the action for feedback and tuning
            task.record_action(
                state=EnvState(images=screenshot),
                prompt=response.prompt,
                action=selection.action,
                tool=desktop.ref(),
                result=action_response,
                agent_id=self.name(),
                model=response.model,
            )

            _thread.add_msg(response.msg)
            return _thread, False

        except Exception as e:
            print("Exception taking action: ", e)
            traceback.print_exc()
            task.post_message("assistant", f"⚠️ Error taking action: {{e}} -- retrying...")
            raise e

    @classmethod
    def supported_devices(cls) -> List[Type[Device]]:
        \"""Devices this agent supports

        Returns:
            List[Type[Device]]: A list of supported devices
        \"""
        return [Desktop]

    @classmethod
    def config_type(cls) -> Type[{agent_name}Config]:
        \"""Type of config

        Returns:
            Type[DinoConfig]: Config type
        \"""
        return {agent_name}Config

    @classmethod
    def from_config(cls, config: {agent_name}Config) -> "{agent_name}":
        \"""Create an agent from a config

        Args:
            config (DinoConfig): Agent config

        Returns:
            {agent_name}: The agent
        \"""
        return {agent_name}()

    @classmethod
    def default(cls) -> "{agent_name}":
        \"""Create a default agent

        Returns:
            {agent_name}: The agent
        \"""
        return {agent_name}()

    @classmethod
    def init(cls) -> None:
        \"""Initialize the agent class\"""
        #
        return


Agent = {agent_name}

"""
330 |
--------------------------------------------------------------------------------
/surfkit/cli/templates/agents/surfskelly.py:
--------------------------------------------------------------------------------
1 | from .base import AgentTemplate
2 |
3 |
class SurfSkelly(AgentTemplate):
    """Template that renders a skeleton desktop agent module with stub methods."""

    def template(self, agent_name: str) -> str:
        """Render the agent.py source for an agent named `agent_name`.

        Doubled braces in the f-string are escapes that survive into the
        generated code; `{agent_name}` interpolations are filled in here.
        """
        # Fix: the module-level alias previously read `Agent = SurfPizza`
        # (copy-paste from another agent), which made every generated module
        # fail with NameError on import. It must alias the generated class.
        return f"""
from typing import List, Type, Tuple, Optional
import logging
from typing import Final
import traceback
import time
import os

from devicebay import Device
from agentdesk.device_v1 import Desktop
from rich.console import Console
from pydantic import BaseModel
from surfkit.agent import TaskAgent
from taskara import Task
from mllm import Router

logging.basicConfig(level=logging.INFO)
logger: Final = logging.getLogger(__name__)
logger.setLevel(int(os.getenv("LOG_LEVEL", str(logging.DEBUG))))

console = Console(force_terminal=True)

router = Router.from_env()


class {agent_name}Config(BaseModel):
    pass


class {agent_name}(TaskAgent):
    \"""A desktop agent that uses GPT-4V augmented with OCR and Grounding Dino to solve tasks\"""

    def solve_task(
        self,
        task: Task,
        device: Optional[Device] = None,
        max_steps: int = 30,
    ) -> Task:
        \"""Solve a task

        Args:
            task (Task): Task to solve.
            device (Device, optional): Device to perform the task on. Defaults to None.
            max_steps (int, optional): Max steps to try and solve. Defaults to 30.

        Returns:
            Task: The task
        \"""

        #
        pass

    @classmethod
    def supported_devices(cls) -> List[Type[Device]]:
        \"""Devices this agent supports

        Returns:
            List[Type[Device]]: A list of supported devices
        \"""
        return [Desktop]

    @classmethod
    def config_type(cls) -> Type[{agent_name}Config]:
        \"""Type of config

        Returns:
            Type[DinoConfig]: Config type
        \"""
        return {agent_name}Config

    @classmethod
    def from_config(cls, config: {agent_name}Config) -> "{agent_name}":
        \"""Create an agent from a config

        Args:
            config (DinoConfig): Agent config

        Returns:
            {agent_name}: The agent
        \"""
        return {agent_name}()

    @classmethod
    def default(cls) -> "{agent_name}":
        \"""Create a default agent

        Returns:
            {agent_name}: The agent
        \"""
        return {agent_name}()

    @classmethod
    def init(cls) -> None:
        \"""Initialize the agent class\"""
        #
        return


Agent = {agent_name}

"""
107 |
--------------------------------------------------------------------------------
/surfkit/cli/templates/device.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agentsea/surfkit/8708abd995d45cad5e0a94a9606d6ada6b8a94dd/surfkit/cli/templates/device.py
--------------------------------------------------------------------------------
/surfkit/cli/util.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import sys
3 | from typing import Optional
4 |
5 | from taskara.runtime.base import Tracker, TrackerRuntime
6 |
7 | from surfkit.util import find_open_port
8 |
9 |
def get_git_global_user_config():
    """Return the global git identity formatted as 'Name <email>'.

    Raises:
        subprocess.CalledProcessError: If either git lookup fails.
    """

    def _global_value(field: str) -> str:
        # Query a single global git config value (e.g. user.name).
        proc = subprocess.run(
            ["git", "config", "--global", field],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=True,
        )
        return proc.stdout.strip()

    try:
        # user.name is fetched first, then user.email — same order as before.
        name = _global_value("user.name")
        email = _global_value("user.email")
        return f"{name} <{email}>"
    except subprocess.CalledProcessError as e:
        print("Error getting git global user config: ", e)
        raise
37 |
38 |
def pkg_from_name(name: str) -> str:
    """Return package name from module name"""
    # Hyphens are invalid in Python package names; normalize to lowercase.
    normalized = name.replace("-", "_")
    return normalized.lower()
43 |
44 |
def is_poetry_installed():
    """Check if 'poetry' command is available on the system."""
    try:
        subprocess.run(
            ["poetry", "--version"],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        # Either the binary is missing or it exited non-zero.
        return False
    return True
59 |
60 |
def run_poetry_install():
    """Run 'poetry install' using the subprocess module.

    Raises:
        subprocess.CalledProcessError: If poetry exits non-zero.
        Exception: Any unexpected error is reported and re-raised.
    """
    try:
        print("Running 'poetry install'...")
        subprocess.run(["poetry", "install"], check=True)
        print("'poetry install' executed successfully.")
    except subprocess.CalledProcessError as err:
        print(f"An error occurred while running 'poetry install': {err}")
        raise
    except Exception as err:
        print(f"An unexpected error occurred: {err}")
        raise
73 |
74 |
def is_docker_installed():
    """Check if 'docker' command is available on the system."""
    try:
        subprocess.run(
            ["docker", "version"],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        # Missing binary or unreachable daemon both count as "not installed".
        return False
    return True
89 |
90 |
def build_docker_image(
    dockerfile_path: str,
    tag: str,
    push: bool = True,
    builder: str = "surfbuilder",
    platforms: str = "linux/amd64,linux/arm64",
):
    """Build (and optionally push) a multi-platform Docker image with buildx.

    Args:
        dockerfile_path (str): Path to the Dockerfile.
        tag (str): Image tag to apply.
        push (bool, optional): Push after building. Defaults to True.
        builder (str, optional): buildx builder name. Defaults to "surfbuilder".
        platforms (str, optional): Comma-separated target platforms.

    Raises:
        RuntimeError: If a docker/buildx setup step fails.
        subprocess.CalledProcessError: If a checked docker invocation fails.
    """
    try:
        # Check Docker version
        result = subprocess.run(
            ["docker", "--version"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if result.returncode != 0:
            raise RuntimeError("Docker is not installed or not running.")
        print(result.stdout.decode().strip())

        # Ensure using the correct Docker context.
        # check=True raises CalledProcessError on failure, so no manual
        # returncode check is needed here (the old one was dead code).
        subprocess.run(
            ["docker", "context", "use", "default"],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        # Check if the buildx builder exists; create or select it as needed
        result = subprocess.run(
            ["docker", "buildx", "inspect", builder],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if result.returncode != 0:
            print(f"Builder '{builder}' not found. Creating a new builder.")
            result = subprocess.run(
                ["docker", "buildx", "create", "--name", builder, "--use"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            if result.returncode != 0:
                raise RuntimeError(
                    f"Error creating buildx builder: {result.stderr.decode()}"
                )
        else:
            # Use the existing builder; check=True handles failures.
            subprocess.run(
                ["docker", "buildx", "use", builder],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

        # Ensure the builder is bootstrapped
        result = subprocess.run(
            ["docker", "buildx", "inspect", "--bootstrap"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if result.returncode != 0:
            raise RuntimeError(
                f"Error bootstrapping buildx builder: {result.stderr.decode()}"
            )

        # Prepare the command for building the image
        command = ["docker", "buildx", "build"]
        if push:
            command.append("--push")
        command.extend(
            ["--platform", platforms, "--tag", tag, "--file", dockerfile_path, "."]
        )

        # Build (and optionally push) the image. Output streams live to the
        # user's stdout; check=True raises on failure. (The previous manual
        # returncode branch was unreachable and would have crashed anyway,
        # since stderr is merged into stdout and result.stderr is None.)
        subprocess.run(
            command, check=True, stdout=sys.stdout, stderr=subprocess.STDOUT
        )

        print(
            f"Docker image tagged as {tag} has been successfully built{' and pushed' if push else ''} for platforms {platforms}."
        )

    except subprocess.CalledProcessError as e:
        print(f"Subprocess error: {e.stderr.decode() if e.stderr else str(e)}")
        raise
    except RuntimeError as e:
        print(f"Runtime error: {e}")
        raise
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
        raise
192 |
193 |
def tracker_addr_agent(
    tracker: Tracker,
    agent_runtime: str,
) -> str:
    """Resolve the tracker address reachable from an agent on a given runtime.

    Args:
        tracker (Tracker): The task tracker instance.
        agent_runtime (str): Runtime the agent runs on ("process", "docker", "kube").

    Returns:
        str: A URL the agent can use to reach the tracker.

    Raises:
        ValueError: For unknown or incompatible runtime combinations.
        Exception: If no open port can be found when proxying a kube tracker.
    """
    tracker_runtime = tracker.runtime.name()
    if agent_runtime == "process":
        if tracker_runtime == "process":
            return tracker.runtime.runtime_local_addr(tracker.name, tracker.owner_id)
        elif tracker_runtime == "docker":
            return f"http://localhost:{tracker.port}"
        elif tracker_runtime == "kube":
            # A kube-hosted tracker must be proxied to localhost for a
            # process-local agent.
            port = find_open_port(9070, 9090)
            if not port:
                raise Exception("No open port found for tracker")
            tracker.proxy(port)
            return f"http://localhost:{port}"
        else:
            # Fix: these inner branches test the tracker runtime, so the error
            # must name the tracker runtime (previously reported the agent's).
            raise ValueError(f"Unknown tracker runtime: {tracker_runtime}")
    elif agent_runtime == "docker":
        if tracker_runtime == "process":
            raise ValueError("Cannot use Docker agent with a process tracker")
        elif tracker_runtime == "docker":
            return tracker.runtime.runtime_local_addr(tracker.name, tracker.owner_id)
        elif tracker_runtime == "kube":
            raise ValueError("Cannot use Docker agent with a Kubernetes tracker")
        else:
            raise ValueError(f"Unknown tracker runtime: {tracker_runtime}")
    elif agent_runtime == "kube":
        if tracker_runtime == "process":
            raise ValueError("Cannot use Kubernetes agent with a process tracker")
        elif tracker_runtime == "docker":
            raise ValueError("Cannot use Kubernetes agent with a Docker tracker")
        elif tracker_runtime == "kube":
            return tracker.runtime.runtime_local_addr(tracker.name, tracker.owner_id)
        else:
            raise ValueError(f"Unknown tracker runtime: {tracker_runtime}")
    else:
        raise ValueError(f"Unknown agent runtime: {agent_runtime}")
231 |
232 |
def tracker_addr_local(
    tracker: Tracker,
) -> str:
    """Return a localhost URL for the tracker, creating a proxy when the
    tracker's runtime requires one."""
    port = tracker.port
    if tracker.runtime.requires_proxy():
        open_port = find_open_port(9070, 10070)
        if not open_port:
            raise SystemError("No available ports found")
        tracker.proxy(local_port=open_port)
        port = open_port
    return f"http://localhost:{port}"
243 |
--------------------------------------------------------------------------------
/surfkit/cli/view.py:
--------------------------------------------------------------------------------
1 | # from __future__ import annotations
2 | import atexit
3 | import os
4 | import time
5 | import urllib.parse
6 | import webbrowser
7 | from typing import Optional
8 |
9 | import docker
10 | from agentdesk.key import SSHKeyPair
11 | from agentdesk.proxy import cleanup_proxy, ensure_ssh_proxy
12 | from agentdesk.util import check_command_availability, find_open_port, get_docker_host
13 | from agentdesk.runtime.base import DesktopInstance
14 | from docker.api.client import APIClient
15 | from docker.models.containers import Container
16 |
17 | from surfkit.runtime.agent.base import AgentInstance
18 | from surfkit.runtime.agent.util import pull_image
19 |
20 | UI_IMG = "us-central1-docker.pkg.dev/agentsea-dev/guisurfer/surfkit-ui:latest"
21 |
22 |
def view(
    desk: DesktopInstance,
    agent: AgentInstance,
    tracker_addr: str,
    task_id: str,
    background: bool = False,
    auth_token: Optional[str] = None,
) -> None:
    """Opens the desktop in a browser window.

    Proxies the desktop VNC and the agent API to localhost as needed, ensures
    the surfkit UI container is running, then opens the composed UI URL.

    Args:
        desk (DesktopInstance): The desktop to view.
        agent (AgentInstance): The agent working on the desktop.
        tracker_addr (str): Address of the task tracker.
        task_id (str): ID of the task being worked on.
        background (bool, optional): Return immediately instead of blocking
            and cleaning up on exit. Defaults to False.
        auth_token (Optional[str], optional): Auth token appended to the UI URL.
    """

    is_container = False
    # Fix: proxy_pid must be bound even when no SSH proxy is created, because
    # onexit() below references it via `nonlocal` unconditionally (previously
    # a kube/docker desktop raised NameError during cleanup).
    proxy_pid: Optional[int] = None
    desk_url = "ws://localhost:6080"
    if desk.provider and desk.provider.type == "kube":
        from agentdesk.runtime.kube import KubernetesProvider, KubeConnectConfig

        if not desk.provider.args:
            raise ValueError(f"No args for kube provider while deleting {desk.id}")

        cfg = KubeConnectConfig.model_validate_json(desk.provider.args["cfg"])
        provider = KubernetesProvider(cfg=cfg)

        desk_port, _ = provider.proxy(desk.name)
        print(f"Desktop proxy created on port {desk_port}")

        # Give the proxy a moment to come up before the browser hits it.
        time.sleep(2)
        desk_url = f"http://localhost:{desk_port}"
        is_container = True

    elif desk.provider and desk.provider.type == "docker":
        desk_url = f"http://localhost:{desk.vnc_port}"
        is_container = True

    elif desk.requires_proxy:
        keys = SSHKeyPair.find(name=desk.key_pair_name)
        if not keys:
            raise ValueError(
                f"No key pair found with name {desk.key_pair_name} and is required for this desktop"
            )
        key_pair = keys[0]

        desk_port = find_open_port(6080, 7080)
        if not desk_port:
            raise ValueError("Could not find an open port for the desktop proxy")
        proxy_pid = ensure_ssh_proxy(
            desk_port,
            6080,
            desk.ssh_port,
            "agentsea",
            desk.addr or "",
            key_pair.decrypt_private_key(key_pair.private_key),
        )
        atexit.register(cleanup_proxy, proxy_pid)
        desk_url = f"http://localhost:{desk_port}"

    # Proxy the agent API to localhost when its runtime needs it.
    agent_port = agent.port
    agent_proxy_pid = None
    if agent.runtime.requires_proxy():
        agent_port = find_open_port(9090, 10090)
        print(f"proxying agent to port {agent_port}...")
        if not agent_port:
            raise ValueError("Could not find an open port for the agent proxy")

        agent_proxy_pid = agent.proxy(agent_port)

    check_command_availability("docker")

    host = get_docker_host()
    os.environ["DOCKER_HOST"] = host
    client = docker.from_env()

    host_port = None
    ui_container: Optional[Container] = None

    # Reuse an already-running UI container if one exists.
    for container in client.containers.list():
        if container.image.tags and container.image.tags[0] == UI_IMG:  # type: ignore
            print("found running UI container")
            # Retrieve the host port for the existing container
            host_port = container.attrs["NetworkSettings"]["Ports"]["3000/tcp"][0][  # type: ignore
                "HostPort"
            ]
            ui_container = container  # type: ignore
            break

    if not ui_container:
        print("creating UI container...")
        host_port = find_open_port(9614, 9618)
        if not host_port:
            raise ValueError("Could not find an open port for the UI")

        api_client = APIClient()
        pull_image(UI_IMG, api_client)
        ui_container = client.containers.run(  # type: ignore
            UI_IMG,
            ports={"3000/tcp": host_port},
            detach=True,
        )
        print("waiting for UI container to start...")
        time.sleep(10)

    # URL-encode all query parameters before composing the UI URL.
    encoded_agent_addr = urllib.parse.quote(f"http://localhost:{agent_port}")
    encoded_task_addr = urllib.parse.quote(tracker_addr)
    encoded_vnc_addr = urllib.parse.quote(desk_url)
    encoded_task_id = urllib.parse.quote(task_id)

    if is_container:
        # Construct the URL with the encoded parameters
        url = f"http://localhost:{host_port}/container?agentAddr={encoded_agent_addr}&vncAddr={encoded_vnc_addr}&taskAddr={encoded_task_addr}&taskID={encoded_task_id}"
    else:
        url = f"http://localhost:{host_port}/?agentAddr={encoded_agent_addr}&vncAddr={encoded_vnc_addr}&taskAddr={encoded_task_addr}&taskID={encoded_task_id}"

    if auth_token:
        encoded_auth_token = urllib.parse.quote(auth_token)
        url += f"&authToken={encoded_auth_token}"

    webbrowser.open(url)

    if background:
        return

    def onexit():
        nonlocal proxy_pid, agent_proxy_pid
        print("Cleaning up resources...")

        # Check if the UI container still exists and stop/remove it if so
        if ui_container:
            try:
                container_status = client.containers.get(ui_container.id).status  # type: ignore
                if container_status in ["running", "paused"]:
                    print("stopping UI container...")
                    ui_container.stop()
                    print("removing UI container...")
                    ui_container.remove()
            except docker.errors.NotFound:  # type: ignore
                print("UI container already stopped/removed.")

        # Stop the SSH proxy if required and not already stopped
        if desk.requires_proxy and proxy_pid:
            try:
                print("stopping ssh proxy...")
                cleanup_proxy(proxy_pid)
            except Exception as e:
                print(f"Error stopping SSH proxy: {e}")
            finally:
                proxy_pid = None  # Ensure we don't try to stop it again

        # Stop the agent proxy if required and not already stopped
        if agent.runtime.requires_proxy() and agent_proxy_pid:
            try:
                print("stopping agent proxy...")
                cleanup_proxy(agent_proxy_pid)
            except Exception as e:
                print(f"Error stopping agent proxy: {e}")
            finally:
                agent_proxy_pid = None

    atexit.register(onexit)
    try:
        # Block and keep the proxies alive until the user interrupts.
        while True:
            print(f"proxying desktop vnc '{desk.name}' to localhost:6080...")
            time.sleep(20)
    except KeyboardInterrupt:
        print("Keyboard interrupt received, exiting...")
        onexit()
186 |
--------------------------------------------------------------------------------
/surfkit/config.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | import time
5 | from dataclasses import dataclass
6 | from typing import Optional
7 |
8 | import rootpath
9 | import yaml
10 |
11 | from .env import (
12 | AGENTSEA_AUTH_URL_ENV,
13 | AGENTSEA_HUB_API_URL_ENV,
14 | AGENTSEA_HUB_URL_ENV,
15 | NEBU_SERVER_ENV,
16 | ORIGN_SERVER_ENV,
17 | )
18 |
# Hub / auth endpoints, overridable through the *_ENV environment variables.
AGENTSEA_HUB_URL = os.getenv(AGENTSEA_HUB_URL_ENV, "https://hub.agentsea.ai")
AGENTSEA_HUB_API_URL = os.getenv(
    AGENTSEA_HUB_API_URL_ENV, "https://api.hub.agentsea.ai"
)
AGENTSEA_AUTH_URL = os.getenv(AGENTSEA_AUTH_URL_ENV, "https://auth.hub.agentsea.ai")
ORIGN_SERVER = os.getenv(ORIGN_SERVER_ENV, "https://api.orign.sh")
# Local state directories (config root, database files, logs).
AGENTSEA_HOME = os.path.expanduser(os.environ.get("AGENTSEA_HOME", "~/.agentsea"))
AGENTSEA_DB_DIR = os.path.expanduser(
    os.environ.get("AGENTSEA_DB_DIR", os.path.join(AGENTSEA_HOME, "data"))
)
AGENTSEA_LOG_DIR = os.path.expanduser(
    os.environ.get("AGENTSEA_LOG_DIR", os.path.join(AGENTSEA_HOME, "logs"))
)

NEBU_SERVER = os.environ.get(NEBU_SERVER_ENV, "https://api.nebulous.sh")
# Test mode swaps in a unique throwaway database name per process run.
DB_TEST = os.environ.get("AGENTSEA_DB_TEST", "false") == "true"
DB_NAME = os.environ.get("SURFKIT_DB_NAME", "surfkit.db")
if DB_TEST:
    DB_NAME = f"surfkit_test_{int(time.time())}.db"  # type: ignore
38 |
39 |
@dataclass
class GlobalConfig:
    """User-level surfkit configuration persisted at ~/.agentsea/config.yaml."""

    # API key for the AgentSea hub, if the user has logged in.
    api_key: Optional[str] = None
    # Base URL of the hub.
    hub_address: str = AGENTSEA_HUB_URL

    def write(self) -> None:
        """Persist this config as YAML to ~/.agentsea/config.yaml."""
        home = os.path.expanduser("~")
        config_dir = os.path.join(home, ".agentsea")
        os.makedirs(config_dir, exist_ok=True)
        path = os.path.join(config_dir, "config.yaml")

        # The context manager flushes and closes the file on exit; the
        # previous explicit flush()/close() calls were redundant.
        with open(path, "w") as yaml_file:
            yaml.dump(self.__dict__, yaml_file)

    @classmethod
    def read(cls) -> GlobalConfig:
        """Load the config from ~/.agentsea/config.yaml, or defaults if absent.

        Returns:
            GlobalConfig: The persisted config, or a default instance.
        """
        home = os.path.expanduser("~")
        config_dir = os.path.join(home, ".agentsea")
        os.makedirs(config_dir, exist_ok=True)
        path = os.path.join(config_dir, "config.yaml")

        if not os.path.exists(path):
            return GlobalConfig()

        with open(path, "r") as yaml_file:
            config = yaml.safe_load(yaml_file)
        return GlobalConfig(**config)
69 |
70 |
@dataclass
class Config:
    """Agent project configuration loaded from agent.yaml at the project root."""

    name: str
    summary: str
    description: str

    @classmethod
    def from_project(cls) -> Config:
        """Load the config from agent.yaml in the detected project root.

        Raises:
            SystemError: If the project root or agent.yaml cannot be found.
        """
        root = rootpath.detect()
        if not root:
            raise SystemError("could not detect root python path")

        config_path = os.path.join(root, "agent.yaml")
        if not os.path.exists(config_path):
            raise SystemError("could not detect agent.yaml in project root")

        with open(config_path, "r") as yaml_file:
            data = yaml.safe_load(yaml_file)

        return Config(
            name=data["name"],
            summary=data["summary"],
            description=data["description"],
        )
96 |
--------------------------------------------------------------------------------
/surfkit/db/conn.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | from sqlalchemy import Engine, create_engine
5 | from sqlalchemy.orm import sessionmaker
6 |
7 | from surfkit.config import AGENTSEA_DB_DIR, DB_NAME
8 |
9 | from .models import Base
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 | DB_TYPE = os.environ.get("DB_TYPE", "sqlite")
14 |
15 |
def get_pg_conn() -> Engine:
    """Create a Postgres engine from SURFKIT_-prefixed (or bare) env vars.

    Returns:
        Engine: A SQLAlchemy engine for the configured Postgres database.

    Raises:
        ValueError: If a required variable is set under neither name.
    """

    # Helper function to get environment variable with fallback
    def get_env_var(key: str) -> str:
        # Prefer the SURFKIT_-prefixed variable, fall back to the bare name.
        task_key = f"SURFKIT_{key}"
        value = os.environ.get(task_key)
        if value is None:
            value = os.environ.get(key)
        if value is None:
            # Fix: mention both accepted variable names (old message had a
            # stray "$" and only named the bare key).
            raise ValueError(f"{task_key} or {key} must be set")
        return value

    # Retrieve environment variables with fallbacks
    db_user = get_env_var("DB_USER")
    db_pass = get_env_var("DB_PASS")
    db_host = get_env_var("DB_HOST")
    db_name = get_env_var("DB_NAME")

    # Fix: use the module logger for consistency with get_sqlite_conn
    # (previously logged through the root logger).
    logger.debug(f"connecting to db on postgres host '{db_host}' with db '{db_name}'")
    engine = create_engine(
        f"postgresql+psycopg2://{db_user}:{db_pass}@{db_host}/{db_name}",
        client_encoding="utf8",
        pool_pre_ping=True,
        pool_recycle=300,
    )

    return engine
42 |
43 |
def get_sqlite_conn() -> Engine:
    """Create a SQLite engine backed by the local AgentSea data directory."""
    os.makedirs(AGENTSEA_DB_DIR, exist_ok=True)
    db_path = os.path.join(AGENTSEA_DB_DIR, DB_NAME)
    logger.debug(f"connecting to local sqlite db {db_path}")
    return create_engine(f"sqlite:///{db_path}")
50 |
51 |
# Select the database backend at import time: Postgres when DB_TYPE=postgres,
# otherwise a local SQLite file.
if DB_TYPE == "postgres":
    engine = get_pg_conn()
else:
    engine = get_sqlite_conn()
# Session factory bound to the selected engine.
SessionLocal = sessionmaker(bind=engine)

# Create any tables that do not yet exist for the declarative models.
Base.metadata.create_all(bind=engine)
59 |
60 |
class WithDB:
    """Mixin that gives model classes access to scoped DB sessions."""

    @staticmethod
    def get_db():
        """Get a database connection

        Yields a session and guarantees it is closed after use.

        Example:
            ```
            for session in self.get_db():
                session.add(foo)
            ```
        """
        db = SessionLocal()
        try:
            yield db
        finally:
            # Always release the session, even if the caller raised.
            db.close()
77 |
78 |
def get_db():
    """Get a database connection

    Module-level variant of WithDB.get_db: yields a session and guarantees
    it is closed after use.

    Example:
        ```
        for session in get_db():
            session.add(foo)
        ```
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        # Always release the session, even if the caller raised.
        db.close()
93 |
--------------------------------------------------------------------------------
/surfkit/db/models.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 |
4 | from sqlalchemy import Boolean, Column, Float, Index, Integer, String
5 | from sqlalchemy.orm import declarative_base
6 | from sqlalchemy.inspection import inspect
7 |
def to_dict(instance):
    """Serialize a SQLAlchemy model instance into a dict of column values."""
    columns = inspect(instance).mapper.column_attrs
    result = {}
    for attr in columns:
        result[attr.key] = getattr(instance, attr.key)
    return result
13 |
14 | Base = declarative_base()
15 |
16 |
class SkillRecord(Base):
    """ORM row describing a skill, owned by a user and tied to an agent type."""
    __tablename__ = "skills"
    __table_args__ = (Index("idx_skill_owner_id", "owner_id"),)
    id = Column(String, primary_key=True)
    owner_id = Column(String, nullable=False)
    name = Column(String, nullable=False)
    status = Column(String, nullable=False)
    description = Column(String, nullable=False)
    requirements = Column(String, nullable=True)
    max_steps = Column(Integer, nullable=False)
    review_requirements = Column(String, nullable=True)
    agent_type = Column(String, nullable=False)
    # NOTE(review): threads/example_tasks/tasks/kvs are String columns that
    # presumably hold serialized collections (likely JSON) — confirm against
    # the serialization layer before relying on the format.
    threads = Column(String, nullable=True)
    generating_tasks = Column(Boolean, default=False, server_default="false")
    example_tasks = Column(String, nullable=True)
    tasks = Column(String, nullable=True)
    public = Column(Boolean, nullable=True, default=False)
    min_demos = Column(Integer, nullable=False)
    demos_outstanding = Column(Integer, nullable=False)
    demo_queue_size = Column(Integer, nullable=False)
    kvs = Column(String, nullable=True)
    # Unix timestamps; note `updated` only defaults on insert (no onupdate).
    created = Column(Float, default=time.time)
    updated = Column(Float, default=time.time)
40 |
41 |
class AgentTypeRecord(Base):
    """ORM row describing a type of agent (image, runtimes, env options, etc.)."""
    __tablename__ = "agent_types"

    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    name = Column(String, unique=True, index=True)
    description = Column(String)
    kind = Column(String)
    cmd = Column(String)
    img_repo = Column(String)
    # NOTE(review): versions/env_opts/supports/runtimes and the nullable String
    # columns below appear to hold serialized data — confirm the format.
    versions = Column(String, nullable=True)
    repo = Column(String, nullable=True)
    env_opts = Column(String)
    supports = Column(String)
    runtimes = Column(String)
    owner_id = Column(String)
    public = Column(Boolean)
    icon = Column(String)
    # Unix timestamps; `updated` only defaults on insert (no onupdate).
    created = Column(Float, default=time.time)
    updated = Column(Float, default=time.time)
    resource_requests = Column(String, nullable=True)
    resource_limits = Column(String, nullable=True)
    llm_providers = Column(String, nullable=True)
    devices = Column(String, nullable=True)
    meters = Column(String, nullable=True)
    tags = Column(String, nullable=True)
    labels = Column(String, nullable=True)
    namespace = Column(String, nullable=True)
69 |
70 |
class AgentStatusRecord(Base):
    """ORM row tracking an agent's current status and active task (if any)."""
    __tablename__ = "agent_status"

    agent_id = Column(String, primary_key=True)
    status = Column(String)
    task_id = Column(String, nullable=True)
77 |
78 |
class AgentInstanceRecord(Base):
    """ORM row for a running (or previously started) agent instance."""
    __tablename__ = "agent_instances"

    id = Column(String, primary_key=True)
    name = Column(String, unique=True, index=True)
    full_name = Column(String)
    type = Column(String)
    # Which runtime hosts this instance plus its serialized configuration.
    runtime_name = Column(String)
    runtime_config = Column(String)
    version = Column(String, nullable=True)
    status = Column(String)
    tags = Column(String, nullable=True)
    labels = Column(String, nullable=True)
    # Port the agent's API listens on.
    port = Column(Integer)
    icon = Column(String, nullable=True)
    owner_id = Column(String, nullable=True)
    # Unix timestamps; `updated` only defaults on insert (no onupdate).
    created = Column(Float, default=time.time)
    updated = Column(Float, default=time.time)
97 |
--------------------------------------------------------------------------------
/surfkit/env.py:
--------------------------------------------------------------------------------
# Names of environment variables consulted across surfkit.
AGENTD_ADDR_ENV = "AGENTD_ADDR"
AGENTD_PRIVATE_SSH_KEY_ENV = "AGENTD_PRIVATE_SSH_KEY"
AGENTSEA_HUB_URL_ENV = "AGENTSEA_HUB_URL"
AGENTSEA_HUB_API_URL_ENV = "AGENTSEA_HUB_API_URL"
AGENTSEA_AUTH_URL_ENV = "AGENTSEA_AUTH_URL"
# NOTE(review): "AGENTESEA" is misspelled (likely meant AGENTSEA), but both the
# constant name and its value are kept as-is for backward compatibility with
# existing environments and callers.
AGENTESEA_HUB_API_KEY_ENV = "AGENTESEA_HUB_API_KEY"
NEBU_SERVER_ENV = "NEBU_SERVER"
ORIGN_SERVER_ENV = "ORIGN_SERVER"
9 |
--------------------------------------------------------------------------------
/surfkit/env_opts.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Optional
3 |
4 | import typer
5 | import openai
6 | from mllm import Router
7 |
8 | from surfkit.types import AgentType
9 |
10 |
def find_local_llm_keys(typ: AgentType) -> Optional[dict]:
    """Look up API keys in the local environment for the agent type's
    preferred LLM providers.

    Returns:
        Optional[dict]: Mapping of key env var name -> key, or None when the
        agent type declares no provider preference.

    Raises:
        ValueError: If a provider has no known key env var, or none of the
            preferred providers have a key set locally.
    """
    if not (typ.llm_providers and typ.llm_providers.preference):
        return None

    found = {}
    for provider_name in typ.llm_providers.preference:
        api_key_env = Router.provider_api_keys.get(provider_name)
        if not api_key_env:
            raise ValueError(f"no api key env for provider {provider_name}")
        key = os.getenv(api_key_env)
        if not key:
            print("no api key found locally for provider: ", provider_name)
            continue

        typer.echo(f"Added local API key for {provider_name}")
        found[api_key_env] = key

    if not found:
        raise ValueError(
            "no api keys found locally for any of the providers in the agent type"
        )
    return found
36 |
37 |
def is_api_key_valid(api_key: str) -> bool:
    """Check whether an OpenAI API key is accepted, by listing models."""
    client = openai.OpenAI(api_key=api_key)
    try:
        # A successful authenticated call means the key works.
        client.models.list()
        return True
    except openai.AuthenticationError:
        return False
46 |
47 |
def find_llm_keys(typ: AgentType, llm_providers_local: bool) -> Optional[dict]:
    """Collect LLM provider API keys for an agent type.

    Tries, in order: local env keys adopted automatically (when
    ``llm_providers_local``), local env keys confirmed interactively, then
    interactive key entry.

    Args:
        typ (AgentType): Agent type whose ``llm_providers.preference`` is used.
        llm_providers_local (bool): Adopt local env keys without prompting.

    Returns:
        Optional[dict]: Mapping of key env var name -> key, or None when the
        agent type declares no LLM providers.

    Raises:
        ValueError: If a provider has no known key env var, or no keys are
            found/entered at all.
    """
    env_vars = None
    if typ.llm_providers and typ.llm_providers.preference:
        found = {}

        if llm_providers_local:
            found = find_local_llm_keys(typ)

        # NOTE(review): find_local_llm_keys raises rather than returning None
        # when no keys exist, so this None branch looks unreachable — confirm.
        if found is None:
            found = {}
        else:
            env_vars = found

        if not found:
            typer.echo("\nThis agent requires one of the following LLM API keys:")
            for provider_name in typ.llm_providers.preference:
                api_key_env = Router.provider_api_keys.get(provider_name)
                if api_key_env:
                    typer.echo(f" - {api_key_env}")
            typer.echo("")

            # First pass: offer to adopt keys already present in the environment.
            for provider_name in typ.llm_providers.preference:
                api_key_env = Router.provider_api_keys.get(provider_name)
                if not api_key_env:
                    raise ValueError(f"No API key env for provider {provider_name}")

                if found.get(api_key_env):
                    continue

                key = os.getenv(api_key_env)
                if not key:
                    continue

                add = typer.confirm(
                    f"Would you like to add your local API key for '{provider_name}'"
                )
                if add:
                    found[api_key_env] = key

            # Second pass: prompt the user to type keys in directly.
            if not found:
                for provider_name in typ.llm_providers.preference:
                    while True:
                        add = typer.confirm(
                            f"Would you like to enter an API key for '{provider_name}'"
                        )
                        if add:
                            api_key_env = Router.provider_api_keys.get(provider_name)
                            # NOTE(review): a provider with no key env loops back
                            # to the confirm prompt until the user declines.
                            if not api_key_env:
                                continue
                            response = typer.prompt(api_key_env)
                            # TODO: validate other providers
                            if api_key_env != "OPENAI_API_KEY":
                                found[api_key_env] = response
                                break
                            if is_api_key_valid(response):
                                found[api_key_env] = response
                                break
                            else:
                                typer.echo(
                                    f"The API Key is not valid for '{provider_name}'. Please try again."
                                )
                        else:
                            break

            if not found:
                raise ValueError(
                    "No valid API keys given for any of the llm providers in the agent type"
                )

            env_vars = found

    return env_vars
120 |
121 |
def find_env_opts(typ: AgentType, use_local: bool) -> Optional[dict]:
    """Collect values for an agent type's declared env options.

    Args:
        typ (AgentType): Agent type whose ``env_opts`` are resolved.
        use_local (bool): Adopt local env values without prompting.

    Returns:
        Optional[dict]: Mapping of env opt name -> value for every opt that was
        resolved, or None when nothing was collected.
    """
    env_vars: Optional[dict] = None

    for env_opt in typ.env_opts:
        found = {}
        key = os.getenv(env_opt.name)
        if key:
            if use_local:
                found[env_opt.name] = key
                typer.echo(f"Added local API key for {env_opt.name}")
            else:
                add = typer.confirm(
                    f"Would you like to add your local API key for '{env_opt.name}'"
                )
                if add:
                    found[env_opt.name] = key

        if not found:
            if not env_opt.required:
                # Optional value: only prompt if the user opts in.
                add = typer.confirm(
                    f"Would you like to enter an API key for '{env_opt.name}'"
                )
                if add:
                    response = typer.prompt(f"{env_opt.name}")
                    found[env_opt.name] = response
            else:
                response = typer.prompt(f"Please enter {env_opt.name}")
                found[env_opt.name] = response

        if found:
            # Fix: accumulate across all env opts. The previous code assigned
            # `env_vars = found` each iteration, discarding every option
            # collected before the last one.
            if env_vars is None:
                env_vars = {}
            env_vars.update(found)

    return env_vars
155 |
156 |
def find_envs(typ: AgentType, use_local: bool) -> Optional[dict]:
    """Gather all env vars for an agent type: LLM keys plus env options."""
    # Resolve LLM keys first, then generic env options — same order as before.
    merged: Optional[dict] = None
    for part in (find_llm_keys(typ, use_local), find_env_opts(typ, use_local)):
        if part:
            if merged is None:
                merged = part
            else:
                merged.update(part)
    return merged
170 |
--------------------------------------------------------------------------------
/surfkit/hub.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import requests
4 |
5 | from .config import AGENTSEA_AUTH_URL
6 | from .server.models import V1UserProfile
7 |
8 |
class HubAuth:
    """The Agentsea Hub Auth"""

    def __init__(self, hub_auth_url: str = AGENTSEA_AUTH_URL) -> None:
        self.hub_auth_url = hub_auth_url

    def _get_json(self, path: str, token: str) -> dict:
        # Authenticated GET against the hub; raises on HTTP errors.
        url = f"{self.hub_auth_url}{path}"
        headers = {"Authorization": f"Bearer {token}"}
        response = requests.get(url, headers=headers)
        response.raise_for_status()
        return response.json()

    def get_api_key(self, token: str) -> str:
        """Get the api key from the hub"""
        key_data = self._get_json("/v1/users/me/keys", token)
        return key_data["keys"][0]["key"]

    def get_user_info(self, token: str) -> V1UserProfile:
        """Get user info from the hub"""
        user_data = self._get_json("/v1/users/me", token)
        return V1UserProfile.model_validate(user_data)
36 |
--------------------------------------------------------------------------------
/surfkit/learn/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 |
class Teacher(ABC):
    """Abstract base for components that teach an agent a skill."""

    @abstractmethod
    def teach(self, *args, **kwargs):
        # Subclasses implement the actual teaching strategy.
        pass
9 |
10 |
class LLMTeach(Teacher):
    """LLM-backed teacher placeholder.

    Still abstract: `teach` is not implemented here, so this class cannot be
    instantiated until a subclass provides it.
    """
    pass
13 |
--------------------------------------------------------------------------------
/surfkit/prompt/annots.py:
--------------------------------------------------------------------------------
1 | from skillpacks.chat import (
2 | ChatRequest,
3 | ContentItem,
4 | ImageUrlContent,
5 | MessageItem,
6 | Prompt,
7 | )
8 |
9 |
def create_description_text(action: str, with_image: bool = False) -> str:
    """
    Create a text prompt describing what's happened between two images.
    """
    # Assemble the prompt pieces, then join; identical text to a straight
    # concatenation.
    parts = [
        f"The first image is the before image, and the second image is the after\n"
        f"image of a GUI interaction. The action that occurred is: {action}. "
        "Can you give a task description for what was accomplished?\n"
        "The goal would be for an agent to look at the first image and the task "
        "description which would result in the second image, for example "
        '"click on login button" would be a good description, or '
        '"move mouse to be over user icon", or "type text \'good fellas\'"\n'
    ]
    if with_image:
        parts.append(" ")
    return "".join(parts)
26 |
27 |
def create_reason_text(
    action: str, task_description: str, with_image: bool = False
) -> str:
    """
    Build the text prompt asking for the reasoning chain a user would need
    to connect the given action with the desired task outcome.

    Args:
        action: The GUI action that occurred between the two images.
        task_description: The task being worked on.
        with_image: When True, append a trailing space so image content
            can directly follow the text.

    Returns:
        The assembled prompt text.
    """
    parts = [
        "The first image is the before image, and the second image is the after\n",
        f"image of a GUI interaction. The action that occurred is: {action}. ",
        "Can you give a reasoning chain for what the user would need to think\n",
        "through in order to take the correct action with respect to the task? ",
        f"The current task is: {task_description}\n",
    ]
    text = "".join(parts)
    if with_image:
        text += " "
    return text
45 |
46 |
def create_validation_text(
    action: str, task_description: str, with_image: bool = False
) -> str:
    """
    Build the text prompt asking whether the action completed successfully
    with respect to the given task.

    Args:
        action: The GUI action that occurred between the two images.
        task_description: The task being validated.
        with_image: When True, append a trailing space so image content
            can directly follow the text.

    Returns:
        The assembled prompt text.
    """
    parts = [
        "The first image is the before image, and the second image is the after\n",
        f"image of a GUI interaction. The action that occurred is: {action}. ",
        "Considering the task we want to accomplish,\n",
        "please give me the reason why this action completed successfully or not. ",
        f"The current task is: {task_description}\n",
    ]
    text = "".join(parts)
    if with_image:
        text += " "
    return text
64 |
65 |
def create_swift_description_prompt(
    image1: str,
    image2: str,
    action: str,
    answer: str,
) -> dict:
    """
    Build a description prompt as a plain dict with separate 'messages' and
    'images' fields, for consumers (e.g. a Swift client) that expect that
    structure rather than inline image content.
    """
    user_text = create_description_text(action, with_image=True)
    conversation = [
        {"role": "user", "content": user_text},
        {"role": "assistant", "content": answer},
    ]
    return {"messages": conversation, "images": [image1, image2]}
91 |
92 |
def create_swift_reason_prompt(
    image1: str,
    image2: str,
    action: str,
    task_description: str,
    answer: str,
) -> dict:
    """
    Build a reasoning-chain prompt as a plain dict with separate
    'messages' and 'images' fields.
    """
    user_text = create_reason_text(action, task_description, with_image=True)
    conversation = [
        {"role": "user", "content": user_text},
        {"role": "assistant", "content": answer},
    ]
    return {"messages": conversation, "images": [image1, image2]}
114 |
115 |
def create_swift_validation_prompt(
    image1: str,
    image2: str,
    action: str,
    task_description: str,
    answer: str,
) -> dict:
    """
    Build a validation prompt as a plain dict with separate
    'messages' and 'images' fields.
    """
    user_text = create_validation_text(action, task_description, with_image=True)
    conversation = [
        {"role": "user", "content": user_text},
        {"role": "assistant", "content": answer},
    ]
    return {"messages": conversation, "images": [image1, image2]}
137 |
138 |
def create_orign_description_prompt(
    image1: str,
    image2: str,
    action: str,
) -> ChatRequest:
    """
    Build a ChatRequest asking the model to describe what task was
    accomplished between two screenshots, with both images attached as
    image_url content items.
    """
    text_item = ContentItem(
        type="text", text=create_description_text(action, with_image=False)
    )
    image_items = [
        ContentItem(type="image_url", image_url=ImageUrlContent(url=url))
        for url in (image1, image2)
    ]
    message = MessageItem(role="user", content=[text_item, *image_items])
    return ChatRequest(prompt=Prompt(messages=[message]), max_tokens=500)
168 |
169 |
def create_orign_reason_prompt(
    image1: str,
    image2: str,
    action: str,
    task_description: str,
) -> ChatRequest:
    """
    Build a ChatRequest asking the model for the reasoning chain tying the
    action to the task, with both images attached as image_url content items.
    """
    text_item = ContentItem(
        type="text",
        text=create_reason_text(action, task_description, with_image=False),
    )
    image_items = [
        ContentItem(type="image_url", image_url=ImageUrlContent(url=url))
        for url in (image1, image2)
    ]
    message = MessageItem(role="user", content=[text_item, *image_items])
    return ChatRequest(prompt=Prompt(messages=[message]), max_tokens=500)
200 |
201 |
def create_orign_validation_prompt(
    image1: str,
    image2: str,
    action: str,
    task_description: str,
) -> ChatRequest:
    """
    Build a ChatRequest asking the model to validate whether the action
    achieved the desired task outcome, with both images attached as
    image_url content items.
    """
    text_item = ContentItem(
        type="text",
        text=create_validation_text(action, task_description, with_image=False),
    )
    image_items = [
        ContentItem(type="image_url", image_url=ImageUrlContent(url=url))
        for url in (image1, image2)
    ]
    message = MessageItem(role="user", content=[text_item, *image_items])
    return ChatRequest(prompt=Prompt(messages=[message]), max_tokens=500)
232 |
--------------------------------------------------------------------------------
/surfkit/runtime/agent/hub.py:
--------------------------------------------------------------------------------
1 | # type: ignore
2 | from typing import Iterator, List, Optional, Type, Union
3 |
4 | from pydantic import BaseModel
5 | from taskara import V1Task
6 |
7 | from surfkit.server.models import V1AgentInstance, V1SolveTask
8 | from surfkit.types import AgentType
9 |
10 | from .base import AgentInstance, AgentRuntime
11 |
12 |
class ConnectConfig(BaseModel):
    # Optional timeout (seconds) for connecting to the hub runtime.
    timeout: Optional[int] = None
15 |
16 |
class HubAgentRuntime(AgentRuntime):
    """Agent runtime backed by the Agentsea Hub.

    NOTE(review): every method below is an unimplemented stub (`pass`), so
    calls currently return None regardless of the annotated return types.
    The module is marked `# type: ignore` for this reason.
    """

    @classmethod
    def name(cls) -> str:
        """Runtime name; identified by the class name."""
        return cls.__name__

    @classmethod
    def connect_config_type(cls) -> Type[ConnectConfig]:
        """Pydantic model accepted by `connect`."""
        return ConnectConfig

    @classmethod
    def connect(cls, cfg: ConnectConfig) -> "HubAgentRuntime":
        # Stub: declared to return a runtime but currently returns None.
        pass

    def run(
        self,
        agent_type: AgentType,
        name: str,
        version: Optional[str] = None,
        env_vars: Optional[dict] = None,
        llm_providers_local: bool = False,
        owner_id: Optional[str] = None,
    ) -> AgentInstance:
        # Stub: should launch an agent of `agent_type` named `name`.
        pass

    def solve_task(
        self,
        agent_name: str,
        task: V1SolveTask,
        follow_logs: bool = False,
        attach: bool = False,
    ) -> None:
        # Stub: should submit `task` to the named agent.
        pass

    def list(self) -> List[AgentInstance]:
        # Stub: should list running agent instances.
        pass

    def get(self, name: str) -> AgentInstance:
        # Stub: should fetch a single agent instance by name.
        pass

    def proxy(
        self,
        name: str,
        local_port: Optional[int] = None,
        pod_port: int = 8000,
        background: bool = True,
    ) -> None:
        # Stub: should proxy a local port to the agent.
        pass

    def delete(self, name: str) -> None:
        # Stub: should delete the named agent instance.
        pass

    def clean(self) -> None:
        # Stub: should remove all instances owned by this runtime.
        pass

    def logs(self, name: str, follow: bool = False) -> Union[str, Iterator[str]]:
        """
        Fetches the logs from the specified pod.

        Parameters:
            name (str): The name of the pod.

        Returns:
            str: The logs from the pod.
        """
        # Stub: not implemented.
        pass

    def runtime_local_addr(self, name: str, owner_id: Optional[str] = None) -> str:
        """
        Returns the local address of the agent with respect to the runtime
        """
        instances = AgentInstance.find(name=name, owner_id=owner_id)
        if not instances:
            raise ValueError(f"No instances found for name '{name}'")
        instance = instances[0]
        # NOTE(review): incomplete — `instance` is fetched but unused and the
        # method falls through, returning None despite the `str` annotation.
        pass
92 |
--------------------------------------------------------------------------------
/surfkit/runtime/agent/load.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Type
2 |
3 | from pydantic import BaseModel
4 |
5 | from surfkit.server.models import V1RuntimeConnect
6 |
7 | from .base import AgentRuntime
8 | from .docker import DockerAgentRuntime, DockerConnectConfig
9 | from .kube import KubeAgentRuntime, KubeConnectConfig
10 | from .process import ProcessAgentRuntime, ProcessConnectConfig
11 |
12 |
class AgentRuntimeConfig(BaseModel):
    # Explicit provider name; note load_agent_runtime selects by `preference`
    # and the per-runtime configs below, not by this field.
    provider: Optional[str] = None
    # Connection settings for each supported runtime; only runtimes whose
    # config is set can be selected.
    docker_config: Optional[DockerConnectConfig] = None
    kube_config: Optional[KubeConnectConfig] = None
    process_config: Optional[ProcessConnectConfig] = None
    # Order in which runtimes are tried by load_agent_runtime.
    preference: List[str] = ["kube", "docker", "process"]
19 |
20 |
def runtime_from_name(name: str) -> Type[AgentRuntime]:
    """Look up an agent runtime class in RUNTIMES by its registered name.

    Raises:
        ValueError: If no runtime matches `name`.
    """
    match = next((runtime for runtime in RUNTIMES if runtime.name() == name), None)
    if match is None:
        raise ValueError(f"Unknown runtime '{name}'")
    return match
26 |
27 |
def load_agent_runtime(cfg: AgentRuntimeConfig) -> AgentRuntime:
    """Connect to the first runtime in `cfg.preference` that has a config set.

    Args:
        cfg: Runtime configuration; a runtime is only eligible when its
            corresponding *_config field is populated.

    Returns:
        A connected AgentRuntime.

    Raises:
        ValueError: If no preferred runtime has a matching config.
    """
    for pref in cfg.preference:
        if pref == KubeAgentRuntime.name() and cfg.kube_config:
            return KubeAgentRuntime.connect(cfg.kube_config)
        elif pref == DockerAgentRuntime.name() and cfg.docker_config:
            return DockerAgentRuntime.connect(cfg.docker_config)
        elif pref == ProcessAgentRuntime.name() and cfg.process_config:
            return ProcessAgentRuntime.connect(cfg.process_config)
    # The old message ("Unknown provider: ...") was misleading: reaching here
    # means no runtime in the preference list had a config, not that the
    # provider name was unrecognized.
    raise ValueError(
        f"No runtime config matched preference {cfg.preference}; "
        f"provider={cfg.provider!r}"
    )
37 |
38 |
39 | RUNTIMES: List[Type[AgentRuntime]] = [DockerAgentRuntime, KubeAgentRuntime, ProcessAgentRuntime] # type: ignore
40 |
41 |
def load_from_connect(connect: V1RuntimeConnect) -> AgentRuntime:
    """Instantiate an agent runtime from a serialized V1RuntimeConnect.

    Finds the runtime registered under `connect.name`, validates the raw
    connect config against that runtime's config model, and connects.

    Raises:
        ValueError: If `connect.name` matches no registered runtime.
    """
    for runt in RUNTIMES:
        if connect.name == runt.name():
            # Removed leftover debug prints of the raw connect config.
            cfg = runt.connect_config_type().model_validate(connect.connect_config)
            return runt.connect(cfg)

    raise ValueError(f"Unknown runtime: {connect.name}")
51 |
--------------------------------------------------------------------------------
/surfkit/runtime/agent/util.py:
--------------------------------------------------------------------------------
1 | import random
2 | import string
3 |
4 | from docker.api.client import APIClient
5 | from docker.errors import APIError
6 | from namesgenerator import get_random_name
7 | from tqdm import tqdm
8 |
9 | from surfkit.types import AgentType
10 |
11 |
def instance_name(type: AgentType) -> str:
    """Generate a unique instance name: <agent-type>-<word>-<5 random chars>.

    Raises:
        ValueError: If the random-name generator fails or does not produce
            a two-part name.
    """
    suffix = "".join(random.choices(string.ascii_letters + string.digits, k=5))
    generated = get_random_name("-")
    if not generated:
        raise ValueError("Could not generate a random name")
    parts = generated.split("-")
    if len(parts) != 2:
        raise ValueError("Could not generate a random name with 2 parts")
    # Only the second word of the generated "adjective-noun" pair is kept.
    return f"{type.name.lower()}-{parts[1].lower()}-{suffix.lower()}"
23 |
24 |
def pull_image(img: str, api_client: APIClient):
    """
    Pulls a Docker image with progress bars for each layer.

    Args:
        img (str): The Docker image to pull.
        api_client (APIClient): The Docker API client.
    """

    print(f"Pulling Docker image '{img}'...")

    # layer id -> tqdm bar, and layer id -> last byte count seen for that layer.
    progress_bars = {}
    layers = {}

    try:
        # The docker API streams one decoded JSON object per status update.
        for line in api_client.pull(img, stream=True, decode=True):
            if "id" in line and "progressDetail" in line:
                layer_id = line["id"]
                progress_detail = line["progressDetail"]
                current = progress_detail.get("current", 0)
                total = progress_detail.get("total", 0)

                # Only layers that report a total size get a progress bar.
                if total:
                    if layer_id not in layers:
                        progress_bars[layer_id] = tqdm(
                            total=total,
                            desc=f"Layer {layer_id}",
                            leave=False,
                            ncols=100,
                        )
                        layers[layer_id] = 0

                    # Progress is absolute, so set the bar position directly.
                    layers[layer_id] = current
                    progress_bars[layer_id].n = current
                    progress_bars[layer_id].refresh()
            elif "status" in line and "id" in line:
                # Non-progress updates (e.g. "Already exists", "Pull complete").
                print(f"Status update for {line['id']}: {line['status']}")
            elif "error" in line:
                raise APIError(line["error"])

    # NOTE(review): errors are logged and swallowed here, so callers cannot
    # distinguish a failed pull from a successful one — confirm intended.
    except APIError as e:
        print(f"Error pulling Docker image: {e.explanation}")
    except Exception as e:
        print(f"An unexpected error occurred: {str(e)}")
    finally:
        # Close all progress bars
        for bar in progress_bars.values():
            bar.n = bar.total  # Ensure the progress bar is full before closing
            bar.refresh()
            bar.close()

        print("")
77 |
--------------------------------------------------------------------------------
/surfkit/runtime/container/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Generic, List, Optional, Tuple, Type, TypeVar
3 |
4 | from pydantic import BaseModel
5 |
6 | R = TypeVar("R", bound="ContainerRuntime")
7 | C = TypeVar("C", bound="BaseModel")
8 |
9 |
class ContainerRuntime(Generic[C, R], ABC):
    """Abstract interface for launching and managing containers.

    Type parameters:
        C: The pydantic connect-config model for the runtime.
        R: The concrete runtime type returned by `connect`.
    """

    @classmethod
    def name(cls) -> str:
        """Runtime name; defaults to the class name."""
        return cls.__name__

    @classmethod
    @abstractmethod
    def connect_config_type(cls) -> Type[C]:
        """The config model accepted by `connect`."""
        pass

    @classmethod
    @abstractmethod
    def connect(cls, cfg: C) -> R:
        """Create a runtime instance from a connect config."""
        pass

    @abstractmethod
    def create(
        self,
        image: str,
        name: Optional[str] = None,
        env_vars: Optional[dict] = None,
        mem_request: Optional[str] = "500m",
        mem_limit: Optional[str] = "2Gi",
        cpu_request: Optional[str] = "1",
        cpu_limit: Optional[str] = "4",
        gpu_mem: Optional[str] = None,
    ) -> None:
        """Launch a container from `image` with the given resource settings."""
        pass

    @abstractmethod
    def call(
        self,
        name: str,
        path: str,
        method: str,
        port: int = 8080,
        data: Optional[dict] = None,
        headers: Optional[dict] = None,
    ) -> Tuple[int, str]:
        """HTTP-call the named container; returns (status_code, body_text)."""
        pass

    @abstractmethod
    def delete(self, name: str) -> None:
        """Delete the named container."""
        pass

    @abstractmethod
    def list(self) -> List[str]:
        """List names of containers managed by this runtime."""
        pass

    @abstractmethod
    def clean(self) -> None:
        """Delete all containers managed by this runtime."""
        pass

    @abstractmethod
    def logs(self, name: str) -> str:
        """Fetch logs for the named container."""
        pass
67 |
--------------------------------------------------------------------------------
/surfkit/runtime/container/docker.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Tuple, Type, TypeVar
2 |
3 | import docker
4 | import requests
5 | from agentdesk.util import find_open_port
6 | from docker.errors import NotFound
7 | from namesgenerator import get_random_name
8 | from pydantic import BaseModel
9 |
10 | from .base import ContainerRuntime
11 |
12 |
class ConnectConfig(BaseModel):
    # No options: the docker runtime always connects via the local daemon
    # (docker.from_env()).
    pass
15 |
16 |
class DockerRuntime(ContainerRuntime):
    """A container runtime that uses docker"""

    def __init__(self) -> None:
        self.client = docker.from_env()

    @classmethod
    def name(cls) -> str:
        return "docker"

    @classmethod
    def connect_config_type(cls) -> Type[ConnectConfig]:
        return ConnectConfig

    @classmethod
    def connect(cls, cfg: ConnectConfig) -> "DockerRuntime":
        # No connection options; always uses the local docker daemon.
        return cls()

    def create(
        self,
        image: str,
        name: Optional[str] = None,
        env_vars: Optional[dict] = None,
        mem_request: Optional[str] = "500m",
        mem_limit: Optional[str] = "2Gi",
        cpu_request: Optional[str] = "1",
        cpu_limit: Optional[str] = "4",
        gpu_mem: Optional[str] = None,
    ) -> None:
        """Run a detached container from `image` on the host network.

        Parameters:
            image (str): The image to run.
            name (str, optional): Container name; randomly generated if omitted.
            env_vars (dict, optional): Environment variables for the container.
            mem_request (str, optional): Memory reservation (soft limit).
            mem_limit (str, optional): Memory hard limit.
            cpu_request (str, optional): Unused here; kept for interface parity.
            cpu_limit (str, optional): CPU limit in whole CPUs (mapped to nano_cpus).
            gpu_mem (str, optional): Unused here; kept for interface parity.
        """
        if not name:
            name = get_random_name("-")

        # Label so list()/clean() can find containers we provisioned.
        labels = {
            "provisioner": "surfkit",
        }

        # (Removed dead code: a port was probed with find_open_port but never used.)
        print("running container")
        container = self.client.containers.run(
            image,
            network_mode="host",
            environment=env_vars,
            detach=True,
            labels=labels,
            name=name,
            mem_limit=mem_limit,
            mem_reservation=mem_request,
            nano_cpus=int(float(cpu_limit) * 1e9),  # type: ignore
        )
        # containers.run can return bytes for non-detached runs; use isinstance
        # rather than the previous `type(...) != bytes` comparison.
        if container and not isinstance(container, bytes):
            print(f"ran container '{container.id}'")  # type: ignore

    def call(
        self,
        name: str,
        path: str,
        method: str,
        port: int = 8080,
        data: Optional[dict] = None,
        headers: Optional[dict] = None,
    ) -> Tuple[int, str]:
        """
        Makes an HTTP request to the specified container.

        Parameters:
            name (str): The name of the container.
            path (str): The endpoint route (e.g., '/api/data').
            method (str): HTTP method (e.g., 'GET', 'POST').
            port (int): The port on which the container's server is listening.
            data (dict, optional): URL parameters for GET/DELETE requests,
                JSON body for other methods.
            headers (dict, optional): HTTP headers.

        Returns:
            Tuple[int, str]: The response status code and body text.
        """
        try:
            container = self.client.containers.get(name)
            print(f"Container '{name}' found.")
        except NotFound:
            print(f"Container '{name}' does not exist.")
            raise
        except Exception as e:
            print(f"An unexpected error occurred calling docker container: {e}")
            raise

        # Containers run with host networking, so they are reachable on localhost.
        url = f"http://localhost:{port}{path}"

        # Dynamically calling the method based on 'method' parameter
        http_request = getattr(requests, method.lower(), requests.get)

        if not callable(http_request):
            raise ValueError(f"Unsupported HTTP method: {method}")

        if method.upper() in ["GET", "DELETE"]:  # These methods should use params
            response = http_request(url, params=data, headers=headers)
        else:
            response = http_request(url, json=data, headers=headers)
        return response.status_code, response.text

    def list(self) -> List[str]:
        """List names of running containers provisioned by surfkit."""
        label_filter = {"label": ["provisioner=surfkit"]}
        containers = self.client.containers.list(filters=label_filter)
        return [container.name for container in containers]  # type: ignore

    def delete(self, name: str) -> None:
        """Force-remove the named container.

        Raises:
            NotFound: If the container does not exist; other failures are
                logged and swallowed.
        """
        try:
            container = self.client.containers.get(name)
            container.remove(force=True)  # type: ignore
            print(f"Successfully deleted container: {name}")
        except NotFound:
            # Propagate so callers can distinguish "already gone".
            raise
        except Exception as e:
            print(f"Failed to delete container '{name}': {e}")

    def clean(self) -> None:
        """Force-remove every container provisioned by surfkit (running or not)."""
        label_filter = {"label": ["provisioner=surfkit"]}
        containers = self.client.containers.list(filters=label_filter, all=True)

        for container in containers:
            try:
                container_name_or_id = container.name  # type: ignore
                container.remove(force=True)  # type: ignore
                print(f"Deleted container: {container_name_or_id}")
            except Exception as e:
                print(f"Failed to delete container: {e}")

    def logs(self, name: str) -> str:
        """
        Fetches the logs from the specified container.

        Parameters:
            name (str): The name of the container.

        Returns:
            str: The logs from the container.
        """
        try:
            container = self.client.containers.get(name)
            return container.logs().decode("utf-8")  # type: ignore
        except NotFound:
            print(f"Container '{name}' does not exist.")
            raise
        except Exception as e:
            print(f"An unexpected error occurred retrieving logs: {e}")
            raise
--------------------------------------------------------------------------------
/surfkit/runtime/container/kube.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import json
3 | import os
4 | from typing import Literal, List, Optional, Tuple, Type
5 |
6 | import httpx
7 | from google.auth.transport.requests import Request
8 | from google.cloud import container_v1
9 | from google.oauth2 import service_account
10 | from kubernetes import client, config
11 | from kubernetes.client.rest import ApiException
12 | from namesgenerator import get_random_name
13 | from pydantic import BaseModel
14 | from tenacity import retry, stop_after_attempt
15 |
16 | from .base import ContainerRuntime
17 |
18 |
class GKEOpts(BaseModel):
    # Options for connecting to a Google Kubernetes Engine cluster.
    cluster_name: str
    region: str
    # Raw JSON string of the GCP service account credentials.
    service_account_json: str
23 |
24 |
class LocalOpts(BaseModel):
    # Path to a kubeconfig file; defaults to $KUBECONFIG or ~/.kube/config.
    path: Optional[str] = os.getenv("KUBECONFIG", os.path.expanduser("~/.kube/config"))
27 |
28 |
class ConnectConfig(BaseModel):
    # Which cluster flavor to connect to: a GKE cluster or a local kubeconfig.
    provider: Literal["gke", "local"] = "local"
    # Kubernetes namespace for pod operations.
    namespace: str = "default"
    # Required when provider == "gke".
    gke_opts: Optional[GKEOpts] = None
    # Used when provider == "local"; falls back to LocalOpts() defaults.
    local_opts: Optional[LocalOpts] = None
    branch: Optional[str] = None
35 |
36 |
def gke_opts_from_env(
    gke_sa_json=os.getenv("GKE_SA_JSON"),
    cluster_name=os.getenv("CLUSTER_NAME"),
    region=os.getenv("CLUSTER_REGION"),
) -> GKEOpts:
    """Assemble GKEOpts from environment variables.

    Raises:
        ValueError: If GKE_SA_JSON, CLUSTER_NAME, or CLUSTER_REGION is unset.
    """
    required = {
        "GKE_SA_JSON": gke_sa_json,
        "CLUSTER_NAME": cluster_name,
        "CLUSTER_REGION": region,
    }
    for env_name, value in required.items():
        if not value:
            raise ValueError(f"{env_name} not set")
    return GKEOpts(
        service_account_json=gke_sa_json,
        cluster_name=cluster_name,
        region=region,
    )
53 |
54 |
class KubernetesRuntime(ContainerRuntime):
    """A container runtime that uses Kubernetes to manage Pods directly"""

    def __init__(self, cfg: ConnectConfig) -> None:
        self.cfg = cfg or ConnectConfig()

        self.kubeconfig = None
        if cfg.provider == "gke":
            opts = cfg.gke_opts
            if not opts:
                raise ValueError("GKE opts missing")
            self.connect_to_gke(opts)
        elif cfg.provider == "local":
            opts = cfg.local_opts
            if not opts:
                opts = LocalOpts()
            if opts.path:
                config.load_kube_config(opts.path)
                self.kubeconfig = opts.path
        else:
            raise ValueError("Unsupported provider: " + cfg.provider)

        self.core_api = client.CoreV1Api()

        # Namespace used for every pod operation below.
        self.namespace = cfg.namespace

        self.branch = cfg.branch

    @classmethod
    def name(cls) -> str:
        return "kube"

    def create(
        self,
        image: str,
        name: Optional[str] = None,
        env_vars: Optional[dict] = None,
        mem_request: Optional[str] = "500m",
        mem_limit: Optional[str] = "2Gi",
        cpu_request: Optional[str] = "1",
        cpu_limit: Optional[str] = "4",
        gpu_mem: Optional[str] = None,
    ) -> None:
        """Create a pod running `image` in the configured namespace.

        Parameters:
            image (str): The image to run.
            name (str, optional): Pod name; randomly generated if omitted.
            env_vars (dict, optional): Environment variables for the container.
            mem_request / mem_limit: Memory request and limit.
            cpu_request / cpu_limit: CPU request and limit.
            gpu_mem (str, optional): If set, added as an nvidia.com/gpu limit.
        """
        if not name:
            name = get_random_name("-")

        labels = {"provisioner": "surfkit"}

        # Define resource requirements
        resources = client.V1ResourceRequirements(
            requests={"memory": mem_request, "cpu": cpu_request},
            limits={"memory": mem_limit, "cpu": cpu_limit},
        )

        if gpu_mem:
            # Fix: the previous `"limits" in resources` membership test raised
            # TypeError on V1ResourceRequirements; check the attribute instead.
            if resources.limits:
                resources.limits["nvidia.com/gpu"] = gpu_mem
            else:
                resources.limits = {"nvidia.com/gpu": gpu_mem}

        # Define the container with environment variables
        env_list = []
        if env_vars:
            for key, value in env_vars.items():
                env_list.append(client.V1EnvVar(name=key, value=value))

        container = client.V1Container(
            name=name,
            image=image,
            ports=[client.V1ContainerPort(container_port=8080)],
            resources=resources,
            env=env_list,
        )

        # Create a Pod specification
        pod_spec = client.V1PodSpec(
            containers=[container],
            restart_policy="Never",  # 'Always' if you want the pod to restart on failure
        )

        # Create the Pod
        pod = client.V1Pod(
            api_version="v1",
            kind="Pod",
            metadata=client.V1ObjectMeta(name=name, labels=labels),
            spec=pod_spec,
        )

        # Launch the Pod
        print("Creating pod")
        try:
            # Fix: honor the configured namespace instead of hardcoding "default".
            self.core_api.create_namespaced_pod(namespace=self.namespace, body=pod)
            print(f"Pod created. name='{name}'")
        except ApiException as e:
            print(f"Exception when creating pod: {e}")

    @classmethod
    def connect_config_type(cls) -> Type[ConnectConfig]:
        return ConnectConfig

    @classmethod
    def connect(cls, cfg: ConnectConfig) -> "KubernetesRuntime":
        return cls(cfg)

    @retry(stop=stop_after_attempt(15))
    def connect_to_gke(self, opts: GKEOpts) -> Tuple[client.CoreV1Api, str, str]:
        """
        Sets up and returns a configured Kubernetes client (CoreV1Api) and cluster details.

        Returns:
            Tuple containing the Kubernetes CoreV1Api client object, the project ID, and the cluster name.
        """
        service_account_info = json.loads(opts.service_account_json)
        credentials = service_account.Credentials.from_service_account_info(
            service_account_info,
            scopes=["https://www.googleapis.com/auth/cloud-platform"],
        )

        # Setup GKE client to get cluster information
        gke_service = container_v1.ClusterManagerClient(credentials=credentials)
        project_id = service_account_info.get("project_id")
        if not project_id or not opts.cluster_name or not opts.region:
            raise ValueError(
                "Missing project_id, cluster_name, or region in credentials or metadata"
            )

        print("\nK8s getting cluster...")
        cluster_request = container_v1.GetClusterRequest(
            name=f"projects/{project_id}/locations/{opts.region}/clusters/{opts.cluster_name}"
        )
        cluster = gke_service.get_cluster(request=cluster_request)

        # Configure Kubernetes client
        print("\nK8s getting token...")
        ca_cert = base64.b64decode(cluster.master_auth.cluster_ca_certificate)
        try:
            print("\nK8s refreshing token...")
            credentials.refresh(Request())
        except Exception as e:
            print("\nK8s token refresh failed: ", e)
            raise e
        access_token = credentials.token
        # Fix: do not print the bearer token itself; it would leak credentials
        # into logs.
        print("\nK8s got token")

        cluster_name = opts.cluster_name

        # Build an in-memory kubeconfig for the fetched cluster and token.
        kubeconfig = {
            "apiVersion": "v1",
            "kind": "Config",
            "clusters": [
                {
                    "name": cluster_name,
                    "cluster": {
                        "server": f"https://{cluster.endpoint}",
                        "certificate-authority-data": base64.b64encode(
                            ca_cert
                        ).decode(),
                    },
                }
            ],
            "contexts": [
                {
                    "name": cluster_name,
                    "context": {
                        "cluster": cluster_name,
                        "user": cluster_name,
                    },
                }
            ],
            "current-context": cluster_name,
            "users": [
                {
                    "name": cluster_name,
                    "user": {
                        "token": access_token,
                    },
                }
            ],
        }

        config.load_kube_config_from_dict(config_dict=kubeconfig)
        v1_client = client.CoreV1Api()
        print("\nK8s returning client...")

        return v1_client, project_id, cluster_name

    @retry(stop=stop_after_attempt(15))
    def call(
        self,
        name: str,
        path: str,
        method: str,
        port: int = 8080,
        data: Optional[dict] = None,
        headers: Optional[dict] = None,
    ) -> Tuple[int, str]:
        """HTTP-call the named pod, either through the workload proxy (when
        WORKLOAD_PROXY_URL is set) or directly via its cluster-local service DNS.

        Returns:
            Tuple[int, str]: The response status code and body text.
        """
        data = data or {}
        headers = headers or {}

        workload_proxy_url = os.getenv("WORKLOAD_PROXY_URL")
        if workload_proxy_url is not None:
            print("Using workload proxy at", workload_proxy_url)
            # mTLS credentials for the proxy.
            client_cert = os.getenv("WORKLOAD_PROXY_CLIENT_CERT")
            client_key = os.getenv("WORKLOAD_PROXY_CLIENT_KEY")
            ca_cert = os.getenv("WORKLOAD_PROXY_CA_CERT")

            workload_proxy_client = httpx.Client(
                verify=ca_cert, cert=(client_cert, client_key)
            )

            # The proxy routes on these headers.
            merged_headers = {
                **headers,
                "X-Pod-Name": name,
                "X-Namespace": self.cfg.namespace,
                "X-Port": str(port),
            }
        else:
            print("Using direct connection to workload service")
            workload_proxy_client = httpx.Client()
            merged_headers = headers
            workload_proxy_url = (
                f"http://{name}.{self.cfg.namespace}.svc.cluster.local:{port}"
            )

        # GET requests carry data as query parameters; others as a JSON body.
        json_data = None if method == "GET" else data
        query_parameters = ""
        if method == "GET" and data:
            query_parameters = "?" + "&".join([f"{k}={v}" for k, v in data.items()])

        url = f"{workload_proxy_url.rstrip('/')}/{path.lstrip('/')}" + query_parameters

        print("Method: ", method)
        print("URL: ", url)
        print("Headers: ", merged_headers)
        print("JSON Data: ", json_data)

        r = workload_proxy_client.request(
            method=method, url=url, headers=merged_headers, json=json_data
        )

        return r.status_code, r.text

    def logs(self, name: str) -> str:
        """
        Fetches the logs from the specified pod.

        Parameters:
            name (str): The name of the pod.

        Returns:
            str: The logs from the pod.
        """
        try:
            # Fix: honor the configured namespace instead of hardcoding "default".
            return self.core_api.read_namespaced_pod_log(
                name=name, namespace=self.namespace
            )
        except ApiException as e:
            print(f"Failed to get logs for pod '{name}': {e}")
            raise

    def list(self) -> List[str]:
        """List names of pods provisioned by surfkit in the configured namespace."""
        pods = self.core_api.list_namespaced_pod(
            namespace=self.namespace, label_selector="provisioner=surfkit"
        )
        return [pod.metadata.name for pod in pods.items]

    def delete(self, name: str) -> None:
        """Delete the named pod with a short grace period.

        Raises:
            ApiException: If the delete call fails.
        """
        try:
            self.core_api.delete_namespaced_pod(
                name=name,
                namespace=self.namespace,
                body=client.V1DeleteOptions(grace_period_seconds=5),
            )
            print(f"Successfully deleted pod: {name}")
        except ApiException as e:
            print(f"Failed to delete pod '{name}': {e}")
            raise

    def clean(self) -> None:
        """Delete every pod provisioned by surfkit in the configured namespace."""
        pods = self.core_api.list_namespaced_pod(
            namespace=self.namespace, label_selector="provisioner=surfkit"
        )
        for pod in pods.items:
            try:
                self.core_api.delete_namespaced_pod(
                    name=pod.metadata.name,
                    namespace=self.namespace,
                    body=client.V1DeleteOptions(grace_period_seconds=5),
                )
                print(f"Deleted pod: {pod.metadata.name}")
            except ApiException as e:
                print(f"Failed to delete pod '{pod.metadata.name}': {e}")
346 |
--------------------------------------------------------------------------------
/surfkit/runtime/container/load.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from pydantic import BaseModel
4 |
5 | from .base import ContainerRuntime
6 | from .docker import ConnectConfig as DockerConnectConfig
7 | from .docker import DockerRuntime
8 | from .kube import ConnectConfig as KubeConnectConfig
9 | from .kube import KubernetesRuntime
10 |
11 |
class ContainerRuntimeConfig(BaseModel):
    # Provider name ("kube" or "docker"); selects which runtime to connect.
    provider: Optional[str] = None
    # Connection settings for each supported runtime.
    docker_config: Optional[DockerConnectConfig] = None
    kube_config: Optional[KubeConnectConfig] = None
    # NOTE(review): unlike the agent runtime loader, this preference list is
    # currently not consulted by load_container_runtime — confirm intended.
    preference: List[str] = ["kube", "docker"]
17 |
18 |
def load_container_runtime(cfg: ContainerRuntimeConfig) -> ContainerRuntime:
    """Connect to the container runtime named by cfg.provider.

    Raises:
        ValueError: If the provider is unknown or its config is missing.
    """
    provider = cfg.provider

    if provider == KubernetesRuntime.name():
        if cfg.kube_config is None:
            raise ValueError("Kubernetes config is required")
        return KubernetesRuntime.connect(cfg.kube_config)

    if provider == DockerRuntime.name():
        if cfg.docker_config is None:
            raise ValueError("Docker config is required")
        return DockerRuntime.connect(cfg.docker_config)

    raise ValueError(f"Unknown provider: {cfg.provider}")
32 |
--------------------------------------------------------------------------------
/surfkit/runtime/vm/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 |
class VM:
    """A virtual machine managed by a :class:`VMRuntime`."""

    pass


class VMRuntime(ABC):
    """A virtual machine runtime."""

    @abstractmethod
    def create(self) -> VM:
        """Create and return a new virtual machine."""
        pass

    @abstractmethod
    def delete(self, name: str) -> None:
        """Delete the virtual machine with the given name."""
        pass

    @abstractmethod
    def stop(self, name: str) -> None:
        """Stop the virtual machine with the given name."""
        pass

    @abstractmethod
    def start(self, name: str) -> None:
        """Start the virtual machine with the given name."""
        pass
26 |
--------------------------------------------------------------------------------
/surfkit/server/models.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Optional
2 |
3 | from pydantic import BaseModel, Field
4 | from taskara import V1Task
5 | from taskara.review import V1ReviewRequirement
6 | from threadmem import V1RoleThread
7 |
8 |
class V1Action(BaseModel):
    """An action"""

    name: str
    parameters: Dict[str, Any]


class V1ActionSelection(BaseModel):
    """An action selection from the model"""

    observation: str
    reason: str
    action: V1Action


class V1DeviceConfig(BaseModel):
    """A device referenced by name, optionally provisioned on demand."""

    name: str
    provision: bool = False  # provision the device if it does not already exist


class V1DevicesConfig(BaseModel):
    """Ordered device preferences."""

    preference: List[V1DeviceConfig] = []


class V1Runtime(BaseModel):
    """A runtime type with ordered provider preferences."""

    type: str
    preference: List[str] = []


class V1ResourceLimits(BaseModel):
    """Compute resource limits, expressed as Kubernetes-style quantities."""

    cpu: str = "2"
    memory: str = "2Gi"
41 |
42 |
class V1ResourceRequests(BaseModel):
    """Compute resource requests, expressed as Kubernetes-style quantities."""

    # CPU cores requested
    cpu: str = "1"
    # BUG FIX: was "500m" — in Kubernetes memory units that is 0.5 bytes,
    # not 500 mebibytes; "500Mi" is the intended request.
    memory: str = "500Mi"
    # Optional GPU request, e.g. "1"
    gpu: Optional[str] = None
47 |
48 |
class V1EnvVarOpt(BaseModel):
    """Declaration of an environment variable an agent accepts."""

    name: str
    description: Optional[str] = None
    required: bool = False
    default: Optional[str] = None
    secret: bool = False  # treat the value as sensitive
    options: List[str] = []  # allowed values, when constrained


class V1LLMProviders(BaseModel):
    """Ordered LLM provider preferences."""

    preference: List[str] = []


class V1Agent(BaseModel):
    """An agent reference plus its free-form configuration."""

    name: str
    config: Dict[str, Any]


class V1SolveTask(BaseModel):
    """Request to solve a task, optionally with a specific agent."""

    task: V1Task
    agent: Optional[V1Agent] = None


class V1CreateTask(BaseModel):
    """Request to create a task, optionally with a specific agent."""

    task: V1Task
    agent: Optional[V1Agent] = None


class V1Meter(BaseModel):
    """A billing meter: cost per unit of usage."""

    name: str
    unit: str
    cost: float
    description: Optional[str] = None


class V1RuntimeConnect(BaseModel):
    """A runtime name and its provider-specific connect configuration."""

    name: str
    connect_config: BaseModel
87 |
88 |
class V1AgentType(BaseModel):
    """Metadata describing an agent type: image, runtimes, resources, devices."""

    version: Optional[str] = None
    kind: Optional[str] = None
    id: Optional[str] = None
    name: str
    description: str
    cmd: str  # command used to launch the agent
    owner_id: Optional[str] = None
    repo: Optional[str] = None  # source repository
    img_repo: Optional[str] = None  # container image repository
    versions: Optional[Dict[str, str]] = None
    env_opts: List[V1EnvVarOpt] = []
    supports: List[str] = []
    runtimes: List[V1Runtime] = []
    created: Optional[float] = None  # unix timestamp
    updated: Optional[float] = None  # unix timestamp
    public: bool = False
    icon: Optional[str] = None
    resource_requests: V1ResourceRequests = V1ResourceRequests()
    resource_limits: V1ResourceLimits = V1ResourceLimits()
    llm_providers: Optional[V1LLMProviders] = None
    devices: List[V1DeviceConfig] = []
    meters: List[V1Meter] = []
    tags: List[str] = []
    labels: Dict[str, str] = {}
    namespace: Optional[str] = None


class V1AgentTypes(BaseModel):
    """A list of agent types."""

    types: List[V1AgentType]


class V1AgentInstance(BaseModel):
    """A running agent instance and how to reach it."""

    name: str
    type: V1AgentType
    runtime: V1RuntimeConnect
    version: Optional[str] = None
    port: int = 9090
    labels: Dict[str, str] = {}
    tags: List[str] = []
    status: str
    owner_id: Optional[str] = None
    icon: Optional[str] = None
    created: float  # unix timestamp
    updated: float  # unix timestamp


class V1AgentInstances(BaseModel):
    """A list of agent instances."""

    instances: List[V1AgentInstance]


class V1Find(BaseModel):
    """Free-form filter arguments for find endpoints."""

    args: dict = {}


class V1CreateAgentType(BaseModel):
    """Request payload for registering a new agent type."""

    id: str
    name: str
    description: str
    image: str
    env_opts: List[V1EnvVarOpt] = []
    supported_runtimes: List[str] = []
    versions: Dict[str, str] = {}
    public: bool = False
    icon: Optional[str] = None
    tags: List[str] = []
    labels: Dict[str, str] = {}
156 |
157 |
class V1Work(BaseModel):
    """Work polling configuration: a remote server and a polling interval.

    BUG FIX: this class was declared without the BaseModel base, leaving it a
    plain class with bare annotations (no validation, no generated __init__),
    unlike every other V1* model in this module.
    """

    remote: str
    check_interval: int
161 |
162 |
class V1UserProfile(BaseModel):
    """A user profile."""

    email: Optional[str] = None
    display_name: Optional[str] = None
    handle: Optional[str] = None
    picture: Optional[str] = None  # avatar URL
    created: Optional[int] = None  # unix timestamp
    updated: Optional[int] = None  # unix timestamp
    token: Optional[str] = None


class V1Meta(BaseModel):
    """Common resource metadata."""

    id: str
    tags: List[str] = []
    labels: Dict[str, str] = {}
    owner_id: Optional[str] = None
    created: float  # unix timestamp
    updated: float  # unix timestamp


class UserTasks(BaseModel):
    """A list of tasks for a user story"""

    tasks: List[str] = Field(description="A list of tasks for a user story")


class UserTask(BaseModel):
    """A task for a user story"""

    task: str = Field(description="A task for a user story")


class V1Skill(BaseModel):
    """A skill: tasks, requirements, and demos an agent can learn from."""

    id: str
    name: str
    description: str
    requirements: List[str]
    max_steps: int
    review_requirements: List[V1ReviewRequirement]
    tasks: List[V1Task]
    example_tasks: List[str]
    threads: List[V1RoleThread] = []
    status: Optional[str] = None
    min_demos: Optional[int] = None  # minimum demos required
    demos_outstanding: Optional[int] = None
    demo_queue_size: Optional[int] = None
    owner_id: Optional[str] = None
    generating_tasks: Optional[bool] = None
    agent_type: str
    kvs: Optional[Dict[str, Any]] = None  # arbitrary key/value storage
    remote: Optional[str] = None
    created: int  # unix timestamp
    updated: int  # unix timestamp


class SkillsWithGenTasks(BaseModel):
    """A skill id plus its task-generation queue state."""

    skill_id: str
    in_queue_count: int
    tasks_needed: int


class V1UpdateSkill(BaseModel):
    """Partial update for a skill; fields left as None are unchanged."""

    name: Optional[str] = None
    description: Optional[str] = None
    requirements: Optional[List[str]] = None
    max_steps: Optional[int] = None
    review_requirements: Optional[List[V1ReviewRequirement]] = None
    tasks: Optional[List[str]] = None
    example_tasks: Optional[List[str]] = None
    threads: Optional[List[str]] = None
    status: Optional[str] = None
    min_demos: Optional[int] = None
    demos_outstanding: Optional[int] = None
    demo_queue_size: Optional[int] = None
    kvs: Optional[Dict[str, Any]] = None


class V1SetKey(BaseModel):
    """A key/value pair to set."""

    key: str
    value: str


class V1LearnSkill(BaseModel):
    """Request to learn a skill, optionally remote or with a specific agent."""

    skill_id: str
    remote: Optional[str] = None
    agent: Optional[V1Agent] = None


class V1LearnTask(BaseModel):
    """Request to learn from a task, optionally remote or with a specific agent."""

    task: V1Task
    remote: Optional[str] = None
    agent: Optional[V1Agent] = None
254 |
--------------------------------------------------------------------------------
/surfkit/server/routes.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import time
4 | from typing import Annotated, Optional, Type
5 |
6 | import requests
7 | from agentcore.models import V1UserProfile
8 | from fastapi import APIRouter, BackgroundTasks, Depends
9 | from taskara import Task, TaskStatus
10 | from taskara.server.models import V1Task, V1Tasks, V1TaskUpdate
11 | from tenacity import retry, stop_after_attempt, wait_fixed
12 |
13 | from surfkit.agent import TaskAgent
14 | from surfkit.auth.transport import get_user_dependency
15 | from surfkit.env import AGENTESEA_HUB_API_KEY_ENV
16 | from surfkit.server.models import V1Agent, V1LearnTask, V1SolveTask
17 | from surfkit.skill import Skill
18 |
19 | DEBUG_ENV_VAR = os.getenv("DEBUG", "false").lower() == "true"
20 | log_level = logging.DEBUG if DEBUG_ENV_VAR else logging.INFO
21 | logging.basicConfig(level=log_level)
22 | logger = logging.getLogger(__name__)
23 |
24 |
def task_router(Agent: Type[TaskAgent]) -> APIRouter:
    """API router for a task agent.

    Args:
        Agent (Type[TaskAgent]): Task agent type.

    Returns:
        APIRouter: An APIRouter for the task agent.
    """

    api_router = APIRouter()

    @api_router.get("/")
    async def root():
        return {"message": f"{Agent.name()} in the shell"}

    @api_router.get("/health")
    async def health():
        return {"status": "ok"}

    @api_router.post("/v1/learn")
    async def learn_task(
        current_user: Annotated[V1UserProfile, Depends(get_user_dependency())],
        background_tasks: BackgroundTasks,
        learn_model: V1LearnTask,
    ):
        """Schedule background learning for an existing remote task."""
        task_model = learn_model.task
        logger.info(
            f"learning task: {task_model.model_dump()} with user {current_user.model_dump()}"
        )

        found = Task.find(
            remote=task_model.remote,
            id=task_model.id,
            owner_id=task_model.owner_id,
            auth_token=task_model.auth_token,
        )
        if not found:
            raise Exception(f"Task {task_model.id} not found")

        logger.info(f"found task: {found[0].to_v1().model_dump()}")

        task = found[0]
        task.remote = task_model.remote  # type: ignore
        task.auth_token = task_model.auth_token  # type: ignore

        # Resolve the skill id from the task field or, failing that, its labels.
        skill_id = None
        if task.skill:
            skill_id = task.skill
        elif "skill" in task.labels:
            skill_id = task.labels["skill"]
        elif "skill_id" in task.labels:
            skill_id = task.labels["skill_id"]
        else:
            raise ValueError("Task skill or skill label not set")

        logger.info(f"finding skill_id: {skill_id}")
        skills = Skill.find(
            id=skill_id, remote=task.remote, token=task_model.auth_token
        )
        if not skills:
            raise ValueError(f"Skill not found: {skill_id}")
        skill = skills[0]
        logger.info(f"skill: {skill.to_v1().model_dump()}")

        background_tasks.add_task(
            _learn_task, task, skill, current_user, learn_model.agent
        )

    def _learn_task(
        task: Task,
        skill: Skill,
        current_user: V1UserProfile,
        v1_agent: Optional[V1Agent] = None,
    ):
        """Run agent.learn_task, labeling the task's training status throughout."""
        if v1_agent:
            config = Agent.config_type().model_validate(v1_agent.config)
            agent = Agent.from_config(config=config)
        else:
            agent = Agent.default()

        print(f"agent: {agent}", flush=True)

        if not task.remote or not task.auth_token:
            raise ValueError("Task remote and auth token must be set")

        try:
            print(f"labeling task as training: {task.id}", flush=True)
            _label_task(
                task.remote, task.auth_token, task, "foo/train/status", "training"
            )
            print("labeled task as training", flush=True)
            agent.learn_task(task, skill)
            print(f"labeling task as finished: {task.id}", flush=True)
            _label_task(
                task.remote, task.auth_token, task, "foo/train/status", "finished"
            )
            print("labeled task as finished", flush=True)
        except Exception as e:
            logger.error(f"error learning task: {e}")
            print(f"labeling task as error: {task.id}", flush=True)
            _label_task(task.remote, task.auth_token, task, "foo/train/status", "error")
            _label_task(task.remote, task.auth_token, task, "foo/train/error", str(e))
            print("labeled task as error", flush=True)

    @api_router.post("/v1/tasks")
    async def solve_task(
        current_user: Annotated[V1UserProfile, Depends(get_user_dependency())],
        background_tasks: BackgroundTasks,
        task_model: V1SolveTask,
    ):
        """Schedule background solving of a task."""
        logger.info(
            f"solving task: {task_model.model_dump()} with user {current_user.email}"
        )

        background_tasks.add_task(_solve_task, task_model, current_user)
        logger.info("created background task...")
        return

    def _solve_task(task_model: V1SolveTask, current_user: V1UserProfile):
        """Connect to the task's device, run the agent, and record the outcome."""
        owner_id = task_model.task.owner_id
        if not owner_id:
            owner_id = "local"
        task = Task.from_v1(
            task_model.task, owner_id=owner_id, auth_token=task_model.task.auth_token
        )

        logger.info("Saving remote tasks status to running...")
        task.status = TaskStatus.IN_PROGRESS
        task.started = time.time()
        task.save()

        if task_model.task.device:
            logger.info(f"connecting to device {task_model.task.device.name}...")
            device = None
            for Device in Agent.supported_devices():
                if Device.type() == task_model.task.device.type:
                    logger.debug(f"found device: {task_model.task.device.model_dump()}")
                    api_key = task_model.task.auth_token
                    if api_key is None:
                        logger.info("No Api key/token on Task or in Auth")

                    try:
                        config = Device.connect_config_type()(
                            **{**task_model.task.device.config, "api_key": api_key}  # type: ignore
                        )
                        device = Device.connect(config=config)
                    except Exception as e:
                        err = f"error connecting to device: {e}"
                        task.error = err
                        task.status = TaskStatus.ERROR
                        task.save()
                        raise Exception(err)

            if not device:
                raise ValueError(
                    f"Device {task_model.task.device.name} provided in solve task, but not supported by agent"
                )

            logger.debug(f"connected to device: {device.__dict__}")
        else:
            raise ValueError("No device provided")

        logger.info("starting agent...")
        if task_model.agent:
            config = Agent.config_type().model_validate(task_model.agent.config)
            agent = Agent.from_config(config=config)
        else:
            agent = Agent.default()

        try:
            final_task = agent.solve_task(
                task=task, device=device, max_steps=task.max_steps
            )

        except Exception as e:
            logger.error(f"error running agent: {e}")

            task.refresh()
            task.status = TaskStatus.FAILED
            task.error = str(e)
            task.completed = time.time()
            task.save()
            task.post_message(
                "assistant", f"Failed to run task '{task.description}': {e}"
            )
            return

        finally:
            print(f"► task run ended '{task.id}'", flush=True)

        if final_task:
            final_task.refresh()
            final_task.completed = time.time()
            final_task.save()

    @api_router.get("/v1/tasks", response_model=V1Tasks)
    async def get_tasks(
        current_user: Annotated[V1UserProfile, Depends(get_user_dependency())],
    ):
        """List all local tasks."""
        tasks = Task.find()
        return V1Tasks(tasks=[task.to_v1() for task in tasks])

    @api_router.get("/v1/tasks/{id}", response_model=V1Task)
    async def get_task(
        current_user: Annotated[V1UserProfile, Depends(get_user_dependency())], id: str
    ):
        """Get a single task by id."""
        tasks = Task.find(id=id)
        if not tasks:
            raise Exception(f"Task {id} not found")
        return tasks[0].to_v1()

    @api_router.put("/v1/tasks/{id}", response_model=V1Task)
    async def put_task(
        current_user: Annotated[V1UserProfile, Depends(get_user_dependency())],
        id: str,
        data: V1TaskUpdate,
    ):
        """Update a task (currently only its status)."""
        tasks = Task.find(id=id)
        if not tasks:
            raise Exception(f"Task {id} not found")
        task = tasks[0]
        if data.status:
            task.status = TaskStatus(data.status)
        # BUG FIX: was `logging.info("updated task status to: ", task.status)` —
        # print-style positional arg with no format placeholder on the root
        # logger; use lazy %-formatting on the module logger instead.
        logger.info("updated task status to: %s", task.status)
        task.save()
        return task.to_v1()

    return api_router
254 |
255 |
@retry(stop=stop_after_attempt(10), wait=wait_fixed(10))
def get_remote_task(id: str, owner_id: str, server: str, auth_token: str) -> Task:
    """Fetch a task from a remote task server, retrying up to 10 times (10s apart).

    Args:
        id (str): Task id.
        owner_id (str): Owner of the task.
        server (str): Remote server address.
        auth_token (str): Token used to authenticate with the remote server.

    Returns:
        Task: The remote task.

    Raises:
        Exception: If the hub API key env var is unset or the task is not found.
    """
    # NOTE(review): HUB_API_KEY is required but only used in the debug log below,
    # while the actual lookup authenticates with auth_token — confirm intent.
    HUB_API_KEY = os.environ.get(AGENTESEA_HUB_API_KEY_ENV)
    if not HUB_API_KEY:
        raise Exception(f"${AGENTESEA_HUB_API_KEY_ENV} not set")

    logger.debug(f"connecting to remote task: {id} key: {HUB_API_KEY}")
    try:
        tasks = Task.find(
            id=id,
            remote=server,
            owner_id=owner_id,
            auth_token=auth_token,
        )
        if not tasks:
            raise Exception(f"Task {id} not found")
        logger.debug(f"got remote task: {tasks[0].__dict__}")
        return tasks[0]
    except Exception as e:
        logger.error(f"error getting remote task: {e}")
        raise e
277 |
278 |
def _label_task(remote: str, token: str, task: Task, key: str, value: str) -> None:
    """Set a single label on a task via the remote task API.

    Args:
        remote (str): Base URL of the remote task server.
        token (str): Bearer token used for authorization.
        task (Task): The task to label.
        key (str): Label key to set.
        value (str): Label value to set.

    Raises:
        requests.HTTPError: If the remote update request fails.
    """
    update = V1TaskUpdate(
        set_labels={key: value},
    )
    resp = requests.put(
        f"{remote}/v1/tasks/{task.id}",
        json=update.model_dump(),
        headers={"Authorization": f"Bearer {token}"},
    )
    resp.raise_for_status()
294 |
--------------------------------------------------------------------------------
/surfkit/util.py:
--------------------------------------------------------------------------------
import json
import re
import socket
from datetime import datetime, timezone
from typing import Any, Dict, Optional
6 |
7 |
def extract_parse_json(input_str: str) -> Dict[str, Any]:
    r"""Extract and parse a JSON object from ``input_str``.

    If the string contains a fenced block of the form ```json\n...\n```, only
    the fenced payload is parsed; otherwise the entire string is parsed as
    JSON. (The previous docstring claimed the raw input string was returned in
    that case, which was never true.)

    :param input_str: A string containing a JSON object, optionally wrapped
        in a ```json fenced code block.
    :return: The parsed JSON object as a dictionary.
    :raises json.JSONDecodeError: If the extracted text is not valid JSON.
    """
    # Match the payload between ```json and the closing fence, non-greedily.
    match = re.search(r"```json\n([\s\S]+?)\n```", input_str)
    payload = match.group(1) if match else input_str
    return json.loads(payload)
26 |
27 |
def find_open_port(start_port: int = 1024, end_port: int = 65535) -> Optional[int]:
    """Scan ``start_port``..``end_port`` inclusive and return the first bindable port.

    Returns None when every port in the range is already in use.
    """
    for candidate in range(start_port, end_port + 1):
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            probe.bind(("", candidate))
            return candidate  # bind succeeded, port is free
        except socket.error:
            pass  # busy port, keep scanning
        finally:
            probe.close()
    return None
38 |
39 |
def convert_unix_to_datetime(unix_timestamp: int) -> str:
    """Format a unix timestamp as a UTC ``YYYY-MM-DD HH:MM:SS`` string.

    Uses the tz-aware ``datetime.fromtimestamp(..., tz=timezone.utc)`` instead
    of ``datetime.utcfromtimestamp``, which is deprecated since Python 3.12;
    the formatted output is identical.

    :param unix_timestamp: Seconds since the Unix epoch.
    :return: The timestamp rendered in UTC.
    """
    dt = datetime.fromtimestamp(unix_timestamp, tz=timezone.utc)
    friendly_format = dt.strftime("%Y-%m-%d %H:%M:%S")
    return friendly_format
44 |
--------------------------------------------------------------------------------
/ui/surfkit/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # production
12 | /build
13 |
14 | # misc
15 | .DS_Store
16 | .env.local
17 | .env.development.local
18 | .env.test.local
19 | .env.production.local
20 |
21 | npm-debug.log*
22 | yarn-debug.log*
23 | yarn-error.log*
24 |
--------------------------------------------------------------------------------
/ui/surfkit/Dockerfile:
--------------------------------------------------------------------------------
1 | # Step 1: Use an official Node.js runtime as a parent image
2 | FROM node:latest
3 |
4 | # Step 2: Set the working directory in the container
5 | WORKDIR /usr/src/app
6 |
7 | # Step 3: Copy the package.json files and install dependencies
8 | COPY package*.json ./
9 | RUN npm install
10 |
11 | # Step 4: Bundle app source inside Docker image
12 | COPY . .
13 |
# Step 5: The app listens on port 3000; EXPOSE only documents this in the image metadata — publish the port at run time with `docker run -p 3000:3000`
15 | EXPOSE 3000
16 |
17 | # Step 6: Define the command to run your app
18 | CMD ["npm", "start"]
19 |
--------------------------------------------------------------------------------
/ui/surfkit/Makefile:
--------------------------------------------------------------------------------
1 | IMAGE_REPO := us-central1-docker.pkg.dev/agentsea-dev/guisurfer/surfkit-ui
2 | TAG := latest
3 |
4 |
5 | .PHONY: serve
6 | serve:
7 | npm start
8 |
9 | .PHONY: build-img
10 | build-img:
11 | docker buildx build --platform linux/amd64,linux/arm64 --push -t ${IMAGE_REPO}:${TAG} .
12 |
13 | .PHONY: push-img
14 | push-img:
15 | docker push ${IMAGE_REPO}:${TAG}
16 |
17 | # .PHONY: deploy
18 | # deploy:
19 | # helm install -n hub -f ./deploy/helm/agentsea-hub-ui/values.yaml hubapi ./deploy/helm/agentsea-hub-api
20 |
21 | # .PHONY: package-chart
22 | # package-chart:
23 | # rm -rf ./pkg
24 | # mkdir -p ./pkg
25 | # helm package ./deploy/helm/agentsea-hub-ui -d ./pkg
26 |
27 | # .PHONY: push-chart
28 | # push-chart: package-chart
29 | # $(eval CHART_FILE=$(shell ls ./pkg | head -n1))
30 | # helm push ./pkg/$(CHART_FILE) oci://us-central1-docker.pkg.dev/agentsea-dev/hub
31 | # rm -rf ./pkg
32 |
33 | # .PHONY: release
34 | # release: build-img push-chart
--------------------------------------------------------------------------------
/ui/surfkit/README.md:
--------------------------------------------------------------------------------
1 | # Getting Started with Create React App
2 |
3 | This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
4 |
5 | ## Available Scripts
6 |
7 | In the project directory, you can run:
8 |
9 | ### `npm start`
10 |
11 | Runs the app in the development mode.\
12 | Open [http://localhost:3000](http://localhost:3000) to view it in your browser.
13 |
14 | The page will reload when you make changes.\
15 | You may also see any lint errors in the console.
16 |
17 | ### `npm test`
18 |
19 | Launches the test runner in the interactive watch mode.\
20 | See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
21 |
22 | ### `npm run build`
23 |
24 | Builds the app for production to the `build` folder.\
25 | It correctly bundles React in production mode and optimizes the build for the best performance.
26 |
27 | The build is minified and the filenames include the hashes.\
28 | Your app is ready to be deployed!
29 |
30 | See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
31 |
32 | ### `npm run eject`
33 |
34 | **Note: this is a one-way operation. Once you `eject`, you can't go back!**
35 |
36 | If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
37 |
38 | Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own.
39 |
40 | You don't have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it.
41 |
42 | ## Learn More
43 |
44 | You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
45 |
46 | To learn React, check out the [React documentation](https://reactjs.org/).
47 |
48 | ### Code Splitting
49 |
50 | This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting)
51 |
52 | ### Analyzing the Bundle Size
53 |
54 | This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size)
55 |
56 | ### Making a Progressive Web App
57 |
58 | This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app)
59 |
60 | ### Advanced Configuration
61 |
62 | This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration)
63 |
64 | ### Deployment
65 |
66 | This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)
67 |
68 | ### `npm run build` fails to minify
69 |
70 | This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)
71 |
--------------------------------------------------------------------------------
/ui/surfkit/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "surfkit",
3 | "version": "0.0.0",
4 | "private": true,
5 | "dependencies": {
6 | "@fortawesome/fontawesome-svg-core": "^6.5.2",
7 | "@fortawesome/react-fontawesome": "^0.2.2",
8 | "@heroicons/react": "^2.1.3",
9 | "@material-tailwind/react": "2.1.8",
10 | "react": "18.2.0",
11 | "react-dom": "18.2.0",
12 | "react-router-dom": "^6.23.1",
13 | "react-scripts": "5.0.1",
14 | "react-vnc": "^1.0.0"
15 | },
16 | "scripts": {
17 | "start": "react-scripts start",
18 | "build": "react-scripts build",
19 | "test": "react-scripts test",
20 | "eject": "react-scripts eject"
21 | },
22 | "eslintConfig": {
23 | "extends": [
24 | "react-app",
25 | "react-app/jest"
26 | ]
27 | },
28 | "browserslist": {
29 | "production": [
30 | ">0.2%",
31 | "not dead",
32 | "not op_mini all"
33 | ],
34 | "development": [
35 | "last 1 chrome version",
36 | "last 1 firefox version",
37 | "last 1 safari version"
38 | ]
39 | },
40 | "devDependencies": {
41 | "tailwindcss": "3.4.0"
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/ui/surfkit/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agentsea/surfkit/8708abd995d45cad5e0a94a9606d6ada6b8a94dd/ui/surfkit/public/favicon.ico
--------------------------------------------------------------------------------
/ui/surfkit/public/favicon_a2.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agentsea/surfkit/8708abd995d45cad5e0a94a9606d6ada6b8a94dd/ui/surfkit/public/favicon_a2.ico
--------------------------------------------------------------------------------
/ui/surfkit/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
12 |
13 |
17 |
18 |
27 | Surfkit
28 |
29 |
30 | You need to enable JavaScript to run this app.
31 |
32 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/ui/surfkit/public/logo.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ui/surfkit/public/logo192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agentsea/surfkit/8708abd995d45cad5e0a94a9606d6ada6b8a94dd/ui/surfkit/public/logo192.png
--------------------------------------------------------------------------------
/ui/surfkit/public/logo512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agentsea/surfkit/8708abd995d45cad5e0a94a9606d6ada6b8a94dd/ui/surfkit/public/logo512.png
--------------------------------------------------------------------------------
/ui/surfkit/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "React App",
3 | "name": "Create React App Sample",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | },
10 | {
11 | "src": "logo192.png",
12 | "type": "image/png",
13 | "sizes": "192x192"
14 | },
15 | {
16 | "src": "logo512.png",
17 | "type": "image/png",
18 | "sizes": "512x512"
19 | }
20 | ],
21 | "start_url": ".",
22 | "display": "standalone",
23 | "theme_color": "#000000",
24 | "background_color": "#ffffff"
25 | }
26 |
--------------------------------------------------------------------------------
/ui/surfkit/public/robots.txt:
--------------------------------------------------------------------------------
1 | # https://www.robotstxt.org/robotstxt.html
2 | User-agent: *
3 | Disallow:
4 |
--------------------------------------------------------------------------------
/ui/surfkit/src/App.js:
--------------------------------------------------------------------------------
1 | // App.js
2 | import React from "react";
3 | import { BrowserRouter as Router } from "react-router-dom";
4 | import AppRoutes from "./server/Routes";
5 |
6 | function App() {
7 | return (
8 |
13 | );
14 | }
15 |
16 | export default App;
17 |
--------------------------------------------------------------------------------
/ui/surfkit/src/api/Tasks.js:
--------------------------------------------------------------------------------
export async function getTasks(addr, token) {
  // Fetch the task list from the agent server; resolves to undefined on failure.
  const endpoint = new URL(`/v1/tasks`, addr);
  const requestHeaders = token
    ? { "Content-Type": "application/json", Authorization: `Bearer ${token}` }
    : { "Content-Type": "application/json" };
  try {
    const response = await fetch(endpoint, {
      method: "GET",
      cache: "no-cache",
      headers: requestHeaders,
      redirect: "follow",
    });
    if (!response.ok) {
      throw new Error("HTTP status " + response.status);
    }
    const payload = await response.json();
    return payload.tasks;
  } catch (error) {
    console.error("Failed to list tasks", error);
  }
}
25 |
export async function getTask(addr, id, token) {
  // Fetch a single task by id; resolves to undefined on failure.
  const endpoint = new URL(`/v1/tasks/${id}`, addr);
  const requestHeaders = token
    ? { "Content-Type": "application/json", Authorization: `Bearer ${token}` }
    : { "Content-Type": "application/json" };
  try {
    const response = await fetch(endpoint, {
      method: "GET",
      cache: "no-cache",
      headers: requestHeaders,
      redirect: "follow",
    });
    if (!response.ok) {
      throw new Error("HTTP status " + response.status);
    }
    const payload = await response.json();
    return payload;
  } catch (error) {
    console.error("Failed to get task", error);
  }
}
50 |
/**
 * Update a task on the agent server.
 * @param {string} addr - Base URL of the agent server.
 * @param {string} id - Task id.
 * @param {object} bodyData - V1TaskUpdate-shaped payload.
 * @param {string} [token] - Optional bearer token.
 * @returns {Promise<object|undefined>} The updated task, or undefined on failure.
 */
export async function updateTask(addr, id, bodyData, token) {
  console.log("updating task with id: ", id);
  console.log("bodyData: ", bodyData);
  console.log("addr: ", addr);
  const url = new URL(`/v1/tasks/${id}`, addr);
  console.log("updating tasks with URL: ", url);

  try {
    const headers = {
      "Content-Type": "application/json",
    };
    if (token) {
      headers["Authorization"] = `Bearer ${token}`;
    }
    const resp = await fetch(url, {
      method: "PUT",
      cache: "no-cache",
      headers: headers,
      redirect: "follow",
      body: JSON.stringify(bodyData),
    });
    if (!resp.ok) {
      throw new Error("HTTP status " + resp.status);
    }
    console.log("Updated task successfully");
    const data = await resp.json();
    console.log("Got update tasks response: ", data);
    // BUG FIX: PUT /v1/tasks/{id} returns a single task object, not a
    // { tasks: [...] } wrapper — `data.tasks` was always undefined.
    return data;
  } catch (error) {
    console.error("Failed to update task", error);
  }
}
83 |
--------------------------------------------------------------------------------
/ui/surfkit/src/api/agentd.js:
--------------------------------------------------------------------------------
/** GET /health — check agentd liveness; logs and resolves undefined on failure. */
export async function getHealth(addr) {
  const url = new URL("/health", addr);
  try {
    const res = await fetch(url);
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to fetch health", err);
  }
}
13 |
/** GET /mouse_coordinates — current pointer position; undefined on failure. */
export async function getMouseCoordinates(addr) {
  const url = new URL("/mouse_coordinates", addr);
  try {
    const res = await fetch(url);
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to fetch mouse coordinates", err);
  }
}
26 |
/** POST /open_url — ask agentd to open `urlToOpen` in the remote browser. */
export async function openUrl(addr, urlToOpen) {
  const url = new URL("/open_url", addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ url: urlToOpen }),
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to open URL", err);
  }
}
45 |
/**
 * POST /move_mouse_to — move the remote pointer to (x, y).
 * NOTE(review): duration/tween are forwarded with camelCase keys — confirm
 * agentd expects that casing.
 */
export async function moveMouseTo(addr, x, y, duration, tween) {
  const url = new URL("/move_mouse_to", addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ x, y, duration, tween }),
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to move mouse", err);
  }
}
64 |
/** POST /click — press the given mouse button at the current position. */
export async function click(addr, button) {
  const url = new URL("/click", addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ button }),
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to click", err);
  }
}
83 |
/** POST /double_click — double-click at the current pointer position (no body). */
export async function doubleClick(addr) {
  const url = new URL("/double_click", addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to double click", err);
  }
}
101 |
/**
 * POST /type_text — type `text` with per-keystroke delays.
 * NOTE(review): minInterval/maxInterval are sent camelCase — confirm agentd
 * expects that casing.
 */
export async function typeText(addr, text, minInterval, maxInterval) {
  const url = new URL("/type_text", addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ text, minInterval, maxInterval }),
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to type text", err);
  }
}
120 |
/** POST /press_key — press a single named key on the remote desktop. */
export async function pressKey(addr, key) {
  const url = new URL("/press_key", addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ key }),
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to press key", err);
  }
}
139 |
/** POST /scroll — scroll by `clicks` wheel notches (sign picks direction). */
export async function scroll(addr, clicks) {
  const url = new URL("/scroll", addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ clicks }),
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to scroll", err);
  }
}
158 |
/** POST /drag_mouse — drag the pointer to (x, y) with the button held. */
export async function dragMouse(addr, x, y) {
  const url = new URL("/drag_mouse", addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ x, y }),
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to drag mouse", err);
  }
}
177 |
/** POST /screenshot — capture the remote screen; returns the JSON payload. */
export async function takeScreenshot(addr) {
  const url = new URL("/screenshot", addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to take screenshot", err);
  }
}
195 |
/** GET /recordings — list recorded sessions; undefined on failure. */
export async function listRecordings(addr) {
  const url = new URL("/recordings", addr);
  try {
    const res = await fetch(url);
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to list recordings", err);
  }
}
208 |
/** POST /recordings/:id/stop — stop an in-progress recording session. */
export async function stopRecording(addr, sessionId) {
  const url = new URL(`/recordings/${sessionId}/stop`, addr);
  try {
    const res = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
    });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to stop recording", err);
  }
}
226 |
/** GET /recordings/:id — fetch one recorded session; undefined on failure. */
export async function getRecording(addr, sessionId) {
  const url = new URL(`/recordings/${sessionId}`, addr);
  try {
    const res = await fetch(url);
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to get recording", err);
  }
}
239 |
/**
 * Start a new recording session (POST /recordings).
 *
 * Fix: removed leftover debug console.log statements (URL and response dump).
 *
 * @param {string} addr - Base URL of the agentd server.
 * @param {string} description - Human-readable description of the session.
 * @returns {Promise<Object|undefined>} The created session payload, or
 *   undefined on failure (the error is logged, not rethrown).
 */
export async function startRecording(addr, description) {
  const url = new URL("/recordings", addr);
  try {
    const resp = await fetch(url, {
      method: "POST",
      cache: "no-cache",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ description }),
      redirect: "follow",
    });
    if (!resp.ok) {
      throw new Error(`HTTP status ${resp.status}`);
    }
    return await resp.json();
  } catch (error) {
    console.error("Failed to start recording", error);
  }
}
264 |
/** GET /recordings/:sid/event/:eid — fetch one recorded event. */
export async function getEvent(addr, sessionId, eventId) {
  const url = new URL(`/recordings/${sessionId}/event/${eventId}`, addr);
  try {
    const res = await fetch(url);
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to get event", err);
  }
}
277 |
/** DELETE /recordings/:sid/event/:eid — remove one event from a recording. */
export async function deleteEvent(addr, sessionId, eventId) {
  const url = new URL(`/recordings/${sessionId}/event/${eventId}`, addr);
  try {
    const res = await fetch(url, { method: "DELETE" });
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to delete event", err);
  }
}
292 |
/** GET /active_sessions — list currently-recording sessions. */
export async function listActiveSessions(addr) {
  const url = new URL("/active_sessions", addr);
  try {
    const res = await fetch(url);
    if (res.ok) {
      return await res.json();
    }
    throw new Error(`HTTP status ${res.status}`);
  } catch (err) {
    console.error("Failed to list active sessions", err);
  }
}
305 |
/**
 * Fetch the parsed actions for a recording (GET /recordings/:sid/actions).
 *
 * Fix: removed the leftover debug console.log that dumped the full response
 * on every poll.
 *
 * @param {string} addr - Base URL of the agentd server.
 * @param {string} sessionId - Recording session identifier.
 * @returns {Promise<Object|undefined>} The actions payload, or undefined on
 *   failure (the error is logged, not rethrown).
 */
export async function getActions(addr, sessionId) {
  const url = new URL(`/recordings/${sessionId}/actions`, addr);
  try {
    const response = await fetch(url, {
      method: "GET",
      headers: { "Content-Type": "application/json" },
    });
    if (!response.ok) {
      throw new Error(`HTTP status ${response.status}`);
    }
    return await response.json();
  } catch (error) {
    console.error("Failed to fetch actions data", error);
  }
}
326 |
--------------------------------------------------------------------------------
/ui/surfkit/src/components/Layout.js:
--------------------------------------------------------------------------------
1 | import { Nav } from "./Nav";
2 |
3 | export default function Layout({ children }) {
4 | return (
5 |
6 |
7 |
8 |
9 | {children}
10 |
11 | );
12 | }
13 |
--------------------------------------------------------------------------------
/ui/surfkit/src/components/Nav.js:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import {
3 | Navbar,
4 | Collapse,
5 | Typography,
6 | IconButton,
7 | } from "@material-tailwind/react";
8 | import { Bars3Icon, XMarkIcon } from "@heroicons/react/24/outline";
9 |
10 | function NavList() {
11 | return (
12 |
40 | );
41 | }
42 |
43 | export function Nav() {
44 | const [openNav, setOpenNav] = React.useState(false);
45 |
46 | const handleWindowResize = () =>
47 | window.innerWidth >= 960 && setOpenNav(false);
48 |
49 | React.useEffect(() => {
50 | window.addEventListener("resize", handleWindowResize);
51 |
52 | return () => {
53 | window.removeEventListener("resize", handleWindowResize);
54 | };
55 | }, []);
56 |
57 | return (
58 |
59 |
60 |
65 |
66 | {/*
72 | SurfKit
73 | */}
74 |
75 |
76 |
77 |
setOpenNav(!openNav)}
82 | >
83 | {openNav ? (
84 |
85 | ) : (
86 |
87 | )}
88 |
89 |
90 |
91 |
92 |
93 |
94 | );
95 | }
96 |
--------------------------------------------------------------------------------
/ui/surfkit/src/components/Recording.js:
--------------------------------------------------------------------------------
1 | import { useState } from "react";
2 | import { Typography } from "@material-tailwind/react";
3 | import { XCircleIcon } from "@heroicons/react/24/outline";
4 |
5 | import { deleteEvent } from "../api/agentd";
6 |
7 | function EditableEvent({ data, onDelete }) {
8 | const renderEventData = () => {
9 | switch (data.type) {
10 | case "scroll":
11 | return ;
12 | case "click":
13 | return ;
14 | case "key":
15 | return ;
16 | case "text":
17 | return ;
18 | default:
19 | return null;
20 | }
21 | };
22 |
23 | return (
24 |
25 | {/* Delete button with X icon */}
26 |
27 |
28 |
29 |
30 |
31 |
35 |
39 | {renderEventData()}
40 |
41 | );
42 | }
43 |
44 | function Event({ data }) {
45 | // id: str
46 | // type: str
47 | // timestamp: float
48 | // coordinates: CoordinatesModel
49 | // screenshot_path: Optional[str]
50 | // screenshot_b64: Optional[str]
51 | // click_data: Optional[ClickData]
52 | // key_data: Optional[KeyData]
53 | // scroll_data = Optional[ScrollData]
54 | const renderEventData = () => {
55 | switch (data.type) {
56 | case "scroll":
57 | return ;
58 | case "click":
59 | return ;
60 | case "key":
61 | return ;
62 | case "text":
63 | return ;
64 | default:
65 | return null;
66 | }
67 | };
68 | return (
69 |
70 |
71 |
75 |
79 | {renderEventData()}
80 | {/*
; */}
81 |
82 | );
83 | }
84 |
85 | function Property({ k, v }) {
86 | const renderValue = (value) => {
87 | if (value !== null && typeof value === "object") {
88 | return {JSON.stringify(value, null, 2)} ;
89 | }
90 | return {value}
;
91 | };
92 |
93 | return (
94 |
95 |
{k}:
96 | {renderValue(v)}
97 |
98 | );
99 | }
100 |
101 | export default function Recording({ data }) {
102 | // id: str
103 | // description: str
104 | // start_time: float
105 | // end_time: float
106 | // events: List[RecordedEvent] = []
107 |
108 | return (
109 |
110 |
Current Task
111 |
112 |
113 |
117 |
Events {"\u25BC"}
118 |
119 | {data.events.map((event, index) => (
120 |
121 | ))}
122 |
123 |
124 | );
125 | }
126 |
127 | export function EditableRecording({ data, agentdAddr }) {
128 | const [events, setEvents] = useState(data.events);
129 |
130 | const handleDeleteEvent = async (index) => {
131 | const eventToDelete = events[index];
132 | try {
133 | const response = await deleteEvent(agentdAddr, data.id, eventToDelete.id);
134 | if (response && response.status === "success") {
135 | const updatedEvents = events.filter(
136 | (_, eventIndex) => eventIndex !== index
137 | );
138 | setEvents(updatedEvents);
139 | } else {
140 | console.error("Failed to delete the event");
141 | }
142 | } catch (error) {
143 | console.error("Error deleting event:", error);
144 | }
145 | };
146 |
147 | return (
148 |
149 |
Events {"\u25BC"}
150 |
151 | {events.map((event, index) => (
152 | handleDeleteEvent(index)}
156 | />
157 | ))}
158 |
159 |
160 | );
161 | }
162 |
--------------------------------------------------------------------------------
/ui/surfkit/src/components/RoleThread.js:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect, useRef } from "react";
2 | import { Typography, Avatar, Dialog } from "@material-tailwind/react";
3 | import { motion } from "framer-motion";
4 |
5 | const RoleThread = ({ data }) => {
6 | // console.log("thread data: ");
7 | // console.log(data);
8 | const messages = data?.messages || [];
9 | const roleMapping = data?.role_mapping || {};
10 | console.log("role mapping: ", roleMapping);
11 | const [modalOpen, setModalOpen] = useState(false);
12 | const [currentImage, setCurrentImage] = useState("");
13 | const endOfMessagesRef = useRef(null);
14 |
15 | // Function to handle image click
16 | const handleImageClick = (image) => {
17 | setCurrentImage(image);
18 | setModalOpen(true);
19 | };
20 |
21 | useEffect(() => {
22 | // Automatically scroll to the end of messages when new messages are added
23 | const scrollContainer = endOfMessagesRef.current?.parentNode;
24 | if (scrollContainer && endOfMessagesRef.current) {
25 | scrollContainer.scrollTo({
26 | top: endOfMessagesRef.current.offsetTop,
27 | behavior: "smooth",
28 | });
29 | }
30 | }, [messages.length]);
31 |
32 | return (
33 |
34 | {messages.map((msg, index) => (
35 |
42 |
43 | {/* Render the avatar with fallback */}
44 |
52 | {/* Role Name or Fallback */}
53 |
54 | {roleMapping[msg.role]?.user_name || msg.role}
55 |
56 |
57 |
58 | {msg.text}
59 |
60 | {/* If there are images, render them */}
61 | {msg.images &&
62 | msg.images.map((image, imgIndex) => (
63 | handleImageClick(image)}
69 | />
70 | ))}
71 |
72 | ))}
73 | {/* Invisible marker for scrolling */}
74 |
75 | {/* Modal for displaying the full-size image */}
76 |
setModalOpen(false)}
79 | className="max-w-full max-h-full overflow-y-auto"
80 | >
81 |
86 |
87 |
88 | );
89 | };
90 |
91 | export default RoleThread;
92 |
--------------------------------------------------------------------------------
/ui/surfkit/src/components/RoleThreads.js:
--------------------------------------------------------------------------------
1 | import React, { useState } from "react";
2 | import { Typography } from "@material-tailwind/react";
3 | import RoleThread from "./RoleThread";
4 |
5 | const RoleThreads = ({ threads = [] }) => {
6 | // Default threads to an empty array if undefined
7 | const [selectedThreadName, setSelectedThreadName] = useState(
8 | threads[0]?.name || ""
9 | );
10 |
11 | const getSelectedThreadMessages = () => {
12 | const selectedThread = threads.find(
13 | (thread) => thread.name === selectedThreadName
14 | );
15 | return selectedThread?.messages || [];
16 | };
17 | const getSelectedThread = () => {
18 | const selectedThread = threads.find(
19 | (thread) => thread.name === selectedThreadName
20 | );
21 | return selectedThread;
22 | };
23 |
24 | return (
25 |
26 |
27 | {threads.map((thread) => (
28 | setSelectedThreadName(thread.name)}
37 | >
38 | #{thread.name}
39 |
40 | ))}
41 |
42 |
43 |
44 | );
45 | };
46 |
47 | export default RoleThreads;
48 |
--------------------------------------------------------------------------------
/ui/surfkit/src/components/Task.js:
--------------------------------------------------------------------------------
1 | import { Typography, Button, Chip } from "@material-tailwind/react";
2 | import { useRef, useEffect, useState } from "react";
3 | import {
4 | ClipboardIcon,
5 | XCircleIcon,
6 | CheckCircleIcon,
7 | } from "@heroicons/react/24/outline";
8 | import { motion } from "framer-motion";
9 | import { updateTask } from "../api/Tasks";
10 | import RoleThreads from "./RoleThreads";
11 |
12 | export default function Task({ data, addr, token }) {
13 | const endOfMessagesRef = useRef(null);
14 | const prevMessagesLength = useRef(data?.thread?.messages.length || 0);
15 | const [message, setMessage] = useState(null);
16 | const [thread, setThread] = useState("feed");
17 | const messageContainerRef = useRef(null);
18 | const [isUserAtBottom, setIsUserAtBottom] = useState(true);
19 | const [activeMessages, setActiveMessages] = useState([]);
20 |
21 | const threads = data.threads || [];
22 |
23 | const handleCancelTask = () => {
24 | console.log("cancelling task...");
25 | updateTask(addr, data.id, { status: "canceling" }, token);
26 | };
27 |
28 | const handleFailTask = () => {
29 | console.log("failing task...");
30 | updateTask(addr, data.id, { status: "failed" }, token);
31 | };
32 |
33 | const handleCompleteTask = (stat) => {
34 | console.log("completing task...");
35 | updateTask(addr, data.id, { status: "completed" }, token);
36 | };
37 |
38 | const handleKeyDown = async (event) => {
39 | if (event.key === "Enter") {
40 | event.preventDefault();
41 | console.log("message:");
42 | console.log(message);
43 |
44 | var msgData = {
45 | role: "user",
46 | msg: message,
47 | };
48 | console.log("data: ");
49 | console.log(data);
50 | // postTaskMessage(data.id, msgData, token);
51 | setMessage("");
52 | }
53 | };
54 |
55 | useEffect(() => {
56 | console.log("thread changed: ", thread);
57 | }, [thread]);
58 |
59 | const getActiveThreadMessages = () => {
60 | const threadMessages =
61 | threads.find((thrd) => thrd.name === thread)?.messages || [];
62 | return threadMessages;
63 | };
64 |
65 | useEffect(() => {
66 | // Function to handle auto-scrolling logic
67 | const handleAutoScroll = () => {
68 | if (isUserAtBottom && messageContainerRef.current) {
69 | messageContainerRef.current.scrollTop =
70 | messageContainerRef.current.scrollHeight;
71 | }
72 | };
73 |
74 | // Call the auto-scroll function whenever messages are updated
75 | handleAutoScroll();
76 | }, [activeMessages, isUserAtBottom]); // Depend on the messages count
77 |
78 | useEffect(() => {
79 | console.log(`Thread changed to: ${thread}`);
80 | // Log current thread messages for debugging
81 | console.log(
82 | `Current thread messages for ${thread}:`,
83 | getActiveThreadMessages()
84 | );
85 | // Add any additional logic here if you need to fetch new messages when the thread changes
86 | }, [thread]);
87 |
88 | useEffect(() => {
89 | const handleScroll = () => {
90 | if (!messageContainerRef.current) return;
91 |
92 | const { scrollTop, clientHeight, scrollHeight } =
93 | messageContainerRef.current;
94 | // Set isUserAtBottom based on whether the user is scrolled to within 10 pixels of the bottom
95 | setIsUserAtBottom(scrollHeight - scrollTop <= clientHeight + 10);
96 | };
97 |
98 | // Add scroll event listener
99 | const currentContainer = messageContainerRef.current;
100 | currentContainer?.addEventListener("scroll", handleScroll);
101 |
102 | return () => {
103 | // Clean up event listener
104 | currentContainer?.removeEventListener("scroll", handleScroll);
105 | };
106 | }, []);
107 |
108 | useEffect(() => {
109 | setActiveMessages(getActiveThreadMessages());
110 | }, [thread, data.thread?.messages]);
111 |
112 | const getChipColor = (status) => {
113 | switch (status) {
114 | case "completed":
115 | return "green";
116 | case "error":
117 | return "red";
118 | case "failed":
119 | return "red";
120 | case "review":
121 | return "purple";
122 | case "canceled":
123 | return "gray";
124 | case "canceling":
125 | return "yellow";
126 | default:
127 | return "blue";
128 | }
129 | };
130 |
131 | useEffect(() => {
132 | // Check if a new message was added by comparing the current length to the previous one
133 | if (data.thread?.messages.length > prevMessagesLength.current) {
134 | // Use the scrollTo method with top equal to the element's offsetTop. This way, it scrolls within its container.
135 | const scrollContainer = endOfMessagesRef.current?.parentNode;
136 | if (scrollContainer && endOfMessagesRef.current) {
137 | scrollContainer.scrollTo({
138 | top: endOfMessagesRef.current.offsetTop,
139 | behavior: "smooth",
140 | });
141 | }
142 | }
143 | // Update the previous length for the next render
144 | prevMessagesLength.current = data.thread?.messages.length;
145 | }, [data.thread?.messages.length]);
146 | return (
147 |
153 |
154 |
155 |
Task
156 | {data.status &&
157 | data.status !== "completed" &&
158 | data.status !== "failed" &&
159 | data.status !== "review" &&
160 | data.status !== "cancelled" && (
161 |
162 |
169 |
170 | Cancel
171 |
172 |
178 |
179 | Complete
180 |
181 |
182 | )}
183 | {data.status && data.status === "review" && (
184 |
185 |
192 |
193 | Fail
194 |
195 |
201 |
202 | Complete
203 |
204 |
205 | )}
206 |
207 |
208 |
209 |
210 | Description
211 |
212 | {/* TODO: this should be handled better */}
213 |
214 | {data.description}
215 |
216 |
217 |
218 |
219 | Status
220 |
221 |
228 |
229 |
230 |
231 |
232 |
233 | {/*
237 | {getActiveThreadMessages().map((msg, index) => (
238 |
239 |
243 |
249 |
250 | {msg.text}
251 |
252 |
253 |
254 | ))}
255 |
256 |
*/}
257 | {/* {data.status &&
258 | data.status !== "completed" &&
259 | data.status !== "cancelled" &&
260 | data.status !== "failed" && (
261 |
262 | }
266 | onKeyDown={handleKeyDown}
267 | value={message}
268 | onChange={(e) => setMessage(e.target.value)}
269 | />
270 |
271 | )} */}
272 | {data.output && data.output !== "" && (
273 |
274 |
275 | Result
276 |
277 | {data.output}
278 |
279 | )}
280 | {data.error && data.error !== "" && (
281 |
282 |
287 | Error
288 |
289 | {data.error}
290 |
291 | )}
292 |
293 | );
294 | }
295 |
--------------------------------------------------------------------------------
/ui/surfkit/src/index.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
--------------------------------------------------------------------------------
/ui/surfkit/src/index.js:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import ReactDOM from "react-dom/client";
3 | import "./index.css";
4 | import App from "./App";
5 |
6 | import { ThemeProvider } from "@material-tailwind/react";
7 |
8 | const root = ReactDOM.createRoot(document.getElementById("root"));
9 | root.render(
10 |
11 |
12 |
13 |
14 | ,
15 | );
16 |
--------------------------------------------------------------------------------
/ui/surfkit/src/pages/ContainerDesktopPage.js:
--------------------------------------------------------------------------------
1 | import React, { useRef, useEffect, useState } from "react";
2 | import Layout from "../components/Layout";
3 | import { useLocation } from "react-router-dom";
4 | import Task from "../components/Task";
5 | import { getTask } from "../api/Tasks";
6 | import { Typography } from "@material-tailwind/react";
7 |
8 | export default function ContainerDesktopPage() {
9 | const location = useLocation();
10 | const queryParams = new URLSearchParams(location.search);
11 | const agentAddr = queryParams.get("agentAddr") || "http://localhost:9090";
12 | const taskAddr = queryParams.get("taskAddr") || "http://localhost:9070";
13 | const vncAddr = queryParams.get("vncAddr") || "http://localhost:3000";
14 | const taskID = queryParams.get("taskID");
15 | const token = queryParams.get("authToken");
16 |
17 | const [agentTask, setAgentTask] = useState(null);
18 |
19 | const abortControllerRef = useRef(new AbortController());
20 | const timeoutRef = useRef(null);
21 | useEffect(() => {
22 | const handleStart = async () => {
23 | console.log("Starting fetch at:", new Date().toISOString());
24 | const task = await getTask(taskAddr, taskID, token);
25 | if (!task) {
26 | return;
27 | }
28 | setAgentTask(task);
29 | console.log("Tasks updated at:", new Date().toISOString());
30 | console.log(task);
31 | // Schedule the next call
32 | timeoutRef.current = setTimeout(handleStart, 2000);
33 | };
34 |
35 | handleStart(); // Call initially
36 |
37 | return () => {
38 | if (timeoutRef.current) {
39 | clearTimeout(timeoutRef.current); // Clear the timeout if the component unmounts
40 | }
41 | };
42 | }, [taskAddr, taskID, token]);
43 | const ref = useRef();
44 |
45 | return (
46 |
47 |
48 |
49 | {agentTask ? (
50 |
51 | ) : (
52 |
53 | No tasks
54 |
55 | )}
56 |
57 |
58 |
63 |
64 |
65 |
66 | );
67 | }
68 |
--------------------------------------------------------------------------------
/ui/surfkit/src/pages/DesktopPage.js:
--------------------------------------------------------------------------------
1 | import React, { useRef, useEffect, useState } from "react";
2 | import { VncScreen } from "react-vnc";
3 | import Layout from "../components/Layout";
4 | import { useLocation } from "react-router-dom";
5 | import Task from "../components/Task";
6 | import { getTask } from "../api/Tasks";
7 | import { Typography } from "@material-tailwind/react";
8 |
9 | export default function DesktopPage() {
10 | const location = useLocation();
11 | const queryParams = new URLSearchParams(location.search);
12 | const agentAddr = queryParams.get("agentAddr") || "http://localhost:9090";
13 | const taskAddr = queryParams.get("taskAddr") || "http://localhost:9070";
14 | const vncAddr = queryParams.get("vncAddr") || "ws://localhost:6080";
15 | const taskID = queryParams.get("taskID");
16 | const token = queryParams.get("authToken");
17 |
18 | const [agentTask, setAgentTask] = useState(null);
19 |
20 | const abortControllerRef = useRef(new AbortController());
21 | const timeoutRef = useRef(null);
22 | useEffect(() => {
23 | const handleStart = async () => {
24 | console.log("Starting fetch at:", new Date().toISOString());
25 | const task = await getTask(taskAddr, taskID, token);
26 | if (!task) {
27 | return;
28 | }
29 | setAgentTask(task);
30 | console.log("Tasks updated at:", new Date().toISOString());
31 | console.log(task);
32 | // Schedule the next call
33 | timeoutRef.current = setTimeout(handleStart, 2000);
34 | };
35 |
36 | handleStart(); // Call initially
37 |
38 | return () => {
39 | if (timeoutRef.current) {
40 | clearTimeout(timeoutRef.current); // Clear the timeout if the component unmounts
41 | }
42 | };
43 | }, [taskAddr, taskID, token]);
44 | const ref = useRef();
45 |
46 | return (
47 |
48 |
49 |
50 | {agentTask ? (
51 |
52 | ) : (
53 |
54 | No tasks
55 |
56 | )}
57 |
58 |
59 |
72 |
73 |
74 |
75 | );
76 | }
77 |
--------------------------------------------------------------------------------
/ui/surfkit/src/server/Routes.js:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import { Route, Routes } from "react-router-dom";
3 |
4 | import DesktopPage from "../pages/DesktopPage";
5 | import ContainerDesktopPage from "../pages/ContainerDesktopPage";
6 |
7 | export default function AppRoutes() {
8 | return (
9 |
10 | } />
11 | } />
12 |
13 | );
14 | }
15 |
--------------------------------------------------------------------------------
/ui/surfkit/tailwind.config.js:
--------------------------------------------------------------------------------
// Tailwind configuration wrapped with Material Tailwind's `withMT` helper,
// which merges the component library's theme defaults into this config.
const withMT = require("@material-tailwind/react/utils/withMT");

module.exports = withMT({
  // Scan every source file for class names so unused utilities are purged.
  content: ["./src/**/*.{js,jsx,ts,tsx}"],
  theme: {
    extend: {},
  },
  plugins: [],
});
10 |
--------------------------------------------------------------------------------