├── .dockerignore
├── .github
└── workflows
│ └── mdbook.yml
├── .gitignore
├── .pre-commit-config.yaml
├── Dockerfile
├── LICENSE
├── README.Docker.md
├── README.md
├── build.sh
├── compose.yaml
├── docs
├── .gitignore
├── assets
│ └── img
│ │ ├── icon.png
│ │ ├── icon.svg
│ │ └── screenshot_playground_llm.png
├── book.toml
└── src
│ ├── SUMMARY.md
│ ├── api.md
│ ├── assets
│ └── img
│ │ ├── google_flights_search.png
│ │ ├── google_flights_search_recording.png
│ │ └── icon.svg
│ ├── introduction.md
│ ├── quick_start.md
│ ├── tutorials
│ └── browser_recording_to_ai_replay.md
│ └── user_guide
│ ├── installation.md
│ └── quick_start.md
├── iauto
├── __init__.py
├── __main__.py
├── _asyncio.py
├── actions
│ ├── __init__.py
│ ├── action.py
│ ├── buildin
│ │ ├── __init__.py
│ │ ├── collections.py
│ │ ├── db.py
│ │ ├── file.py
│ │ ├── flow.py
│ │ ├── hash.py
│ │ ├── json.py
│ │ ├── log.py
│ │ ├── math.py
│ │ ├── playbook.py
│ │ ├── queue.py
│ │ ├── shell.py
│ │ └── time.py
│ ├── contrib
│ │ ├── __init__.py
│ │ ├── browser.py
│ │ └── webdriver.py
│ ├── executor.py
│ ├── loader.py
│ └── playbook.py
├── agents
│ ├── __init__.py
│ ├── _actions.py
│ ├── executor.py
│ └── model_clients.py
├── api
│ ├── __init__.py
│ ├── _api.py
│ ├── _entry.py
│ ├── _playbooks.py
│ └── _server.py
├── db.py
├── llms
│ ├── __init__.py
│ ├── __main__.py
│ ├── _openai_qwen.py
│ ├── _qwen.py
│ ├── actions.py
│ ├── chatglm.py
│ ├── llama.py
│ ├── llm.py
│ ├── llm_factory.py
│ ├── openai.py
│ └── session.py
├── log.py
└── playground
│ ├── Agents.py
│ ├── __init__.py
│ ├── pages
│ ├── 100_Settings.py
│ ├── 1_Playbooks.py
│ ├── 3_Developer.py.hide
│ ├── 4_Actions.py
│ └── __init__.py
│ ├── playbooks
│ ├── __init__.py
│ ├── agents.yaml
│ └── llm_chat.yaml
│ ├── runner.py
│ ├── st_widgets.py
│ ├── static
│ ├── __init__.py
│ └── ia.png
│ └── utils.py
├── playbooks
├── agents.yaml
├── agents_repl.yaml
├── bing.yaml
├── browser.yaml
├── caixin.yaml
├── collections.yaml
├── control_flow.yaml
├── fetch_links_from_url.yaml
├── get_readability_text_from_url.yaml
├── google_flights_search.yaml
├── google_flights_search_replay_script.json
├── google_news.yaml
├── llama_repl.yaml
├── llm_chat.yaml
├── llm_function_calling.yaml
├── llm_react_repl.yaml
├── openai_repl.yaml
└── webdriver.yaml
├── pydoc-markdown.yaml
├── pyproject.toml
├── release.sh
├── requirements-appium.txt
├── requirements-basic.txt
├── requirements-dev.txt
├── requirements-llm-local.txt
├── requirements-llm.txt
├── requirements-playground.txt
├── requirements-playwright.txt
├── requirements-sql.txt
├── requirements.txt
├── setup.cfg
├── setup.py
├── test.sh
└── tests
├── __init__.py
├── data
└── playbooks
│ ├── playbook_load_test.json
│ └── playbook_load_test.yaml
└── iauto
├── __init__.py
└── actions
├── __init__.py
├── test_json.py
└── test_playbook.py
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Include any files or directories that you don't want to be copied to your
2 | # container here (e.g., local build artifacts, temporary files, etc.).
3 | #
4 | # For more help, visit the .dockerignore file reference guide at
5 | # https://docs.docker.com/go/build-context-dockerignore/
6 |
7 | **/.DS_Store
8 | **/__pycache__
9 | **/.venv
10 | **/.classpath
11 | **/.dockerignore
12 | **/.env
13 | **/.git
14 | **/.gitignore
15 | **/.project
16 | **/.settings
17 | **/.toolstarget
18 | **/.vs
19 | **/.vscode
20 | **/*.*proj.user
21 | **/*.dbmdl
22 | **/*.jfm
23 | **/bin
24 | **/charts
25 | **/docker-compose*
26 | **/compose.y*ml
27 | **/Dockerfile*
28 | **/node_modules
29 | **/npm-debug.log
30 | **/obj
31 | **/secrets.dev.yaml
32 | **/values.dev.yaml
33 | LICENSE
34 | README.md
35 |
--------------------------------------------------------------------------------
/.github/workflows/mdbook.yml:
--------------------------------------------------------------------------------
1 | # Sample workflow for building and deploying a mdBook site to GitHub Pages
2 | #
3 | # To get started with mdBook see: https://rust-lang.github.io/mdBook/index.html
4 | #
5 | name: Deploy mdBook site to Pages
6 |
7 | on:
8 | # Runs on pushes targeting the default branch
9 | push:
10 | branches: ["main"]
11 |
12 | # Allows you to run this workflow manually from the Actions tab
13 | workflow_dispatch:
14 |
15 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
16 | permissions:
17 | contents: read
18 | pages: write
19 | id-token: write
20 |
21 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
22 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
23 | concurrency:
24 | group: "pages"
25 | cancel-in-progress: false
26 |
27 | jobs:
28 | # Build job
29 | build:
30 | runs-on: ubuntu-latest
31 | env:
32 | MDBOOK_VERSION: 0.4.36
33 | steps:
34 | - uses: actions/checkout@v4
35 | - name: Install mdBook
36 | run: |
37 | curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSf | sh -s -- -y
38 | rustup update
39 | cargo install --version ${MDBOOK_VERSION} mdbook
40 | - name: Setup Pages
41 | id: pages
42 | uses: actions/configure-pages@v4
43 | - name: Build with mdBook
44 | run: mdbook build ./docs
45 | - name: Upload artifact
46 | uses: actions/upload-pages-artifact@v3
47 | with:
48 | path: ./docs/build
49 |
50 | # Deployment job
51 | deploy:
52 | environment:
53 | name: github-pages
54 | url: ${{ steps.deployment.outputs.page_url }}
55 | runs-on: ubuntu-latest
56 | needs: build
57 | steps:
58 | - name: Deploy to GitHub Pages
59 | id: deployment
60 | uses: actions/deploy-pages@v4
61 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | .cache
6 | *.spec
7 | build/
8 | dist/
9 | *.egg-info
10 | *.zip
11 | *.tar.gz
12 |
13 | .env
14 | .venv
15 |
16 | # C extensions
17 | *.so
18 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/hhatto/autopep8
3 | rev: v2.0.4
4 | hooks:
5 | - id: autopep8
6 | - repo: https://github.com/pycqa/isort
7 | rev: 5.13.2
8 | hooks:
9 | - id: isort
10 | - repo: https://github.com/pycqa/flake8
11 | rev: 7.0.0
12 | hooks:
13 | - id: flake8
14 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # syntax=docker/dockerfile:1
2 |
3 | # Comments are provided throughout this file to help you get started.
4 | # If you need more help, visit the Dockerfile reference guide at
5 | # https://docs.docker.com/go/dockerfile-reference/
6 |
7 | # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7
8 |
9 | ARG PYTHON_VERSION=3.11.7
10 | FROM python:${PYTHON_VERSION}-slim as base
11 |
12 | # Prevents Python from writing pyc files.
13 | ENV PYTHONDONTWRITEBYTECODE=1
14 |
15 | # Keeps Python from buffering stdout and stderr to avoid situations where
16 | # the application crashes without emitting any logs due to buffering.
17 | ENV PYTHONUNBUFFERED=1
18 |
19 | WORKDIR /app
20 |
21 | # Create a non-privileged user that the app will run under.
22 | # See https://docs.docker.com/go/dockerfile-user-best-practices/
23 | ARG UID=10001
24 | RUN adduser \
25 | --disabled-password \
26 | --gecos "" \
27 | --home "/nonexistent" \
28 | --shell "/sbin/nologin" \
29 | --no-create-home \
30 | --uid "${UID}" \
31 | appuser
32 |
33 | # Download dependencies as a separate step to take advantage of Docker's caching.
34 | # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
35 | # Leverage a bind mount to requirements.txt to avoid having to copy them into
36 | # into this layer.
37 | RUN --mount=type=cache,target=/root/.cache/pip \
38 | --mount=type=bind,source=requirements.txt,target=requirements.txt \
39 | python -m pip install -r requirements.txt
40 |
41 | # Switch to the non-privileged user to run the application.
42 | USER appuser
43 |
44 | # Copy the source code into the container.
45 | COPY . .
46 |
47 | # Expose the port that the application listens on.
48 | EXPOSE 2000
49 |
50 | # Run the application.
51 | CMD python -m iauto serve --host=0.0.0.0 --port=2000
52 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Wang Shenggong
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.Docker.md:
--------------------------------------------------------------------------------
1 | ### Building and running iauto
2 |
3 | When you're ready, start iauto by running:
4 | `docker compose up --build`.
5 |
6 | iauto server will be available at http://localhost:2000.
7 |
8 | ### Deploying iauto server to the cloud
9 |
10 | First, build your image, e.g.: `docker build -t iauto .`.
11 | If your cloud uses a different CPU architecture than your development
12 | machine (e.g., you are on a Mac M1 and your cloud provider is amd64),
13 | you'll want to build the image for that platform, e.g.:
14 | `docker build --platform=linux/amd64 -t iauto .`.
15 |
16 | Then, push it to your registry, e.g. `docker push myregistry.com/iauto`.
17 |
18 | Consult Docker's [getting started](https://docs.docker.com/go/get-started-sharing/)
19 | docs for more detail on building and pushing.
20 |
21 | ### References
22 | * [Docker's Python guide](https://docs.docker.com/language/python/)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | [Documentation](https://shellc.github.io/iauto)
4 |
5 | # iauto
6 |
7 | `iauto` is a low-code engine for building and deploying AI agents.
8 |
9 | - **AI**: Cutting-edge AI capabilities.
10 | - **Low-Code**: Define your agents using YAML.
11 | - **Automation**: Integrated automation frameworks like [Appium](https://github.com/appium/appium) and [Playwright](https://playwright.dev/python/).
12 | - **Extensible**: Well-designed Python API.
13 |
14 | ## News
15 |
16 | - Integrated [autogen](https://github.com/microsoft/autogen): Create your Multi-Agent system efficiently with iauto.
17 | - Run most open source LLMs locally using [llama.cpp](https://github.com/ggerganov/llama.cpp) and [llama-cpp-python](https://github.com/abetlen/llama-cpp-python).
18 |
19 | ## Quick Start
20 |
21 | ### Installation
22 |
23 | Python version requirement: >=3.8
24 |
25 | `iauto` can be installed from PyPI using `pip`. It is recommended to create a new virtual environment before installation to avoid conflicts.
26 |
27 | ```bash
28 | pip install -U iauto
29 | ```
30 |
31 | If you want to run LLM locally, you can enable hardware acceleration in the following ways.
32 |
33 | To enable cuBLAS acceleration on NVIDIA GPU:
34 |
35 | ```bash
36 | CMAKE_ARGS="-DGGML_CUBLAS=ON" pip install -U iauto
37 | ```
38 |
39 | To enable Metal on Apple silicon devices:
40 |
41 | ```bash
42 | CMAKE_ARGS="-DGGML_METAL=ON" pip install -U iauto
43 | ```
44 |
45 | ## Command-line tool
46 |
47 | Usage:
48 |
49 | ```bash
50 | python -m iauto --help
51 |
52 | or
53 |
54 | ia --help
55 | ```
56 |
57 | Run playbook:
58 |
59 | ```bash
60 | ia run ./your-playbook.yaml
61 | ```
62 |
63 | **[Example playbooks](./playbooks)**
64 |
65 | ## Playground
66 |
67 | `iauto` provides a web-based application for running playbooks, LLM Chat, ReAct reasoning, and Multi-Agent tasks.
68 |
69 | Launch playground:
70 |
71 | ```bash
72 | ia playground
73 | ```
74 |
75 | **Screenshot:**
76 |
77 | 
78 |
79 | ## Contribution
80 |
81 | We are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
82 |
83 | ### Development setup
84 |
85 | - Code Style: [PEP-8](https://peps.python.org/pep-0008/)
86 | - Docstring Style: [Google Style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
87 |
88 | ```bash
89 | # Create python venv
90 | python -m venv .venv
91 | source .venv/bin/activate
92 |
93 | # Install dependencies
94 | pip install -r requirements.txt
95 | pip install -r requirements-dev.txt
96 |
97 | # Apply autopep8, isort and flake8 as pre commit hooks
98 | pre-commit install
99 | ```
100 |
101 | ### Build
102 |
103 | ```bash
104 | ./build.sh
105 | ```
106 |
107 | ## License
108 |
109 | [MIT](./LICENSE)
110 |
111 |
icon license: https://openmoji.org/library/emoji-1F9BE
112 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | python -m build
4 | pydoc-markdown --render-toc > docs/src/api.md
5 |
--------------------------------------------------------------------------------
/compose.yaml:
--------------------------------------------------------------------------------
1 | # Comments are provided throughout this file to help you get started.
2 | # If you need more help, visit the Docker Compose reference guide at
3 | # https://docs.docker.com/go/compose-spec-reference/
4 |
5 | # Here the instructions define your application as a service called "server".
6 | # This service is built from the Dockerfile in the current directory.
7 | # You can add other services your application may depend on here, such as a
8 | # database or a cache. For examples, see the Awesome Compose repository:
9 | # https://github.com/docker/awesome-compose
10 | services:
11 | server:
12 | build:
13 | context: .
14 | ports:
15 | - 2000:2000
16 |
17 | # The commented out section below is an example of how to define a PostgreSQL
18 | # database that your application can use. `depends_on` tells Docker Compose to
19 | # start the database before your application. The `db-data` volume persists the
20 | # database data between container restarts. The `db-password` secret is used
21 | # to set the database password. You must create `db/password.txt` and add
22 | # a password of your choosing to it before running `docker compose up`.
23 | # depends_on:
24 | # db:
25 | # condition: service_healthy
26 | # db:
27 | # image: postgres
28 | # restart: always
29 | # user: postgres
30 | # secrets:
31 | # - db-password
32 | # volumes:
33 | # - db-data:/var/lib/postgresql/data
34 | # environment:
35 | # - POSTGRES_DB=example
36 | # - POSTGRES_PASSWORD_FILE=/run/secrets/db-password
37 | # expose:
38 | # - 5432
39 | # healthcheck:
40 | # test: [ "CMD", "pg_isready" ]
41 | # interval: 10s
42 | # timeout: 5s
43 | # retries: 5
44 | # volumes:
45 | # db-data:
46 | # secrets:
47 | # db-password:
48 | # file: db/password.txt
49 |
50 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 |
--------------------------------------------------------------------------------
/docs/assets/img/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/docs/assets/img/icon.png
--------------------------------------------------------------------------------
/docs/assets/img/icon.svg:
--------------------------------------------------------------------------------
1 |
11 |
--------------------------------------------------------------------------------
/docs/assets/img/screenshot_playground_llm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/docs/assets/img/screenshot_playground_llm.png
--------------------------------------------------------------------------------
/docs/book.toml:
--------------------------------------------------------------------------------
1 | [book]
2 | title = "iauto Documentation"
3 | description = "iauto is a Low-Code intelligent automation tool that integrates LLM and RPA."
4 | authors = ["shellc"]
5 | language = "en"
6 | multilingual = false
7 | src = "src"
8 |
9 | [build]
10 | build-dir = "build"
11 |
12 | [output.html]
13 | no-section-label = true
14 | git-repository-url = "https://github.com/shellc/iauto"
15 | edit-url-template = "https://github.com/shellc/iauto/edit/main/docs/{path}"
16 |
--------------------------------------------------------------------------------
/docs/src/SUMMARY.md:
--------------------------------------------------------------------------------
1 | # Summary
2 |
3 | [Introduction](introduction.md)
4 |
5 | # User Guide
6 |
7 | - [Installation](user_guide/installation.md)
8 | - [Quick start](user_guide/quick_start.md)
9 |
10 | # Tutorials
11 |
12 | - [Browser recording to AI replay](tutorials/browser_recording_to_ai_replay.md)
13 |
14 | # Reference Guide
15 |
16 | - [Python API](api.md)
17 |
18 | ## Command Line Tool
19 |
20 | ## Actions
21 |
22 | ## Playbook
23 |
--------------------------------------------------------------------------------
/docs/src/assets/img/google_flights_search.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/docs/src/assets/img/google_flights_search.png
--------------------------------------------------------------------------------
/docs/src/assets/img/google_flights_search_recording.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/docs/src/assets/img/google_flights_search_recording.png
--------------------------------------------------------------------------------
/docs/src/assets/img/icon.svg:
--------------------------------------------------------------------------------
1 |
11 |
--------------------------------------------------------------------------------
/docs/src/introduction.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 |
4 |
5 | `iauto` is a low-code engine for building and deploying AI agents.
6 |
7 | - **AI**: Cutting-edge AI capabilities.
8 | - **Low-Code**: Define your agents using YAML.
9 | - **Automation**: Integrated automation frameworks like [Appium](https://github.com/appium/appium) and [Playwright](https://playwright.dev/python/).
10 | - **Extensible**: Well-designed Python API.
11 |
12 | ## News
13 |
14 | - Integrated [autogen](https://github.com/microsoft/autogen): Create your Multi-Agent system efficiently with iauto.
15 | - Run most open source LLMs locally using [llama.cpp](https://github.com/ggerganov/llama.cpp) and [llama-cpp-python](https://github.com/abetlen/llama-cpp-python).
16 |
--------------------------------------------------------------------------------
/docs/src/quick_start.md:
--------------------------------------------------------------------------------
1 | # Quick Start
2 |
--------------------------------------------------------------------------------
/docs/src/user_guide/installation.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | Python version requirement: >=3.8
4 |
5 | iauto can be installed from PyPI using pip. It is recommended to create a new virtual environment before installation to avoid conflicts.
6 |
7 | ```bash
8 | pip install -U iauto
9 | ```
10 |
11 | To access the latest code, install directly from the GitHub repository.
12 |
13 | ```bash
14 | pip install git+https://github.com/shellc/iauto.git
15 | ```
16 |
17 | ## Enable hardware acceleration for local LLM
18 |
19 | If you want to run LLM locally, you can enable hardware acceleration in the following ways.
20 |
21 | To enable cuBLAS acceleration on NVIDIA GPU:
22 |
23 | ```bash
24 | CMAKE_ARGS="-DGGML_CUBLAS=ON" pip install -U iauto
25 | ```
26 |
27 | To enable Metal on Apple silicon devices:
28 |
29 | ```bash
30 | CMAKE_ARGS="-DGGML_METAL=ON" pip install -U iauto
31 | ```
32 |
--------------------------------------------------------------------------------
/docs/src/user_guide/quick_start.md:
--------------------------------------------------------------------------------
1 | # Quick start
2 |
--------------------------------------------------------------------------------
/iauto/__init__.py:
--------------------------------------------------------------------------------
"""
`iauto` is a Low-Code intelligent automation tool that integrates LLM and RPA.

Classes:
* Playbook
* PlaybookExecutor
"""

from .actions import (Playbook, PlaybookExecutor, execute, execute_in_process,
                      execute_in_thread, load)
# Imported for its import-time side effects (agent action registration); the
# name itself is not used here.
from .agents import _actions
from .llms.actions import register_actions as register_llm_actions

VERSION = "0.1.10"
"""The current version."""

# Register actions

# Make the LLM actions available to playbooks as soon as the package is imported.
register_llm_actions()

__all__ = [
    "Playbook",
    "PlaybookExecutor"
]
25 |
--------------------------------------------------------------------------------
/iauto/__main__.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import importlib
3 | import json
4 | import os
5 | import sys
6 | import time
7 | import traceback
8 |
9 | from dotenv import dotenv_values, load_dotenv
10 |
# Disable pydevd's file validation; set before any module that may import the
# debugger machinery gets loaded.
os.environ["PYDEVD_DISABLE_FILE_VALIDATION"] = "1"

# Variables loaded from the --env file; merged into playbook variables later.
env = {}
14 |
15 |
def load_env(args):
    """Load environment variables from the file named by ``--env``, if given.

    Side effects: exports the values into the process environment via
    ``load_dotenv`` and mirrors them into the module-level ``env`` dict.
    """
    if not args.env:
        return
    load_dotenv(args.env)
    env.update(dotenv_values(args.env))
20 |
21 |
def list_actions():
    """Print every registered action, sorted by name, with a short summary.

    The summary is the first non-empty line of the action's description,
    truncated at the first sentence boundary.
    """
    from iauto.actions import loader

    entries = []
    for action in loader.actions:
        description = action.spec.description or ""
        # First non-empty line of the (possibly multi-line) description.
        summary = next((line for line in description.split('\n') if line != ""), "")
        # Keep only the first sentence.
        dot = summary.find(".")
        if dot > -1:
            summary = summary[:dot + 1]
        entries.append(f"{action.spec.name} : {summary}")

    print('\n'.join(sorted(entries)))
38 |
39 |
def print_action_spec(name):
    """Print the spec of the named action as pretty JSON.

    Args:
        name: The registered action name to look up.
    """
    from iauto.actions import loader

    action = loader.get(name=name)
    if not action:
        print(f"No action found: {name}")
        return
    print(json.dumps(action.spec.model_dump(), ensure_ascii=False, indent=2))
48 |
49 |
def run_playground(args, parser):
    """Launch the playground app, optionally with a custom playbook directory."""
    from iauto.playground import runner

    playbook_dir = os.path.abspath(args.playbooks) if args.playbooks else None
    runner.env.update(env)
    runner.run(app=args.playground_name, playbook_dir=playbook_dir)
58 |
59 |
def run(args, parser):
    """Execute a playbook file, optionally restarting on errors.

    Args:
        args: Parsed CLI namespace; uses ``playbook``, ``kwargs`` and
            ``autorestart``.
        parser: The ``run`` sub-parser, used to print help when no playbook
            path was supplied.

    Exits the process with -1 when the playbook path is missing or invalid.
    """
    if not getattr(args, "playbook", None):
        parser.print_help()
        sys.exit(-1)

    playbook_path = os.path.abspath(args.playbook)
    # isfile() is False for non-existent paths, so a separate exists() check
    # is unnecessary.
    if not os.path.isfile(playbook_path):
        print(f"Invalid playbook file: {playbook_path}")
        sys.exit(-1)

    from iauto.actions import executor

    variables = {}
    if args.kwargs:
        variables.update(args.kwargs)
    # Values from the --env file take precedence over --kwargs.
    variables.update(env)

    while True:
        try:
            result = executor.execute(playbook=playbook_path, variables=variables)
            if result is not None:
                print(result)
            break
        except Exception as e:
            if not args.autorestart:
                # Bare raise preserves the original traceback
                # (``raise e`` would reset it to this frame).
                raise
            print(f"Error: {e}")
            print("Restarting...")
            time.sleep(3)
91 |
92 |
def serve(args, parser):
    """Start the iauto API server with the host/port from the CLI."""
    from iauto.api import start
    start(port=args.port, host=args.host)
96 |
97 |
class ParseDict(argparse.Action):
    """argparse action that collects ``name=value`` tokens into a dict.

    Splits each token on the FIRST ``=`` only, so values may themselves
    contain ``=`` (e.g. ``--kwargs url=http://x?a=b``). The previous
    unbounded split raised ValueError on such inputs.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, dict())
        if values is not None:
            for value in values:
                key, val = value.split('=', 1)
                getattr(namespace, self.dest)[key] = val
105 |
106 |
def parse_args(argv):
    """Build the ``ia`` command-line parser and parse ``argv``.

    Args:
        argv: Argument list, excluding the program name.

    Returns:
        A ``(namespace, parser)`` tuple.
    """
    parser = argparse.ArgumentParser(prog='ia')

    # Global options, valid before any sub-command.
    parser.add_argument('--env', default='.env', metavar="ENV_FILE", help="environment configuration file")
    parser.add_argument('--load', default=None, metavar="module", help="load modules, like: --load module")
    parser.add_argument('--list-actions', action="store_true", help="list actions")
    parser.add_argument('--spec', metavar="action", default=None, help="print action spec")
    parser.add_argument('--log-level', default=None, help="log level, default INFO")
    parser.add_argument('--traceback', action="store_true", help="print error traceback")

    commands = parser.add_subparsers(title="commands", help="type command --help to print help message")
    commands.default = "run"

    # "run": execute a playbook file.
    run_cmd = commands.add_parser('run', help="run a playbook")
    run_cmd.add_argument("playbook", nargs="?", default=None, help="playbook file path")
    run_cmd.add_argument('--kwargs', nargs="*", metavar="name=value",
                         action=ParseDict, help="set playbook variables")
    run_cmd.add_argument('--autorestart', action="store_true", help="Autorestart when error occurs")
    run_cmd.set_defaults(func=lambda args: run(args=args, parser=run_cmd))

    # "playground": launch the web playground.
    playground_cmd = commands.add_parser('playground', help="start playground")
    playground_cmd.add_argument('playground_name', nargs="?", default=None,
                                metavar="PLAYGROUND_NAME", help="playground name")
    playground_cmd.add_argument('--playbooks', default=None, help="playbook dir for playground")
    playground_cmd.set_defaults(func=lambda args: run_playground(args, parser=playground_cmd))

    # "serve": run the API server.
    serve_cmd = commands.add_parser("serve", help="run iauto server")
    serve_cmd.add_argument('--port', default=2000, type=int, help="port for server")
    serve_cmd.add_argument('--host', default="0.0.0.0", help="host for server")
    serve_cmd.set_defaults(func=lambda args: serve(args=args, parser=serve_cmd))

    return parser.parse_args(argv), parser
144 |
145 |
def main():
    """CLI entry point: parse arguments, configure the environment, dispatch.

    Handles the global options (--env, --log-level, --load, --list-actions,
    --spec) before delegating to the selected sub-command's handler.
    """
    args, parser = parse_args(sys.argv[1:])

    load_env(args=args)

    if args.log_level:
        os.environ["IA_LOG_LEVEL"] = args.log_level

    # Import user-specified modules (comma-separated) before running anything,
    # so they can register custom actions.
    if args.load:
        try:
            modules = args.load.split(",")
            for m in modules:
                importlib.import_module(m.strip())
        except ImportError as e:
            # Fixed typo: "moudle" -> "module".
            print(f"Load module error: {e}")
            sys.exit(-1)

    if args.list_actions:
        list_actions()
        sys.exit(0)

    if args.spec:
        print_action_spec(args.spec)
        sys.exit(0)

    if hasattr(args, "func"):
        try:
            args.func(args)
        except Exception as e:
            if args.traceback:
                traceback.print_exc()
            else:
                print(f"Error: {e}")
    else:
        parser.print_help()
181 |
182 |
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly (no traceback) on Ctrl-C.
        print("Bye.\n")
188 |
--------------------------------------------------------------------------------
/iauto/_asyncio.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 |
def ensure_event_loop():
    """Return the running event loop, creating and installing one if absent.

    When called outside a running loop, ``asyncio.get_running_loop`` raises
    ``RuntimeError``; in that case a fresh loop is created and set as the
    current loop for this thread.
    """
    try:
        return asyncio.get_running_loop()
    except RuntimeError:
        fresh_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop=fresh_loop)
        return fresh_loop
11 |
--------------------------------------------------------------------------------
/iauto/actions/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | This module defines the core components of the action system.
3 |
4 | It provides the necessary classes and functions to create, manage, and execute actions
5 | within the context of the playbook execution environment. It includes
6 | definitions for actions, action arguments, action specifications, executors, and playbooks.
7 |
8 | Classes:
9 | * Action: Represents a single action to be executed.
10 | * ActionArg: Defines an argument that can be passed to an action.
11 | * ActionSpec: Contains the specification details of an action.
12 | * Executor: The base class for action executors.
13 | * Playbook: Represents a sequence of actions to be executed as a unit.
14 | * PlaybookExecutor: Responsible for executing the actions defined in a playbook.
15 | * PlaybookRunAction: A special action that represents the execution of a playbook.
16 |
17 | Functions:
18 | * create_action: A factory function to create instances of actions.
19 | * loader: A function to load actions and their specifications.
20 | * register_action: A function to register new actions into the system.
21 |
22 | The module also handles the registration and discovery of built-in actions.
23 | """
24 |
25 | from . import buildin, contrib
26 | from .action import Action, ActionArg, ActionSpec, create
27 | from .executor import (Executor, PlaybookExecutor, execute, execute_in_process,
28 | execute_in_thread)
29 | from .loader import loader, register
30 | from .playbook import Playbook, load
31 |
--------------------------------------------------------------------------------
/iauto/actions/action.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Any, Dict, List, Optional
3 |
4 | from pydantic import BaseModel
5 |
6 |
class ActionArg(BaseModel):
    """
    A class representing an argument for an action.

    Attributes:
        name (str): The name of the argument.
        type (str): The type of the argument, default is "string".
        description (str): A description of what the argument is for.
        required (bool): Whether the argument is required or optional, default is False.
    """
    name: str
    # NOTE(review): type names appear to follow JSON-schema conventions
    # (e.g. "string"), as used by ActionSpec.from_oai_dict — confirm.
    type: str = "string"
    description: str
    required: bool = False
21 |
22 |
class ActionSpec(BaseModel):
    """
    A class representing the specification of an action.

    Attributes:
        name (str): The name of the action.
        description (str): A brief description of what the action does.
        arguments (Optional[List[ActionArg]]): A list of arguments that the action accepts.
    """
    name: str
    description: str
    arguments: Optional[List[ActionArg]] = None

    @staticmethod
    def from_dict(d: Optional[Dict] = None) -> 'ActionSpec':
        """
        Create an ActionSpec instance from a dictionary representation.

        Args:
            d (Dict, optional): The dictionary containing action specification data.
                Defaults to an empty spec when omitted.

        Returns:
            ActionSpec: An instance of ActionSpec created from the provided dictionary.

        Raises:
            ValueError: If the dictionary contains invalid data for creating an ActionSpec.
        """
        d = d or {}
        try:
            spec = ActionSpec(
                name=d.get("name") or "UNNAMED",
                description=d.get("description") or ""
            )

            args = d.get("arguments")
            if args:
                spec.arguments = [ActionArg(**arg) for arg in args]
        except Exception as e:
            raise ValueError(f"Invalid ActionDef: {e}")
        return spec

    @staticmethod
    def from_oai_dict(d: Optional[Dict] = None) -> 'ActionSpec':
        """
        Create an ActionSpec instance from a dictionary following the OpenAI tool format.

        Args:
            d (Dict, optional): The dictionary containing OpenAI tool/function data.

        Returns:
            ActionSpec: An instance of ActionSpec created from the provided OpenAI dictionary.

        Raises:
            ValueError: If the dictionary does not conform to the expected format.
        """
        d = d or {}
        try:
            if d["type"] != "function":
                raise ValueError(f"invalid function type: {d.get('type')}")

            function = d["function"]
            func = ActionSpec(
                name=function["name"],
                description=function.get("description")
            )

            # BUG FIX: "parameters" lives under "function" in the OpenAI tool
            # format (matching what oai_spec() emits), not at the top level.
            params = function.get("parameters")
            if params:
                func.arguments = []
                # BUG FIX: "required" is a list of argument names; the old code
                # called params["required"].get(params), which fails on a list
                # and used the wrong key anyway.
                required = params.get("required") or []
                for param_name, param in params["properties"].items():
                    func.arguments.append(ActionArg(
                        name=param_name,
                        type=param["type"],
                        description=param["description"],
                        required=param_name in required
                    ))
        except Exception as e:
            raise ValueError(f"Invalid ActionDef: {e}")
        return func

    def oai_spec(self) -> Dict:
        """
        Generate an OpenAI tool/function dictionary for this action.

        Returns:
            Dict: A dictionary representing the action in OpenAI tool format.
        """
        args = {}
        required = []

        if self.arguments:
            for arg in self.arguments:
                args[arg.name] = {
                    "type": arg.type,
                    "description": arg.description
                }
                if arg.required:
                    required.append(arg.name)

        return {
            "type": "function",
            "function": {
                # Dots are not valid in OpenAI function names.
                "name": self.name.replace(".", "_"),
                "description": self.description,
                "parameters": {
                    "type": "object",
                    "properties": args,
                    "required": required
                }
            }
        }
132 |
133 |
class Action(ABC):
    """
    Abstract base class for an action.

    An action defines a single operation that can be performed. Actions are typically
    executed by an Executor within the context of a Playbook.

    Attributes:
        spec (ActionSpec): The specification of the action.
    """

    def __init__(self) -> None:
        """Initialize the action with a placeholder specification."""
        super().__init__()
        self.spec = ActionSpec(name="UNNAMED", description="")

    @abstractmethod
    def perform(
        self,
        *args,
        **kwargs
    ) -> Any:
        """
        Execute the action with the given arguments.

        Args:
            *args: Positional arguments for the action.
            **kwargs: Keyword arguments for the action.

        Returns:
            Any: The result of the action execution.

        Raises:
            NotImplementedError: If the method is not implemented by the subclass.
        """
        raise NotImplementedError()

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        """Invoke the action like a plain function; delegates to perform()."""
        return self.perform(*args, **kwargs)

    def copy(self):
        """
        Create a copy of the Action instance.

        Returns:
            Action: A new instance of the same class sharing this action's spec.
        """
        duplicate = type(self)()
        duplicate.spec = self.spec
        return duplicate
196 |
197 |
class FunctionAction(Action):
    """
    A concrete Action that delegates execution to a wrapped Python callable.

    Attributes:
        _func (Callable): The wrapped callable.
        spec (ActionSpec): The specification of the action.
    """

    def __init__(self, func, spec: Optional[Dict] = None) -> None:
        """
        Wrap *func* as an Action, optionally attaching a specification.

        Args:
            func (Callable): The Python callable that this action will execute.
            spec (Optional[Dict]): A dictionary representing the action's specification.
        """
        super().__init__()
        self._func = func
        if spec:
            self.spec = ActionSpec.from_dict(spec)

    def perform(self, *args, **kwargs) -> Any:
        """
        Call the wrapped function, forwarding all arguments.

        Args:
            *args: Positional arguments for the callable.
            **kwargs: Keyword arguments for the callable.

        Returns:
            Any: Whatever the wrapped callable returns.
        """
        return self._func(*args, **kwargs)
236 |
237 |
def create(func, spec: Dict) -> Action:
    """
    Factory function to create a FunctionAction.

    Args:
        func (Callable): The Python callable that the action will execute.
        spec (Dict): A dictionary representing the action's specification.

    Returns:
        Action: A FunctionAction instance wrapping the callable.
    """
    action = FunctionAction(func=func, spec=spec)
    return action
250 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/__init__.py:
--------------------------------------------------------------------------------
1 | from ..loader import loader
2 | from . import (collections, db, file, flow, hash, json, log, math, playbook,
3 | queue, shell, time)
4 |
# Built-in actions, keyed by the name under which they are registered.
actions = {
    "playbook": playbook.PlaybookAction(),
    "setvar": playbook.SetVarAction(),
    "log": log.LogAction(),
    "echo": log.EchoAction(),

    "repeat": flow.RepeatAction(),
    "when": flow.WhenAction(),
    "each": flow.ForEachAction(),

    "list.append": collections.ListAppendAction(),
    "dict.set": collections.DictSetAction(),
    "dict.get": collections.DictGetAction(),

    "time.wait": time.WaitAction(),
    "time.now": time.GetNow(),

    "math.mod": math.ModAction(),

    "shell.cmd": shell.ShellCommandAction(),
    "shell.print": shell.PrintAction(),
    "shell.prompt": shell.PromptAction(),
    "file.write": file.FileWriteAction(),
}

loader.register(actions)
35 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/collections.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Optional
2 |
3 | from ..action import Action, ActionSpec
4 | from ..executor import Executor
5 | from ..loader import register
6 | from ..playbook import Playbook
7 |
8 |
class ListAppendAction(Action):
    """Appends a value to a list, creating the list when given an unresolved variable name."""

    def __init__(self) -> None:
        super().__init__()
        self.spec = ActionSpec.from_dict({
            "name": "list.append",
            "description": "Add an element to the end of the list."
        })

    def perform(
        self,
        *args,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> Any:
        if executor is None:
            raise ValueError("executor is None")
        if len(args) != 2:
            raise ValueError("list.append needs 2 args, like: [$list, $value]")

        target, value = args
        # An unresolved "$name" string means the variable does not exist yet:
        # bind a fresh list to it before appending.
        if isinstance(target, str) and target.startswith("$"):
            fresh: list = []
            executor.set_variable(target, fresh)
            target = fresh

        if not isinstance(target, list):
            raise ValueError("args[0] is not a list")

        target.append(value)
39 |
40 |
class DictSetAction(Action):
    """Sets a key/value pair in a dictionary, creating the dictionary when
    given an unresolved "$name" variable reference."""

    def __init__(self) -> None:
        super().__init__()
        self.spec = ActionSpec.from_dict({
            "name": "dict.set",
            "description": "Set a key-value pair in a dictionary.",

            "arguments": [
                {
                    "name": "d",
                    "type": "dict",
                    "description": "The dictionary in which to set the key-value pair.",
                    "required": True
                },
                {
                    "name": "key",
                    "type": "str",
                    "description": "The key for the value to set in the dictionary.",
                    "required": True
                },
                {
                    "name": "value",
                    "type": "any",
                    "description": "The value to set for the given key in the dictionary.",
                    "required": True
                }
            ]
        })

    def perform(
        self,
        d: dict,
        key: str,
        value: Any,  # annotation fixed: spec declares type "any", not str
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> Any:
        """Set d[key] = value; binds a fresh dict to an unresolved "$name" first.

        Raises:
            ValueError: If executor is None or d is not (or cannot become) a dict.
        """
        if executor is None:
            raise ValueError("executor is None")

        # An unresolved "$name" string means the variable does not exist yet:
        # create an empty dict and register it under that name.
        if isinstance(d, str) and d.startswith("$"):
            var = d
            d = {}
            executor.set_variable(var, d)

        if not isinstance(d, dict):
            raise ValueError("args[0] is not a dict")

        d[key] = value
91 |
92 |
class DictGetAction(Action):
    """Looks up a key in a dictionary and returns the associated value (or None)."""

    def __init__(self) -> None:
        super().__init__()
        self.spec = ActionSpec.from_dict({
            "name": "dict.get",
            "description": "Get a value by key from a dictionary.",
            "arguments": [
                {
                    "name": "d",
                    "type": "dict",
                    "description": "The dictionary from which to get the value.",
                    "required": True
                },
                {
                    "name": "key",
                    "type": "str",
                    "description": "The key for the value to retrieve from the dictionary.",
                    "required": True
                }
            ]
        })

    def perform(
        self,
        d: dict,
        key: str,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> Any:
        # None is not a dict, so a single isinstance check covers both cases.
        if not isinstance(d, dict):
            raise ValueError("invalid dict")
        return d.get(key)
126 |
127 |
128 | @register(name="list.clear", spec={
129 | "description": "Clear the list."
130 | })
131 | def list_clear(ls, **kwargs):
132 | if not isinstance(ls, list):
133 | raise ValueError("invalid list")
134 | ls.clear()
135 |
136 |
137 | @register(name="dict.clear", spec={
138 | "description": "Clear the dict."
139 | })
140 | def dict_clear(d, **kwargs):
141 | if not isinstance(d, dict):
142 | raise ValueError("invalid dict")
143 | d.clear()
144 |
145 |
146 | @register(name="len", spec={
147 | "description": "Get the length."
148 | })
149 | def length(c, **kwargs):
150 | if not isinstance(c, list) and not isinstance(c, dict):
151 | raise ValueError("invalid args")
152 | return len(c)
153 |
154 |
155 | @register(name="list.get", spec={
156 | "description": "Get the list from list."
157 | })
158 | def list_get(ls, idx, **kwargs):
159 | if not isinstance(ls, list):
160 | raise ValueError("invalid args")
161 | return ls[idx]
162 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/db.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, Optional
2 |
3 | import pandas as pd
4 | import sqlalchemy
5 |
6 | from ..loader import register
7 |
8 |
9 | @register(name="db.create_engine", spec={
10 | "description": "Create a new SQLAlchemy engine instance.",
11 | "arguments": [
12 | {
13 | "name": "url",
14 | "description": "Database connection URL in the SQLAlchemy format.",
15 | "type": "string",
16 | "required": True
17 | }
18 | ]
19 | })
20 | def create_engine(*args, url: str, **kwargs) -> sqlalchemy.Engine:
21 | return sqlalchemy.create_engine(url)
22 |
23 |
24 | @register(name="db.read", spec={
25 | "description": "Read data from the database into a DataFrame or a list of dictionaries.",
26 | "arguments": [
27 | {
28 | "name": "sql",
29 | "description": "SQL query string to be executed.",
30 | "type": "string",
31 | "required": True
32 | },
33 | {
34 | "name": "return_type",
35 | "description": "The format in which to return the data. Options are 'dataframe' or 'dict'. Defaults to 'dataframe'.", # noqa: E501
36 | "type": "string",
37 | "required": False
38 | }
39 | ]
40 | })
41 | def read(
42 | *args,
43 | engine: sqlalchemy.Engine,
44 | sql: str,
45 | return_type: Optional[str] = None,
46 | **kwargs
47 | ) -> Any:
48 | df = pd.read_sql(sql=sql, con=engine)
49 |
50 | return_type = (return_type or "").lower()
51 | if return_type == "dataframe":
52 | return df
53 | else:
54 | return df.to_dict(orient='records')
55 |
56 |
57 | @register(name="db.exec", spec={
58 | "description": "Execute an SQL statement.",
59 | "arguments": [
60 | {
61 | "name": "sql",
62 | "description": "SQL statement to be executed.",
63 | "type": "string",
64 | "required": True
65 | },
66 | {
67 | "name": "values",
68 | "description": "Optional dictionary of parameters to pass to the SQL statement.",
69 | "type": "dict",
70 | "required": False
71 | }
72 | ]
73 | })
74 | def exec(
75 | *args,
76 | engine: sqlalchemy.Engine,
77 | sql: str,
78 | values: Optional[Dict] = None,
79 | **kwargs
80 | ) -> Any:
81 | text = sqlalchemy.text(sql)
82 |
83 | with engine.connect() as conn:
84 | if values is None:
85 | conn.execute(text)
86 | else:
87 | conn.execute(text, values)
88 | conn.commit()
89 |
90 |
91 | @register(name="db.select", spec={
92 | "description": "Execute a select statement and return the results.",
93 | "arguments": [
94 | {
95 | "name": "sql",
96 | "description": "SQL select query to be executed.",
97 | "type": "string",
98 | "required": True
99 | },
100 | {
101 | "name": "values",
102 | "description": "Optional dictionary of parameters to pass to the SQL select query.",
103 | "type": "dict",
104 | "required": False
105 | }
106 | ]
107 | })
108 | def select(
109 | *args,
110 | engine: sqlalchemy.Engine,
111 | sql: str,
112 | values: Optional[Dict] = None,
113 | **kwargs
114 | ) -> Any:
115 | text = sqlalchemy.text(sql)
116 |
117 | with engine.connect() as conn:
118 | if values is None:
119 | cursor = conn.execute(text)
120 | else:
121 | cursor = conn.execute(text, values)
122 |
123 | columns = list(cursor.keys())
124 | rows = cursor.all()
125 |
126 | if len(rows) == 0:
127 | return None
128 | elif len(rows) == 1:
129 | return dict(zip(columns, rows[0]))
130 | else:
131 | return [dict(zip(columns, r)) for r in rows]
132 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/file.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from typing import Any, Dict, List, Optional, Union
4 |
5 | from ..action import Action, ActionSpec
6 | from ..loader import register
7 |
8 |
class FileWriteAction(Action):
    """Writes text (or JSON-serialized list/dict) content to a file."""

    def __init__(self) -> None:
        super().__init__()
        self.spec = ActionSpec.from_dict({
            "name": "file.write",
            "description": "Writes the specified content to a file at the given path.",
            "arguments": [
                {
                    "name": "file",
                    "description": "The path to the file where content will be written",
                    "type": "string",
                    "required": True
                },
                {
                    "name": "mode",
                    "description": "The file writing mode ('w' for writing, 'a' for appending, etc.). Defaults to 'w' if not specified.", # noqa: E501
                    "type": "string",
                    "required": False
                },
                {
                    # BUG FIX: this argument name was previously misspelled "cotent".
                    "name": "content",
                    "description": "The content to be written to the file",
                    "type": "string",
                    "required": True
                }
            ]
        })

    def perform(
        self,
        file: str,
        content: Union[str, Dict, List],
        mode: str = "w",
        model: Optional[str] = None,
        **kwargs
    ) -> Any:
        """Write *content* to *file*.

        Args:
            file: Path of the file to open.
            content: Text to write; lists and dicts are JSON-encoded first.
            mode: File open mode, defaults to "w". BUG FIX: the parameter was
                previously misspelled "model", so a playbook passing the
                spec-declared "mode" argument was silently ignored.
            model: Deprecated alias for "mode", kept for backward compatibility
                with callers using the old misspelled keyword.

        Returns:
            Any: None; the content is written as a side effect.
        """
        if model is not None:
            mode = model

        # Serialize structured content as JSON so playbooks can write dicts/lists.
        if isinstance(content, (list, dict)):
            content = json.dumps(content, ensure_ascii=False)

        with open(file=file, mode=mode) as f:
            f.write(content)
43 |
44 |
45 | @register(name="file.exists", spec={
46 | "description": "Determines if a file exists at the given path.",
47 | "arguments": [
48 | {
49 | "name": "file",
50 | "description": "The path to the file to be checked for existence.",
51 | "type": "string",
52 | "required": True
53 | }
54 | ]
55 | })
56 | def file_exists(file: Optional[str] = None, **kwargs) -> bool:
57 | if file is None:
58 | raise ValueError(f"invalid args: {file}")
59 | return os.path.exists(file) and os.path.isfile(file)
60 |
61 |
62 | @register(name="file.rename", spec={
63 | "description": "Rename file.",
64 | "arguments": [
65 |
66 | ]
67 | })
68 | def file_rename(src: str = None, dst: str = None, **kwargs) -> bool:
69 | return os.rename(src, dst)
70 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/flow.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, Optional
2 |
3 | from ..action import Action, ActionSpec
4 | from ..executor import Executor
5 | from ..playbook import Playbook
6 |
7 | _operators = set(["not", "all", "any", "lt", 'le', 'eq', 'ne', 'ge', 'gt', 'in'])
8 |
9 |
def is_operator(d):
    """Return True when *d* is a dict whose keys include at least one known operator."""
    return isinstance(d, Dict) and any(key in _operators for key in d.keys())
15 |
16 |
def eval_operator(operator, vars=None) -> bool:
    """Evaluate a single operator expression against a variable table.

    Supported operators:
        not: not true
        all: all true
        any: any is true
        lt: less than
        le: less than or equal to
        eq: equal to
        ne: not equal to
        ge: greater than or equal to
        gt: greater than
        in: contains

    Args:
        operator: A single-key dict, e.g. {"eq": ["$x", 1]}.
        vars: Variable table used to resolve "$name" operands.

    Raises:
        ValueError: On malformed operators or unknown operator names.
    """
    vars = vars or {}

    if not isinstance(operator, Dict) or len(operator) != 1:
        raise ValueError(f"Invalid operator: {operator}")

    o = list(operator.keys())[0]

    if o not in _operators:
        raise ValueError(f"Invalid operator: {o}")

    values = operator.get(o) or []
    if not isinstance(values, list):
        values = [values]
    # Copy before substitution so the caller's playbook structure is not mutated.
    values = values[::]

    if not (o == "not" or o == "all" or o == "any") and len(values) != 2:
        raise ValueError(f"operator requires 2 args: {operator}")

    # Resolve "$name" references from the variable table.
    for i in range(len(values)):
        v = values[i]
        if v is not None and isinstance(v, str) and v.startswith("$"):
            values[i] = vars.get(v)

    if o == "not" or o == "all" or o == "any":
        results = []
        for v in values:
            if is_operator(v):
                # BUG FIX: propagate vars so "$name" operands inside nested
                # operators are resolved; previously nested operators always
                # saw an empty variable table.
                r = eval_operator(v, vars=vars)
            else:
                r = bool(v)
            results.append(r)
        if o == "not":
            if len(results) != 1:
                raise ValueError(f"operator not requires 1 args: {values}")
            return not results[0]
        elif o == "all":
            return all(results)
        elif o == "any":
            return any(results)
        else:
            raise ValueError(f"Bug: {operator}")
    elif o == "lt":
        return values[0] < values[1]
    elif o == "le":
        return values[0] <= values[1]
    elif o == "eq":
        return values[0] == values[1]
    elif o == "ne":
        return values[0] != values[1]
    elif o == "ge":
        return values[0] >= values[1]
    elif o == "gt":
        return values[0] > values[1]
    elif o == "in":
        return values[0] in values[1]
    else:
        raise ValueError(f"Bug: {operator}")
85 |
86 |
def eval_operators(operators, vars=None) -> bool:
    """Evaluate an operator expression (dict), a conjunction of them (list),
    or a plain value (truthiness); None evaluates to False.

    Args:
        operators: The condition expression to evaluate.
        vars: Variable table used to resolve "$name" operands.
    """
    vars = vars or {}
    if operators is None:
        return False
    elif isinstance(operators, dict):
        return eval_operator(operator=operators, vars=vars)
    elif isinstance(operators, list):
        # BUG FIX: pass vars through the recursion; previously "$name"
        # operands inside a list of conditions were never resolved.
        r = [eval_operators(x, vars=vars) for x in operators]
        return all(r)
    else:
        return bool(operators)
97 |
98 |
def eval_args(args, kwargs, vars={}):
    """Evaluate positional and keyword condition expressions; both must hold.

    Empty args/kwargs each count as True, so an absent condition never blocks.
    """
    ok_args = eval_operators(args, vars=vars) if len(args) > 0 else True
    ok_kwargs = eval_operators(kwargs, vars=vars) if len(kwargs) > 0 else True
    return ok_args and ok_kwargs
110 |
111 |
class RepeatAction(Action):
    """Repeatedly executes the playbook's nested actions.

    Two modes:
      * A single int positional argument repeats the nested actions that many times.
      * Otherwise the playbook's raw args are treated as a condition that is
        re-evaluated before every iteration (while-loop semantics).
    """

    def __init__(self) -> None:
        super().__init__()
        self.spec = ActionSpec.from_dict({
            "name": "repeat",
            "description": "Repeats the execution of actions based on the provided conditions.",
        })

    def perform(
        self,
        *args,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> Any:
        """Run the nested actions repeatedly; returns the last action's result.

        Raises:
            ValueError: If executor or playbook is None.
        """
        if executor is None or playbook is None:
            raise ValueError("executor and playbook can't be None")
        actions = playbook.actions or []

        result = None

        if len(kwargs) == 0 and len(args) == 1 and isinstance(args[0], int):
            # Fixed-count mode: repeat the body N times.
            for _ in range(args[0]):
                for action in actions:
                    result = executor.perform(playbook=action)
        else:
            # Conditional mode: re-evaluate the raw playbook args after each
            # pass so variable changes made by the body are observed.
            args, kwargs = executor.eval_args(args=playbook.args)
            while eval_args(args, kwargs, vars=executor.variables):

                for action in actions:
                    result = executor.perform(playbook=action)

                args, kwargs = executor.eval_args(args=playbook.args)
        return result
146 |
147 |
class WhenAction(Action):
    """Runs its nested actions only when the given condition evaluates to true."""

    def __init__(self) -> None:
        super().__init__()
        self.spec = ActionSpec.from_dict({
            "name": "when",
            "description": "Executes the contained actions if the specified condition evaluates to true.",
        })

    def perform(
        self,
        *args,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> None:
        if executor is None or playbook is None:
            raise ValueError("executor and playbook can't be None")

        result = None
        # The result of the last nested action is returned; None when the
        # condition does not hold or there are no nested actions.
        if eval_args(args, kwargs, vars=executor.variables):
            for nested in (playbook.actions or []):
                result = executor.perform(playbook=nested)
        return result
172 |
173 |
class ForEachAction(Action):
    """Runs the nested actions once per item, exposing the current item as "$_"."""

    def __init__(self) -> None:
        super().__init__()
        self.spec = ActionSpec.from_dict({
            "name": "each",
            "description": "Executes the actions for each item in the provided iterable.",
        })

    def perform(
        self,
        *args,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> Any:
        if executor is None or playbook is None:
            raise ValueError("executor and playbook can't be None")

        actions = playbook.actions or []
        if not actions:
            return

        # Normalize the input into an iterable: a single list arg iterates its
        # elements, multiple args iterate the args, kwargs become one item.
        if args:
            if len(args) == 1 and isinstance(args[0], list):
                data = args[0]
            else:
                data = args
        elif kwargs:
            data = [kwargs]
        else:
            data = []

        result = None
        for item in data:
            # Expose the current item to nested actions as "$_".
            executor.set_variable("$_", item)
            for action in actions:
                result = executor.perform(playbook=action)
        return result
214 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/hash.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import uuid as _uuid
3 |
4 | from ..loader import register
5 |
6 | namespace = _uuid.uuid1()
7 |
8 |
9 | @register(name="uuid", spec={
10 | "description": "Generate a version 5 UUID using SHA1 hash.",
11 | })
12 | def uuid(*args, **kwargs):
13 | return _uuid.uuid5(namespace, _uuid.uuid1().hex).hex
14 |
15 |
16 | @register(name="sha1", spec={
17 | "description": "Generate a SHA1 hash of the given input string.",
18 | "arguments": [
19 | {
20 | "name": "s",
21 | "type": "string",
22 | "description": "The input string to hash with SHA1.",
23 | "required": True
24 | }
25 | ],
26 | })
27 | def sha1(s: str, *args, **kwargs):
28 | m = hashlib.sha1()
29 | m.update(s.encode())
30 | return m.digest().hex()
31 |
32 |
33 | @register(name="sha256", spec={
34 | "description": "Generate a SHA256 hash of the given input string.",
35 | "arguments": [
36 | {
37 | "name": "s",
38 | "type": "string",
39 | "description": "The input string to hash with SHA256.",
40 | "required": True
41 | }
42 | ],
43 | })
44 | def sha256(s: str, *args, **kwargs):
45 | m = hashlib.sha256()
46 | m.update(s.encode())
47 | return m.digest().hex()
48 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/json.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import Any
3 |
4 | from ..loader import register
5 |
6 |
7 | @register(name="json.loads", spec={
8 | "description": "Convert a JSON-formatted string into a Python object.",
9 | "arguments": [
10 | {
11 | "name": "s",
12 | "description": "The JSON-encoded string to be deserialized.",
13 | "type": "string",
14 | "required": True
15 | }
16 | ]
17 | })
18 | def loads(s, **kwargs) -> Any:
19 | return json.loads(s)
20 |
21 |
22 | @register(name="json.load", spec={
23 | "description": "Read a JSON file and convert its contents to a Python object.",
24 | "arguments": [
25 | {
26 | "name": "file",
27 | "description": "The file object representing the JSON file to deserialize.",
28 | "type": "string",
29 | "required": True
30 | }
31 | ]
32 | })
33 | def load(file: str, **kwargs):
34 | with open(file, "r", encoding="utf-8") as f:
35 | return json.loads(f.read())
36 |
37 |
38 | @register(name="json.dumps", spec={
39 | "description": "Convert a Python object into a JSON-formatted string.",
40 | "arguments": [
41 | {
42 | "name": "obj",
43 | "description": "The Python object to be serialized as a JSON-encoded string.",
44 | "type": "object",
45 | "required": True
46 | }
47 | ]
48 | })
49 | def dumps(obj, **kwargs) -> Any:
50 | return json.dumps(obj=obj, ensure_ascii=False)
51 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/log.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Optional
2 |
3 | from ...log import get_logger
4 | from ..action import Action, ActionSpec
5 | from ..executor import Executor
6 | from ..playbook import Playbook
7 |
8 |
class LogAction(Action):
    """Logs its arguments through the framework logger at INFO level."""

    def __init__(self) -> None:
        super().__init__()
        self._log = get_logger("Log")

        self.spec = ActionSpec.from_dict({
            "name": "log",
            "description": "Logs a message to the terminal.",
        })

    def perform(
        self,
        *args,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> None:
        # Positional args are comma-joined; keyword args are appended as a
        # dict repr directly after them (no separator), as before.
        parts = []
        if args:
            parts.append(', '.join(str(x) for x in args))
        if kwargs:
            parts.append(str(kwargs))
        self._log.info("".join(parts))
35 |
36 |
class EchoAction(Action):
    """Returns its input unchanged: one arg as-is, several as a list, kwargs as a dict."""

    def __init__(self) -> None:
        super().__init__()
        self.spec = ActionSpec.from_dict({
            "name": "echo",
            "description": "Echoes the input arguments back to the caller.",
        })

    def perform(
        self,
        *args,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> Any:
        if args:
            # A single positional arg is unwrapped; several become a list.
            return args[0] if len(args) == 1 else list(args)
        if kwargs:
            return kwargs
        return None
61 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/math.py:
--------------------------------------------------------------------------------
1 | from ..action import Action, ActionSpec
2 |
3 |
class ModAction(Action):
    """Computes the remainder of an integer division."""

    def __init__(self) -> None:
        super().__init__()

        self.spec = ActionSpec.from_dict({
            "name": "math.mod",
            "description": "Calculates the remainder of the division of two numbers.",
            "arguments": [
                {
                    "name": "l",
                    "type": "int",
                    "description": "The dividend in the division operation.",
                    "required": True
                },
                {
                    "name": "r",
                    "type": "int",
                    "description": "The divisor in the division operation.",
                    "required": True
                }
            ],
        })

    def perform(
        self,
        l: int,
        r: int,
        **kwargs
    ) -> int:
        """Return l modulo r."""
        remainder = l % r
        return remainder
34 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/playbook.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Any, Optional
3 |
4 | from ..action import Action, ActionSpec
5 | from ..executor import Executor
6 | from ..playbook import Playbook
7 | from ..playbook import load as playbook_load
8 |
9 |
class PlaybookAction(Action):
    def __init__(self) -> None:
        """
        Initializes a new instance of the PlaybookAction class.

        This action serves as the top-level Action used to execute other Actions by
        loading and performing them using the provided executor and playbook.
        """
        super().__init__()

        self.spec = ActionSpec.from_dict({
            "name": "playbook",
            "description": "Executes a series of actions defined within a playbook."
        })

    def perform(
        self,
        *args,
        execute: Optional[bool] = True,
        executor: Executor,
        playbook: Playbook,
        **kwargs
    ) -> Any:
        """
        Performs the action of executing a series of other actions defined within a playbook.

        This method takes a variable number of arguments and keyword arguments. It requires
        an executor and a playbook to be provided to perform the actions. The method sets
        variables in the executor from the provided keyword arguments and then loads and
        executes actions either from the provided playbook or from playbook paths specified
        in the args.

        Args:
            *args: Variable length argument list containing playbook paths as strings.
            execute (Optional[bool]): Return wrapped PlaybookRunAction objects instead
                of executing when execute is False.
            executor (Executor): The executor to perform the actions. Must not be None.
            playbook (Playbook): The playbook containing actions to be executed. Must not be None.
            **kwargs: Arbitrary keyword arguments which are set as variables in the executor.

        Raises:
            ValueError: If either executor or playbook is None, or if any of the args are not
                        valid playbook paths as strings.

        Returns:
            Any: The result of the last action performed by the executor, or a list of
                 PlaybookRunAction objects when execute is False.
        """

        if executor is None or playbook is None:
            raise ValueError("Executor and playbook are required.")

        # Expose caller-supplied kwargs to nested actions as "$name" variables.
        for k, v in kwargs.items():
            executor.set_variable(f"${k}", v)

        actions = []

        if len(args) > 0:
            # Resolve relative playbook paths against the root playbook's directory.
            fpath = playbook.metadata.get("__root__")
            # fname = executor.variables.get("__file__")
            # if fname is not None:
            #     fpath = os.path.dirname(fname)
            for p in args:
                if not isinstance(p, str):
                    raise ValueError(f"Invalid playbook path: {p}")
                if not os.path.isabs(p) and fpath is not None:
                    p = os.path.join(fpath, p)

                pb = playbook_load(p)
                actions.append(pb)

        # Inline actions of this playbook run after any loaded playbook files.
        actions.extend(playbook.actions or [])

        if execute:
            result = None
            for action in actions:
                result = executor.perform(playbook=action)

            return result
        else:
            # Deferred mode: wrap each action so the caller can run it later.
            pb_run_actions = []
            for action in actions:
                pb_run = PlaybookRunAction(executor=executor, playbook=action)
                pb_run_actions.append(pb_run)

            return pb_run_actions
94 |
95 |
class PlaybookRunAction(Action):
    """Wraps a loaded playbook so it can later be performed as a single action."""

    def __init__(self, executor: Executor, playbook: Playbook) -> None:
        """
        Initialize a PlaybookRunAction with a specific executor and playbook.

        Args:
            executor (Executor): The executor that will perform the actions in the playbook.
            playbook (Playbook): The playbook containing the actions to be executed.
        """
        super().__init__()
        self._executor = executor
        self._playbook = playbook
        self.spec = playbook.spec

    def perform(
        self,
        *args,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> Any:
        """
        Run the wrapped playbook with the executor captured at construction.

        Keyword arguments are published as "$name" variables before execution.
        The executor/playbook parameters are accepted only for interface
        compatibility and are ignored; the instances given to __init__ are used.

        Returns:
            Any: The result of the last action performed by the executor.
        """
        for key, value in kwargs.items():
            self._executor.set_variable(f"${key}", value)
        return self._executor.perform(playbook=self._playbook)
137 |
138 |
class SetVarAction(Action):
    """Action that stores a value on the executor under a `$`-prefixed name."""

    def __init__(self) -> None:
        super().__init__()
        spec_dict = {
            "name": "setvar",
            "description": "Sets a variable to a specified value. Usage: setvar: [variable_name, value]",
            "arguments": [
                {
                    "name": "name",
                    "type": "string",
                    "description": "The name of the variable to set.",
                    "required": True
                },
                {
                    "name": "value",
                    "type": "any",
                    "description": "The value to assign to the variable.",
                    "required": True
                }
            ]
        }
        self.spec = ActionSpec.from_dict(spec_dict)

    def perform(
        self,
        name: str,
        value: Any,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        **kwargs
    ) -> Any:
        """Set the executor variable `$<name>` to `value`.

        Raises:
            ValueError: If no executor is supplied.
        """
        if executor is None:
            raise ValueError("executor is None")
        executor.set_variable(f"${name}", value)
173 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/queue.py:
--------------------------------------------------------------------------------
1 | import queue
2 |
3 | from ..loader import register
4 |
5 |
@register(name="queue.create", spec={
    "description": "Create a queue"
})
def create_queue(**kwargs):
    """Return a new, unbounded FIFO queue."""
    q = queue.Queue()
    return q
11 |
12 |
@register(name="queue.put", spec={
    "description": "Put item into the queue.",
    "arguments": [
        {
            "name": "item",
            "type": "object",
            "description": "The object will be put into the queue.",
            "required": True
        }
    ]
})
def put(q: queue.Queue, item, **kwargs):
    """Enqueue `item` without blocking (equivalent to put(block=False))."""
    return q.put_nowait(item)
26 |
27 |
@register(name="queue.get", spec={
    "description": "Remove and return an item from the queue.",
    "arguments": [
        {
            "name": "block",
            "type": "bool",
            "description": "If true, block until an item is available, otherwise return an item or None",
            "required": False
        }
    ]
})
def get(q: queue.Queue, block: bool = True, **kwargs):
    """Dequeue one item; a non-blocking read of an empty queue yields None."""
    try:
        item = q.get(block=block)
    except queue.Empty:
        return None
    return item
44 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/shell.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import sys
4 | from typing import Any, Optional
5 |
6 | from prompt_toolkit import prompt as prompt_func
7 | from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
8 | from prompt_toolkit.history import InMemoryHistory
9 |
10 | from ..action import Action, ActionSpec
11 |
# Platform identifier (e.g. "linux", "darwin", "win32") embedded in the spec below.
_platform = sys.platform
# Snapshot of a few common environment variables; values are None when unset.
# Exposed to the model through the action description so generated commands
# can match the user's environment.
_env = {
    "SHELL": os.environ.get("SHELL"),
    "USER": os.environ.get("USER"),
    "HOME": os.environ.get("HOME"),
    "PWD": os.environ.get("PWD")
}

# Spec for the shell.cmd action. The description interpolates the current OS
# and environment snapshot at import time.
_spec = ActionSpec.from_dict({
    "name": "shell.cmd",
    "description": f"""Use this tool to execute Linux, macOS, and DOS commands and output the execution results. \
Current OS: {_platform}. \
System Environments: {json.dumps(dict(_env))}""",
    "arguments": [
        {
            "name": "command",
            "type": "string",
            "description": "The command to execute, along with any arguments.",
            "required": True
        }
    ]
})
34 |
35 |
class ShellCommandAction(Action):
    """Action that runs a shell command and returns its standard output."""

    def __init__(self) -> None:
        super().__init__()
        self.spec = _spec

    def perform(self, *args, command: str, **kwargs: Any) -> Any:
        """Run `command` through the system shell and return captured stdout.

        Any failure is reported as a string instead of being raised, so the
        caller always receives text. Note stderr and the exit status are not
        captured by `os.popen`.
        """
        try:
            pipe = os.popen(command)
            with pipe:
                output = pipe.read()
            return output
        except Exception as e:
            return f"Execute `{command}` failed: {e}"
47 |
48 |
class PromptAction(Action):
    """Action that reads a line of input from the user in the terminal."""

    def __init__(self) -> None:
        super().__init__()
        # History persists for this action instance and feeds auto-suggestions.
        self._history = InMemoryHistory()

        self.spec = ActionSpec.from_dict({
            "name": "shell.prompt",
            "description": "Prompt the user for input in the terminal and provide suggestions based on input history.",
            "arguments": [
                {
                    "name": "prompt",
                    "type": "string",
                    "description": "The prompt message to display to the user.",
                    "required": False
                }
            ]
        })

    def perform(self, prompt: Optional[str] = None, **kwargs: Any) -> str:
        """Display `prompt` (empty string when omitted) and return the user's line."""
        text = prompt if prompt else ""
        return prompt_func(
            text,
            history=self._history,
            auto_suggest=AutoSuggestFromHistory(),
            in_thread=True,
        )
70 |
71 |
class PrintAction(Action):
    """Action that prints a message to the terminal, optionally colorized."""

    # ANSI escape sequences for the supported foreground colors.
    _ANSI_COLORS = {
        "red": "\033[1;31m",
        "green": "\033[1;32m",
        "yellow": "\033[1;33m",
        "blue": "\033[1;34m",
        "purple": "\033[1;35m",
    }

    def __init__(self) -> None:
        super().__init__()

        self.spec = ActionSpec.from_dict({
            "name": "shell.print",
            "description": "Output a message to the terminal with optional color formatting.",
            "arguments": [
                {
                    "name": "message",
                    "type": "string",
                    "description": "The message to be printed.",
                    "required": False
                },
                {
                    "name": "end",
                    "type": "string",
                    "description": "The end character to append after the message.",
                    "required": False
                },
                {
                    "name": "color",
                    "type": "string",
                    "description": "The color in which the message should be printed. Supported colors: red, green, yellow, blue, purple.",  # noqa: E501
                    "required": False
                }
            ]
        })

    def perform(self, *args, message=None, end="\n", color="", **kwargs) -> None:
        """Print `message` — or, when `message` is falsy, `args` joined by `end`.

        Args:
            *args: Fallback values, printed joined by `end` when `message` is falsy.
            message: The text to print.
            end: Terminator appended after the output (default: newline).
            color: Optional color name; unknown, empty, or None prints uncolored.
        """
        # Look the color up once instead of lower-casing it in an if/elif
        # chain; `or ""` also tolerates color=None, which used to crash.
        escape = self._ANSI_COLORS.get((color or "").lower(), "")

        if escape:
            print(escape, end='')
        if message:
            print(message, end='')
        else:
            print(end.join(args), end='')
        print(end=end)

        # Reset terminal attributes only if we changed them.
        if escape:
            print("\033[0m", end='')
125 |
--------------------------------------------------------------------------------
/iauto/actions/buildin/time.py:
--------------------------------------------------------------------------------
1 | import time
2 | from datetime import datetime
3 | from typing import Optional, Union
4 |
5 | from ..action import Action, ActionSpec
6 |
7 |
class WaitAction(Action):
    """Action that pauses execution for a given number of seconds."""

    def __init__(self) -> None:
        super().__init__()

        self.spec = ActionSpec.from_dict({
            "name": "time.wait",
            "description": "Waits for a specified number of seconds.",
            "arguments": [
                {
                    "name": "seconds",
                    "type": "float",
                    "description": "The number of seconds to wait.",
                    "required": True
                }
            ]
        })

    def perform(
        self,
        seconds: float,
        **kwargs
    ) -> None:
        """Sleep for `seconds`; non-positive values return immediately."""
        if seconds <= 0:
            return
        time.sleep(seconds)
32 |
33 |
class GetNow(Action):
    """Action that returns the current time, formatted or as epoch milliseconds."""

    def __init__(self) -> None:
        super().__init__()

        self.spec = ActionSpec.from_dict({
            "name": "time.now",
            "description": "Returns the current date and time.",
            "arguments": [
                {
                    "name": "format",
                    "type": "string",
                    "description": "The format string (python format) to format the date and time. If not provided, returns timestamp in milliseconds.",  # noqa: E501
                    "required": False
                }
            ]
        })

    def perform(self, format: Optional[str] = None, **kwargs) -> Union[int, str]:
        """Return `datetime.now()` rendered by `format`, or epoch millis when format is None."""
        current = datetime.now()
        if format is not None:
            return current.strftime(format)
        return int(current.timestamp() * 1000)
57 |
--------------------------------------------------------------------------------
/iauto/actions/contrib/__init__.py:
--------------------------------------------------------------------------------
1 | from ..loader import loader
2 | from . import browser
3 | from .webdriver import create_actions as create_wd_actions
4 |
# Built-in browser actions, keyed by their playbook names.
actions = {
    "browser.open": browser.OpenBrowserAction(),
    "browser.close": browser.CloseBrowserAction(),
    "browser.goto": browser.GotoAction(),
    "browser.locator": browser.LocatorAction(),
    "browser.click": browser.ClickAction(),
    "browser.scroll": browser.ScrollAction(),
    "browser.eval": browser.EvaluateJavascriptAction(),
    "browser.content": browser.GetContentAction(),
    "browser.readability": browser.ReadabilityAction(),
}

# Merge in the webdriver-backed actions, then register everything globally.
wd_actions = create_wd_actions()
actions.update(wd_actions)

loader.register(actions)
22 |
--------------------------------------------------------------------------------
/iauto/actions/loader.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | from typing import Dict, Union
3 |
4 | from .action import Action, create
5 |
6 |
class ActionLoader:
    """Manages the registration and retrieval of action instances.

    This class provides a mechanism to register actions by name and retrieve them.
    It keeps an internal dictionary that maps action names to action instances.
    """

    def __init__(self) -> None:
        # Maps action name -> Action instance.
        self._actions: Dict[str, Action] = {}

    def register(self, actions: Dict[str, Action]):
        """
        Registers a set of actions.

        Note: entries whose names are already registered are silently overwritten.

        Args:
            actions (Dict[str, Action]): A dictionary with action names as keys and
                Action instances as values to be registered.
        """
        self._actions.update(actions)

    def get(self, name) -> Union[Action, None]:
        """
        Retrieves an action instance by its name.

        Args:
            name (str): The name of the action to retrieve.

        Returns:
            Action or None: The action instance if found, otherwise None.
        """
        return self._actions.get(name)

    @property
    def actions(self):
        """Gets a list of all registered action instances.

        Returns:
            list: A list of Action instances (a copy; mutating it does not
            affect the registry).
        """
        return list(self._actions.values())

    def load(self, identifier):
        """
        Loads an action from a given identifier.

        The identifier is expected to be a string in the format 'package.module.ClassName', where 'package.module'
        is the full path to the module containing the class 'ClassName' that is the action to be loaded.

        Args:
            identifier (str): A dot-separated path representing the action to be loaded.

        Raises:
            ValueError: If the action name conflicts with an already registered action.
            ImportError: If the module cannot be imported.
            AttributeError: If the class cannot be found in the module.

        Returns:
            None
        """
        parts = identifier.split(".")
        module = importlib.import_module('.'.join(parts[:-1]))
        # The original code guarded on `pkg != ''`, but import_module returns a
        # module object which never equals '' — the check was dead and removed.
        action = getattr(module, parts[-1])()
        # NOTE(review): other call sites in this package read `action.spec.name`;
        # confirm Action actually exposes a `definition` attribute here.
        name = action.definition.name
        if name in self._actions:
            raise ValueError(f"Action name conflict: {name}")
        self._actions[name] = action
74 |
75 |
"""Create an instance of ActionLoader.

This instance will be used to register and retrieve actions. It maintains a dictionary of actions that can be accessed or modified.
""" # noqa: E501
# Module-level singleton registry used by the `register` decorator below and
# by importers across the package.
loader = ActionLoader()
81 |
82 |
def register(name: str, spec: Dict):
    """
    Decorator factory that turns a function into a registered action.

    Args:
        name (str): The name under which to register the action.
        spec (Dict): A dictionary containing the action specification.

    Returns:
        A decorator that converts the wrapped function into an action via
        `create`, registers it on the module-level `loader`, and returns the
        action instance (the action object replaces the original function).

    Note:
        Registering a name that already exists silently overwrites the
        previous action (see `ActionLoader.register`); no error is raised.
    """

    def decorator(func):
        # Build the action from the function + spec and fix its public name.
        action = create(func=func, spec=spec)
        action.spec.name = name
        loader.register({name: action})
        return action
    return decorator
107 |
--------------------------------------------------------------------------------
/iauto/actions/playbook.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from typing import Any, Dict, List, Optional, Union
4 |
5 | from pydantic import BaseModel
6 |
7 | try:
8 | from yaml import CDumper as yaml_dumper
9 | from yaml import CLoader as yaml_loader
10 | except ImportError:
11 | from yaml import Loader as yaml_loader
12 | from yaml import Dumper as yaml_dumper
13 |
14 | from yaml import dump as yaml_dump
15 | from yaml import load as yaml_load
16 |
17 | from .action import ActionSpec
18 |
# Dictionary keys recognized inside a playbook definition mapping.
KEY_ARGS = "args"
KEY_ACTIONS = "actions"
KEY_RESULT = "result"
KEY_DESCRIPTION = "description"
KEY_SPEC = "spec"
24 |
25 |
class Playbook(BaseModel):
    """
    A playbook: a named, possibly nested series of actions to execute.

    Attributes:
        name (Optional[str]): The name of the playbook.
        description (Optional[str]): A brief description of what the playbook does.
        args (Union[str, List, Dict, None]): Arguments passed to the playbook's actions.
        actions (Optional[List['Playbook']]): Nested playbooks to be executed in order.
        result (Union[str, List, Dict, None]): The result of the playbook execution.
        spec (Optional[ActionSpec]): The function spec of the playbook.
        metadata (Optional[Dict]): The metadata of the playbook (e.g. "__root__" directory).
    """

    name: Optional[str] = None
    description: Optional[str] = None
    args: Union[str, List, Dict, None] = None
    actions: Optional[List['Playbook']] = None
    result: Union[str, List, Dict, None] = None
    spec: Optional[ActionSpec] = None
    metadata: Dict[str, Any] = {}

    def resolve_path(self, path: str) -> str:
        """
        Resolve a possibly-relative path against this playbook's root directory.

        Args:
            path (str): The file path that may be relative or absolute.

        Returns:
            str: `path` unchanged when absolute; otherwise `path` joined onto
            the `"__root__"` entry of this playbook's metadata.
        """
        if not os.path.isabs(path):
            path = os.path.join(self.metadata["__root__"], path)
        return path
61 |
62 |
def from_dict(d: Dict) -> Playbook:
    """
    Build a Playbook object from a parsed dictionary.

    A single-key mapping is treated as {name: body}; otherwise the mapping
    itself is the body and its "name" entry is the playbook name.

    Args:
        d (Dict): The dictionary to convert into a Playbook object.

    Returns:
        Playbook: The converted Playbook object.

    Raises:
        ValueError: If `d` is None, the name is missing/not a string, or the
            actions entry is not a list.
    """
    if d is None:
        raise ValueError(f"Invalid playbook: {d}")

    if len(d) == 1:
        name = next(iter(d))
        body = d[name]
    else:
        name = d.get("name")
        body = d

    # `None` also fails the isinstance check, covering the missing-name case.
    if not isinstance(name, str):
        raise ValueError(f"Invalid name: {name}")

    playbook = Playbook(name=name)

    if isinstance(body, Dict):
        playbook.description = body.get(KEY_DESCRIPTION)
        playbook.args = body.get(KEY_ARGS)
        playbook.result = body.get(KEY_RESULT)

        data_actions = body.get(KEY_ACTIONS)
        if data_actions is not None:
            if not isinstance(data_actions, List):
                raise ValueError(f"Invalid actions: {data_actions}")
            # Each action is itself a playbook definition; recurse.
            playbook.actions = [from_dict(a) for a in data_actions]

        data_spec = body.get(KEY_SPEC)
        if data_spec is not None:
            playbook.spec = ActionSpec.from_dict(data_spec)
    elif isinstance(body, List):
        playbook.args = body
    else:
        # A scalar body becomes a single-element argument list.
        playbook.args = [body]

    return playbook
115 |
116 |
def load(fname: str) -> Playbook:
    """Load a playbook from a YAML or JSON file.

    Args:
        fname (str): Path to the YAML/JSON file containing the playbook.

    Returns:
        Playbook: The loaded playbook, with each nested action's
        "__root__" metadata set to the file's directory.

    Raises:
        ValueError: If the file extension is not yaml/yml/json.
    """
    # Everything after the last dot, lower-cased (the whole name if no dot).
    ext = fname[fname.rfind(".") + 1:].lower()

    with open(fname, 'r', encoding='utf-8') as f:
        if ext == "json":
            data = json.load(f)
        elif ext in ("yaml", "yml"):
            data = yaml_load(f, Loader=yaml_loader)
        else:
            raise ValueError(f"Unsupported file extension: {ext}")

    playbook = from_dict(data)
    # Relative paths inside the playbook resolve against its own directory.
    _resolve_path(playbook=playbook, root=os.path.dirname(fname))
    return playbook
141 |
142 |
def dump(playbook: Playbook, fname: str, format: str = "yaml"):
    """Serialize a playbook to a file.

    Args:
        playbook (Playbook): The playbook to dump.
        fname (str): The file name to dump to.
        format (str): Output format, either "yaml" or "json".

    Raises:
        ValueError: If `format` is neither "yaml" nor "json".
    """

    pb = playbook.model_dump(exclude_none=True)

    if format == "yaml":
        # Explicit UTF-8: allow_unicode=True writes raw non-ASCII characters,
        # which would fail under a non-UTF-8 locale default encoding.
        with open(fname, "w", encoding="utf-8") as f:
            yaml_dump(pb, f, Dumper=yaml_dumper, allow_unicode=True)
    elif format == "json":
        # ensure_ascii=False likewise emits raw non-ASCII characters.
        with open(fname, "w", encoding="utf-8") as f:
            json.dump(pb, f, ensure_ascii=False, indent=4, sort_keys=True)
    else:
        raise ValueError(f"Invalid format: {format}")
162 |
163 |
def _resolve_path(playbook: Playbook, root: str):
    """Recursively record `root` as the "__root__" metadata of every nested action.

    Note: only the actions get the metadata, not `playbook` itself.
    """
    for action in playbook.actions or []:
        action.metadata["__root__"] = root
        _resolve_path(action, root)
169 |
--------------------------------------------------------------------------------
/iauto/agents/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides the `AgentExecutor` class which is responsible for executing actions on behalf of agents.
3 | It acts as an intermediary layer between the agent's instructions and the actual execution of those instructions.
4 |
5 | Classes:
6 | * AgentExecutor
7 | """
8 | from .executor import AgentExecutor
9 |
10 | __all__ = [
11 | "AgentExecutor"
12 | ]
13 |
--------------------------------------------------------------------------------
/iauto/agents/_actions.py:
--------------------------------------------------------------------------------
1 | from autogen import AssistantAgent, ConversableAgent
2 | from typing_extensions import Dict, List, Optional
3 |
4 | from ..actions.loader import register
5 | from ..llms import ChatMessage, Session
6 | from .executor import AgentExecutor
7 | from .model_clients import SessionClient
8 |
9 |
@register(name="agents.create", spec={
    "description": "Create a new agent instance.",
    "arguments": [
        {
            "name": "type",
            "description": "The type of agent to create.",
            "type": "string",
            "default": "assistant",
            "enum": ["assistant"]
        },
        {
            "name": "session",
            "description": "The LLM Session instance containing the state and configuration.",
            "type": "Session"
        },
        {
            "name": "llm_args",
            "description": "Additional arguments for the language model.",
            "type": "dict",
            "required": False
        },
        {
            "name": "react",
            "description": "Whether the agent should use react reasoning.",
            "type": "bool",
            "required": False,
            "default": False
        },
        {
            "name": "name",
            "description": "The name of the agent.",
            "type": "string",
            "default": "assistant"
        },
        {
            "name": "description",
            "description": "A brief description of the agent's purpose.",
            "type": "string",
            "required": False
        },
        {
            "name": "instructions",
            "description": "Instructions for the agent.",
            "type": "string",
            "required": False
        }
    ]
})
def create_agent(
    *args,
    type: str = "assistant",
    session: Session,
    llm_args: Optional[Dict] = None,
    react: Optional[bool] = False,
    name: str = "assistant",
    description: Optional[str] = None,
    instructions: Optional[str] = None,
    **kwargs
) -> ConversableAgent:
    """Create a ConversableAgent backed by the given LLM session.

    Only type "assistant" is currently supported. The session's actions (if
    any) are exposed to the agent as OpenAI-style tool specs.

    Raises:
        ValueError: If `type` is not "assistant".
    """
    if instructions is None or instructions == "":
        instructions = AssistantAgent.DEFAULT_SYSTEM_MESSAGE
    else:
        # Autogen terminates chats on this sentinel; append it to custom prompts.
        instructions += '\nReply "TERMINATE" in the end when everything is done.'

    # Treat a whitespace-only description as absent.
    if description is not None and description.strip() == "":
        description = None

    llm_config = {
        "model": session.llm.model,
        "model_client_cls": "SessionClient",
        "tools": []
    }

    if session.actions and len(session.actions) > 0:
        llm_config["tools"] = [a.spec.oai_spec() for a in session.actions]

    agent = None
    if type == "assistant":
        agent = AssistantAgent(
            name=name,
            system_message=instructions,
            description=description,
            llm_config=llm_config
        )
        # Route the agent's completions through our Session-backed client.
        agent.register_model_client(
            model_client_cls=SessionClient,
            session=session,
            react=react,
            llm_args=llm_args
        )
    else:
        raise ValueError(f"Invalid agent type: {type}")
    return agent
103 |
104 |
@register(name="agents.executor", spec={
    "description": "Instantiate a new agent executor.",
    "arguments": [
        {
            "name": "session",
            "description": "The LLM Session instance containing the state and configuration.",
            "type": "Session"
        },
        {
            "name": "llm_args",
            "description": "Additional arguments for the language model.",
            "type": "dict",
            "required": False
        },
        {
            "name": "react",
            "description": "Whether the agent should use react reasoning.",
            "type": "bool",
            "required": False,
            "default": False
        },
        {
            "name": "agents",
            "description": "A list of ConversableAgent instances to be managed by the executor.",
            "type": "List[ConversableAgent]"
        },
        {
            "name": "instructions",
            "description": "Instructions for the agent executor.",
            "type": "string",
            "required": False
        },
        {
            "name": "max_consecutive_auto_reply",
            "description": "The maximum number of consecutive auto-replies allowed by the executor.",
            "type": "int",
            "required": False,
            "default": 10
        }
    ]
})
def create_agent_executor(
    *args,
    session: Session,
    llm_args: Optional[Dict] = None,
    react: Optional[bool] = False,
    agents: List[ConversableAgent],
    instructions: Optional[str] = None,
    max_consecutive_auto_reply: Optional[int] = 10,
    **kwargs
):
    """Build an AgentExecutor that manages `agents` over the given session."""
    executor = AgentExecutor(
        session=session,
        llm_args=llm_args,
        react=react,
        agents=agents,
        instructions=instructions,
        max_consecutive_auto_reply=max_consecutive_auto_reply
    )
    return executor
164 |
165 |
@register(name="agents.run", spec={
    "description": "Run the specified agent executor with a given message.",
    "arguments": [
        {
            "name": "agent_executor",
            "description": "The agent executor instance to run.",
            "type": "AgentExecutor"
        },
        {
            "name": "message",
            "description": "The message to process.",
            "type": "string"
        },
        {
            "name": "clear_history",
            "description": "Whether to clear the conversation history before running.",
            "type": "bool",
            "required": False,
            "default": True
        },
        {
            "name": "silent",
            "description": "Whether to suppress output during execution.",
            "type": "bool",
            "required": False,
            "default": False
        }
    ]
})
def executor_run(
    *args,
    agent_executor: AgentExecutor,
    message: str,
    clear_history: Optional[bool] = True,
    silent: Optional[bool] = False,
    **kwargs
):
    """Run the executor on a user message and return the chat summary string."""
    chat_message = ChatMessage(role="user", content=message)
    result = agent_executor.run(
        message=chat_message,
        clear_history=clear_history,
        silent=silent
    )
    # `run` returns a dict; the summary may be absent, so default to "".
    return result.get("summary", "")
209 |
--------------------------------------------------------------------------------
/iauto/agents/executor.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional, Union
2 |
3 | import autogen
4 | from autogen import (Agent, ConversableAgent, GroupChat, GroupChatManager,
5 | UserProxyAgent)
6 |
7 | from ..llms import ChatMessage, Session
8 | from ..log import get_level
9 | from .model_clients import SessionClient
10 |
11 | autogen.logger.setLevel(get_level("WARN"))
12 |
13 |
14 | class AgentExecutor:
15 | def __init__(
16 | self,
17 | agents: List[ConversableAgent],
18 | session: Session,
19 | llm_args: Optional[Dict] = None,
20 | instructions: Optional[str] = None,
21 | human_input_mode: Optional[str] = "NEVER",
22 | max_consecutive_auto_reply: Optional[int] = 10,
23 | react: Optional[bool] = False
24 | ) -> None:
25 | """Initializes the AgentExecutor class.
26 |
27 | This class is responsible for executing agents within a given session. It manages the interaction
28 | between UserProxyAgent and ConversableAgent instances, handles message passing, and manages the
29 | termination conditions for the chats.
30 |
31 | Args:
32 | agents (List[ConversableAgent]): A list of agent instances that will participate in the chat.
33 | session (Session): The session object containing details about the current LLM session.
34 | llm_args (Optional[Dict], optional): Additional arguments to pass to the LLM client. Defaults to None.
35 | instructions (Optional[str], optional): System messages set to the UserProxyAgent.
36 | human_input_mode (Optional[str], optional): The mode of human input, can be 'NEVER', 'TERNIMANTE',
37 | or 'ALWAYS'. Defaults to "NEVER".
38 | max_consecutive_auto_reply (Optional[int], optional): The maximum number of consecutive auto-replies
39 | allowed before requiring human input. Defaults to 10.
40 | react (Optional[bool], optional): Whether the agents should react to the messages. Defaults to False.
41 | """
42 | self._session = session
43 | self._agents = agents
44 | self.human_input_mode = human_input_mode
45 |
46 | llm_config = {
47 | "model": session.llm.model,
48 | "model_client_cls": "SessionClient"
49 | }
50 |
51 | def termination_func(x): return x.get("content", "").upper().find("TERMINATE") >= 0
52 | code_execution_config = {"executor": "ipython-embedded"}
53 |
54 | if instructions is None or instructions == "":
55 | instructions = """You are a helpful AI Assistant."""
56 | instructions += """Your purpose is to help users resolve their problems as quickly and efficiently as possible.
57 |
58 | Reply "TERMINATE" in the end when everything is done."""
59 |
60 | self._user_proxy = UserProxyAgent(
61 | name="UserProxy",
62 | system_message=instructions,
63 | is_termination_msg=termination_func,
64 | code_execution_config=code_execution_config,
65 | human_input_mode=self.human_input_mode,
66 | max_consecutive_auto_reply=max_consecutive_auto_reply,
67 | llm_config=llm_config
68 | )
69 | self._user_proxy.register_model_client(
70 | model_client_cls=SessionClient,
71 | session=session, react=react, llm_args=llm_args
72 | )
73 |
74 | function_map = {}
75 | if self._session.actions:
76 | for func in self._session.actions:
77 | function_map[func.spec.name.replace(".", "_")] = func
78 | self._user_proxy.register_function(function_map)
79 |
80 | if len(self._agents) == 1:
81 | self._recipient = self._agents[0]
82 | elif len(self._agents) > 1:
83 | if len(function_map) > 0:
84 | tools_proxy = UserProxyAgent(
85 | name="FunctionCaller",
86 | description="An assistant can execute functions.",
87 | system_message=instructions,
88 | is_termination_msg=termination_func,
89 | code_execution_config=code_execution_config,
90 | human_input_mode="NEVER",
91 | max_consecutive_auto_reply=max_consecutive_auto_reply,
92 | llm_config=llm_config
93 | )
94 | tools_proxy.register_function(function_map=function_map)
95 | tools_proxy.register_model_client(
96 | model_client_cls=SessionClient,
97 | session=session,
98 | react=react,
99 | llm_args=llm_args
100 | )
101 | self._agents.append(tools_proxy)
102 |
103 | speaker_selection_method = "round_robin" if len(self._agents) == 2 else "auto"
104 | groupchat = GroupChat(
105 | agents=self._agents,
106 | messages=[],
107 | speaker_selection_method=speaker_selection_method
108 | )
109 | mgr = GroupChatManager(
110 | groupchat=groupchat,
111 | name="GroupChatManager",
112 | llm_config=llm_config,
113 | is_termination_msg=termination_func,
114 | code_execution_config=code_execution_config,
115 | max_consecutive_auto_reply=max_consecutive_auto_reply,
116 | )
117 | mgr.register_model_client(
118 | model_client_cls=SessionClient,
119 | session=session,
120 | react=react,
121 | llm_args=llm_args
122 | )
123 | self._recipient = mgr
124 | else:
125 | raise ValueError("agents error")
126 |
127 | def run(
128 | self,
129 | message: ChatMessage,
130 | clear_history: Optional[bool] = True,
131 | silent: Optional[bool] = False,
132 | **kwargs
133 | ) -> Dict:
134 | """
135 | Runs the chat session with the given message and configuration.
136 |
137 | This method initiates a chat with the recipient (either a single agent or a group chat manager) using
138 | the UserProxyAgent. It processes the message, manages the chat history, and generates a summary
139 | of the conversation.
140 |
141 | Args:
142 | message (ChatMessage): The message to start the chat with.
143 | clear_history (Optional[bool]): Determines whether to clear the chat history before starting
144 | the new chat session. Defaults to True.
145 | silent (Optional[bool]): If set to True, the agents will not output any messages. Defaults to False.
146 | **kwargs: Additional keyword arguments that might be needed for extended functionality.
147 |
148 | Returns:
149 | Dict: A dictionary containing the chat history, summary of the conversation, and the cost of the session.
150 | """
151 | result = self._user_proxy.initiate_chat(
152 | self._recipient,
153 | clear_history=clear_history,
154 | silent=silent,
155 | message=message.content,
156 | summary_method="reflection_with_llm"
157 | )
158 | # last_message = result.chat_history[-1]["content"]
159 | summary = result.summary
160 | if isinstance(summary, dict):
161 | summary = summary["content"]
162 |
163 | return {
164 | "history": result.chat_history,
165 | "summary": summary,
166 | "cost": result.cost
167 | }
168 |
169 | def reset(self):
170 | """Resets the state of all agents and the UserProxyAgent.
171 |
172 | This method clears any stored state or history in the agents to prepare for a new task.
173 | """
174 | for agent in self._agents + [self._user_proxy, self._recipient]:
175 | agent.reset()
176 |
177 | def set_human_input_mode(self, mode):
178 | """Sets the human input mode for the UserProxyAgent and the recipient.
179 |
180 | Args:
181 | mode (str): The mode of human input to set. Can be 'NEVER', 'TERMINATE', or 'ALWAYS'.
182 | """
183 | self.human_input_mode = mode
184 | for agent in [self._user_proxy, self._recipient]:
185 | agent.human_input_mode = mode
186 |
187 | def register_human_input_func(self, func):
188 | """Registers a function to handle human input across all agents.
189 |
190 | Args:
191 | func (Callable): The function to be called when human input is needed.
192 | """
193 | for agent in self._agents + [self._user_proxy, self._recipient]:
194 | agent.get_human_input = func
195 |
196 | def register_print_received(self, func):
197 | """Registers a function to print messages received by agents.
198 |
199 | The function will be called each time an agent receives a message, unless the silent
200 | flag is set to True.
201 |
202 | Args:
203 | func (Callable): The function to be called with the message, sender, and receiver
204 | information when a message is received.
205 | """
206 | agents = [self._user_proxy, self._recipient]
207 | if len(self._agents) > 1:
208 | agents = self._agents + agents
209 |
210 | for agent in agents:
211 | receive_func = ReceiveFunc(agent, func)
212 | agent.receive = receive_func
213 |
    @property
    def session(self) -> Session:
        """The LLM `Session` object held by this instance (read-only)."""
        return self._session
217 |
218 |
class ReceiveFunc:
    """Callable wrapper around an agent's ``receive`` method.

    Captures the agent's original ``receive`` at construction time and, on
    each call, first forwards the message to a printing callback (unless
    silent), then delegates to the original ``receive``.
    """

    def __init__(self, receiver, print_recieved) -> None:
        self._receiver = receiver
        self._original_receive = receiver.receive
        self._on_message = print_recieved

    def __call__(
        self,
        message: Union[Dict, str],
        sender: Agent,
        request_reply: Optional[bool] = None,
        silent: Optional[bool] = False
    ):
        if not silent:
            self._on_message(message=message, sender=sender, receiver=self._receiver)
        self._original_receive(message=message, sender=sender, request_reply=request_reply, silent=silent)
235 |
--------------------------------------------------------------------------------
/iauto/agents/model_clients.py:
--------------------------------------------------------------------------------
1 | import json
2 | from types import SimpleNamespace
3 | from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple, Union
4 |
5 | from autogen import ModelClient
6 |
7 | from iauto.llms import ChatMessage, Session
8 |
9 | from .. import log
10 |
11 |
class SessionResponse(SimpleNamespace):
    """Duck-typed stand-in for an OpenAI-style chat completion response.

    Mirrors the attribute layout autogen expects (``choices[i].message``,
    ``model``, ``usage``) without depending on the OpenAI SDK types.
    """

    class Choice(SimpleNamespace):
        class Message(SimpleNamespace, Iterable):
            # Declared fields; actual values are supplied as
            # SimpleNamespace keyword arguments at construction time.
            role: Optional[str]
            content: Optional[str]
            tool_calls: Optional[List[Dict]]

            def __iter__(self) -> Iterator[Tuple]:
                # Yields (key, value) pairs so dict(message) works.
                yield "role", self.role
                yield "content", self.content
                yield "tool_calls", self.tool_calls

            def __getitem__(self, index):
                # Mapping-style access; unknown keys resolve to None.
                return dict(self).get(index)

        message: Message

    choices: List[Choice]
    model: str
    usage: Dict[str, int]
35 |
36 |
class SessionClient(ModelClient):
    """Bridge that lets autogen drive an iauto LLM ``Session``.

    Implements autogen's ``ModelClient`` protocol: ``create`` translates an
    OpenAI-style request into ``ChatMessage`` objects, delegates to the
    wrapped session (plain ``run`` or ReAct mode), and repackages the reply
    as a ``SessionResponse``.
    """

    def __init__(
        self,
        config,
        session: Session,
        react: Optional[bool] = False,
        llm_args: Optional[Dict] = None,
        **kwargs
    ) -> None:
        self._model = config.get("model")
        self._session = session
        self._react = react
        self._llm_args = llm_args or {}

        self._log = log.get_logger("IASessionClient")

    def create(self, params: Dict[str, Any]) -> ModelClient.ModelClientResponseProtocol:
        """Run one completion through the wrapped session and wrap the reply."""
        if self._log.isEnabledFor(log.DEBUG):
            self._log.debug(json.dumps(params, indent=4, ensure_ascii=False))

        # Translate OpenAI-style message dicts into ChatMessage objects.
        chat_messages = [
            ChatMessage(
                role=raw["role"],
                content=raw["content"] or "",
                tool_call_id=raw.get("tool_call_id"),
                name=raw.get("name"),
                tool_calls=raw.get("tool_calls")
            )
            for raw in params.get("messages") or []
        ]

        use_tools = bool(params.get("tools") or [])

        # Tool execution is left to autogen, hence auto_exec_tools=False.
        runner = self._session.react if self._react else self._session.run
        reply = runner(messages=chat_messages, use_tools=use_tools, auto_exec_tools=False, **self._llm_args)

        if not isinstance(reply, ChatMessage):
            raise ValueError("invalid message type response from SessionClient")

        usage = {
            "prompt_tokens": 0,
            "completion_tokens": 0
        }
        if reply.usage:
            usage["prompt_tokens"] = reply.usage.input_tokens
            usage["completion_tokens"] = reply.usage.output_tokens

        return SessionResponse(
            choices=[
                SessionResponse.Choice(
                    message=SessionResponse.Choice.Message(
                        role=reply.role,
                        content=reply.content,
                        tool_calls=[t.model_dump() for t in reply.tool_calls or []]
                    )
                )
            ],
            model=self._model,
            usage=usage
        )

    def message_retrieval(
        self,
        response: ModelClient.ModelClientResponseProtocol
    ) -> Union[List[str], List[ModelClient.ModelClientResponseProtocol.Choice.Message]]:
        """Return full messages when any choice carries tool calls,
        otherwise just the non-None content strings."""
        choices = response.choices

        if any(choice.message["tool_calls"] for choice in choices):
            return [c.message for c in choices]
        return [c.message["content"] for c in choices if c.message["content"] is not None]

    def cost(self, response: SessionResponse) -> float:
        # Cost accounting is not tracked for these sessions.
        return 0

    @staticmethod
    def get_usage(response: SessionResponse) -> Dict:
        """Augment the response usage dict with cost and total_tokens."""
        usage = response.usage
        usage["cost"] = 0
        usage["total_tokens"] = usage["prompt_tokens"] + usage["completion_tokens"]
        return usage
133 |
--------------------------------------------------------------------------------
/iauto/api/__init__.py:
--------------------------------------------------------------------------------
1 | from . import _playbooks
2 | from ._api import api
3 | from ._entry import entry
4 | from ._server import start
5 |
--------------------------------------------------------------------------------
/iauto/api/_api.py:
--------------------------------------------------------------------------------
1 | from fastapi import FastAPI
2 |
# Version segment used for the API's advertised version string.
CURRENT_API_VERSION = "v1"

# FastAPI application serving the HTTP API.
# Swagger UI is exposed at /swagger, ReDoc at /docs.
# NOTE(review): the title says "docread" while this package is "iauto" —
# possibly copied from another project; confirm before relying on it.
api = FastAPI(
    title="docread API",
    version=CURRENT_API_VERSION,
    docs_url="/swagger",
    redoc_url='/docs'
)
12 |
--------------------------------------------------------------------------------
/iauto/api/_entry.py:
--------------------------------------------------------------------------------
1 | from contextlib import asynccontextmanager
2 |
3 | from starlette.applications import Starlette
4 | from starlette.routing import Mount
5 |
6 | from ._api import api
7 |
8 |
@asynccontextmanager
async def lifespan(app: Starlette):
    """Application lifespan hook.

    The ``try`` body runs at startup and anything after the ``yield`` runs
    at shutdown; both are currently empty placeholders.
    """
    try:
        pass  # startup work goes here
    finally:
        yield
15 |
# Routes: mount the FastAPI application under the /api prefix.
routes = [
    Mount(
        '/api',
        name='api',
        app=api
    ),
]

# ASGI entry point wrapping the mounted API with the lifespan hook above.
entry = Starlette(debug=False, routes=routes, lifespan=lifespan)
26 |
--------------------------------------------------------------------------------
/iauto/api/_playbooks.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/iauto/api/_playbooks.py
--------------------------------------------------------------------------------
/iauto/api/_server.py:
--------------------------------------------------------------------------------
1 | from uvicorn import run
2 |
3 | from ._entry import entry
4 |
5 |
def start(host: str = "0.0.0.0", port: int = 2000):
    """Serve the ASGI entry app with uvicorn; blocks until shutdown.

    Args:
        host: Interface to bind. Defaults to all interfaces.
        port: TCP port to listen on. Defaults to 2000.
    """
    run(entry, host=host, port=port)
8 |
--------------------------------------------------------------------------------
/iauto/db.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from typing import Any, Dict, List, Literal, Optional
4 |
5 | from sqlalchemy.orm import load_only
6 | from sqlmodel import Session, SQLModel, create_engine, select, text
7 |
8 |
class Persistence:
    """Lightweight persistence layer on top of SQLModel/SQLAlchemy.

    The database URL and connection args come from the constructor, then
    the DATABASE_URL / DATABASE_CONNECT_ARGS environment variables, and
    finally fall back to a SQLite file in the current working directory.
    """

    # Process-wide singleton created lazily by `default()`.
    _instance = None

    def __init__(self, database_url: Optional[str] = None, connect_args: Optional[Dict[str, Any]] = None) -> None:
        """Create an engine and ensure all SQLModel tables exist.

        Args:
            database_url: SQLAlchemy database URL. Falls back to the
                DATABASE_URL env var, then to a local SQLite file.
            connect_args: Driver-specific connection arguments. Falls back
                to the DATABASE_CONNECT_ARGS env var (JSON-encoded).
        """
        self._database_url = database_url
        self._connect_args = connect_args

        if not database_url and 'DATABASE_URL' in os.environ:
            self._database_url = os.environ['DATABASE_URL']
        if not connect_args and 'DATABASE_CONNECT_ARGS' in os.environ:
            self._connect_args = json.loads(
                os.environ['DATABASE_CONNECT_ARGS'])
        if not self._database_url:
            self._database_url = f"sqlite:///{os.path.join(os.getcwd(), 'db.sqlite3')}"
            print(f"DATABASE_URL not specified, use {self._database_url}")

        if not self._connect_args:
            self._connect_args = {}

        self._engine = create_engine(
            self._database_url,
            connect_args=self._connect_args,
            # Keep non-ASCII text readable in stored JSON columns.
            json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False)
        )

        SQLModel.metadata.create_all(self._engine)

    @property
    def engine(self):
        """The underlying SQLAlchemy engine."""
        return self._engine

    def create_session(self) -> Session:
        """Open a new ORM session; objects remain usable after commit."""
        return Session(self._engine, expire_on_commit=False)

    def initialize_database(self):
        """Create all tables known to SQLModel metadata (idempotent)."""
        SQLModel.metadata.create_all(self._engine)

    @staticmethod
    def default():
        """Return the lazily-created process-wide Persistence instance."""
        if not Persistence._instance:
            Persistence._instance = Persistence()
        return Persistence._instance

    def exec(self, sql, **kwargs):
        """Execute raw SQL with bound parameters, yielding the result.

        This is a generator: the statement runs when iteration starts and
        the transaction commits once the generator is resumed past the
        yield (or iteration finishes).

        NOTE(review): if the generator is closed without being resumed,
        the commit is skipped — confirm callers always drain it.

        Args:
            sql: SQL text with named bind parameters.
            **kwargs: Values for the bind parameters.

        Yields:
            The SQLAlchemy result of executing the statement.
        """
        # Fix: use this instance's engine, consistent with save/get/list.
        # Previously this always went through Persistence.default(), so a
        # non-default instance silently executed against the wrong database.
        with self._engine.connect() as conn:
            stmt = text(sql)
            yield conn.execute(stmt, kwargs)
            conn.commit()

    def save(self, objs: List[SQLModel]):
        """Add all objects to a new session and commit once."""
        with self.create_session() as session:
            for o in objs:
                session.add(o)
            session.commit()

    def get(self, cls, id):
        """Fetch a single row of `cls` by primary key, or None."""
        with self.create_session() as session:
            return session.get(cls, id)

    def list(
        self,
        cls,
        fields: Optional[List] = None,
        filters: Optional[List] = None,
        limit: Optional[int] = None,
        order_by: Optional[Any] = None,
        order: Literal["asc", "desc", None] = None
    ):
        """Query rows of `cls` with optional projection, filters, ordering
        and limit.

        Args:
            cls: The SQLModel class to select from.
            fields: Columns to load (projection via `load_only`).
            filters: SQLAlchemy filter expressions, ANDed together.
            limit: Maximum number of rows.
            order_by: Column to order by.
            order: "asc" (default) or "desc".

        Returns:
            A list of matching model instances.

        Raises:
            ValueError: If `order` is not one of "asc", "desc", None.
        """
        stmt = select(cls)

        if fields is not None:
            stmt = stmt.options(load_only(*fields))

        if filters is not None:
            for condition in filters:
                stmt = stmt.filter(condition)

        if limit is not None:
            stmt = stmt.limit(limit)

        if order_by is not None:
            if order not in ["asc", "desc", None]:
                raise ValueError("invalid order")
            if order == "desc":
                # Fix: `-order_by` renders as arithmetic negation
                # (ORDER BY -col), which is wrong for non-numeric columns;
                # emit a proper DESC clause instead.
                stmt = stmt.order_by(order_by.desc())
            else:
                stmt = stmt.order_by(order_by)

        with self.create_session() as session:
            results = session.exec(stmt)
            return [r for r in results]
100 |
--------------------------------------------------------------------------------
/iauto/llms/__init__.py:
--------------------------------------------------------------------------------
1 | from .llm import LLM, ChatMessage, Message
2 | from .llm_factory import create_llm
3 | from .session import Session
4 |
# Public API of the iauto.llms package (controls `import *` and documents
# the supported entry points).
__all__ = [
    "LLM",
    "ChatMessage",
    "Message",
    "Session",
    "create_llm"
]
12 |
--------------------------------------------------------------------------------
/iauto/llms/__main__.py:
--------------------------------------------------------------------------------
from iauto.llms import _repl

# Run the interactive REPL when the package is executed as a module.
# NOTE(review): no `_repl` module is visible in the iauto/llms package
# sources — this import may raise ImportError at runtime; confirm that
# `_repl` actually exists.
_repl.run()
4 |
--------------------------------------------------------------------------------
/iauto/llms/_openai_qwen.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from ..actions import ActionSpec
4 | from . import _qwen
5 | from .llm import ChatMessage, Function, ToolCall
6 | from .openai import OpenAI
7 |
8 |
class QWen(OpenAI):
    """OpenAI-compatible client with workarounds for QWen served via fastchat.

    Tool-call metadata is folded into plain message content and function
    calling is emulated through an injected system prompt, because the
    fastchat-served QWen endpoint does not handle OpenAI tool messages.
    """

    def chat(self, messages: List[ChatMessage] = [], tools: Optional[List[ActionSpec]] = None, **kwargs) -> ChatMessage:
        """Chat with QWen, emulating OpenAI-style tool calling.

        Note: entries in ``messages`` are modified in place (tool roles and
        tool-call metadata are folded into their content).

        Args:
            messages: Conversation history.
            tools: Optional tool specs; when present, function-calling
                instructions are injected as a system prompt.
            **kwargs: Passed through to the underlying OpenAI chat call.

        Returns:
            ChatMessage: The model reply, with ``tool_calls`` populated when
            the model requested tool execution.
        """
        # Fix bug for qwen fastchat: flatten tool messages into user content
        for m in messages:
            if m.role == "tool":
                m.role = "user"

            if m.tool_call_id:
                m.content += f"\ntool_call_id: {m.tool_call_id}"
                m.tool_call_id = None
            if m.tool_calls:
                m.content += f"\ntool_calls: {m.tool_calls}"
                m.tool_calls = None

        if tools is None or len(tools) == 0:
            return super().chat(messages, **kwargs)

        # Tool calling: prepend function-calling instructions as a system prompt.
        tools_description = [t.oai_spec() for t in tools]
        qe_messages = [m.model_dump() for m in messages]
        func_prompt = _qwen.generate_function_instructions(functions=tools_description)
        qe_messages.insert(0, {"role": "system", "content": func_prompt})

        messages = [ChatMessage.from_dict(m) for m in qe_messages]

        m = super().chat(messages=messages, tools=tools, **kwargs)
        # Parse the ReAct-style text reply back into a structured choice.
        choice = _qwen.parse_response(m.content)

        if choice["finish_reason"] == "tool_calls":
            contents = []
            content = choice["message"]["content"]
            contents.append(f"{content}")

            m.tool_calls = []
            for tool_call in choice["message"]["tool_calls"]:
                try:
                    func_name = tool_call["function"]["name"]
                    func_args = tool_call["function"]["arguments"]
                    m.tool_calls.append(
                        ToolCall(
                            id=tool_call["id"],
                            type=tool_call["type"],
                            function=Function(
                                name=func_name,
                                arguments=func_args
                            )
                        )
                    )

                    contents.append(f"Action: {func_name}")
                    contents.append(f"Action Input: {func_args}")
                except Exception:
                    # Fix: Logger.warn is deprecated; use warning().
                    self._log.warning(f"ToolCall error: {tool_call}")

            m.content = "\n".join(contents)
        elif choice["finish_reason"] == "stop":
            m.content = choice["message"]["content"]
        return m

    def plain_messages(self, messages: List[ChatMessage], norole=False, nowrap=False):
        """Render messages as plain text, one message per line.

        Args:
            messages: Messages to render.
            norole: When True, omit the "role: " prefix.
            nowrap: When True, escape newlines inside content as "\\n".

        Returns:
            str: The newline-joined rendering.
        """
        plain = []
        for m in messages:
            role = "" if norole else f"{m.role}: "
            content = m.content if not nowrap else m.content.replace("\n", "\\n")
            plain.append(f"{role}{content}")
        return "\n".join(plain)
76 |
--------------------------------------------------------------------------------
/iauto/llms/actions.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Optional, Union
2 |
3 | from ..actions import Action, ActionSpec, Executor, Playbook, loader
4 | from .llm import ChatMessage
5 | from .llm_factory import create_llm
6 | from .session import Session
7 |
8 |
class CreateSessionAction(Action):
    """Action ``llm.session``: build a new LLM chat `Session`.

    Creates an LLM from the given provider/arguments and collects the tool
    actions the session may call.
    """

    def __init__(self) -> None:
        super().__init__()

        self.spec = ActionSpec.from_dict({
            "name": "llm.session",
            "description": "Establish a new LLM chat session.",
            "arguments": [
                {
                    "name": "provider",
                    "type": "str",
                    "description": "The name of the LLM provider.",
                    "default": "openai"
                },
                {
                    "name": "llm_args",
                    "type": "dict",
                    "description": "Arguments to initialize the LLM.",
                    "default": {}
                },
                {
                    "name": "tools",
                    "type": "List[str]",
                    "description": "Optional list of tools to include in the session for LLM function calling.",
                    "default": None
                }
            ],
        })

    def perform(
        self,
        *args,
        provider="openai",
        llm_args=None,
        tools: Optional[List[str]] = None,
        executor: Executor,
        playbook: Playbook,
        **kwargs
    ) -> Session:
        """Create the session.

        Args:
            provider: LLM provider name (see `create_llm`).
            llm_args: Keyword arguments for LLM construction. Defaults to
                an empty dict. (Fix: previously a shared mutable ``{}``
                default in the signature.)
            tools: Names of registered actions to expose as tools.
            executor: The playbook executor (required).
            playbook: The playbook node being executed (required).

        Returns:
            Session: The configured chat session.

        Raises:
            ValueError: If executor/playbook are missing, a tool name is
                unknown, or a nested action is not a playbook.
        """
        if executor is None or playbook is None:
            raise ValueError("executor and playbook required.")

        llm = create_llm(provider=provider, **(llm_args or {}))

        actions = []

        # Tools referenced by name must already be registered with the loader.
        if tools is not None:
            for name in tools:
                action = loader.get(name)
                if action is None:
                    raise ValueError(f"Action not found: {name}")
                actions.append(action)

        # Nested playbook actions are expanded (not executed) into callable actions.
        if playbook.actions is not None:
            for action_pb in playbook.actions:
                action = executor.get_action(playbook=action_pb)
                if action is None:
                    raise ValueError(f"Action not found: {action_pb.name}")
                if action_pb.name == "playbook":
                    args_, kwargs_ = executor.eval_args(args=action_pb.args)
                    pb_run_actions = action.perform(
                        *args_, execute=False, executor=executor, playbook=action_pb, **kwargs_)
                    actions.extend(pb_run_actions)
                else:
                    raise ValueError(f"Actions must be playbook, invalid action: {action_pb.name}")

        session = Session(llm=llm, actions=actions)
        return session
77 |
78 |
class ChatAction(Action):
    """Action ``llm.chat``: send a prompt (or message list) to an LLM
    session and return the reply."""

    def __init__(self) -> None:
        super().__init__()

        self.spec = ActionSpec.from_dict({
            "name": "llm.chat",
            "description": "Initiate a conversation with an LLM by sending a message and receiving a response.",
            "arguments": [
                {
                    "name": "session",
                    "type": "Session",
                    "description": "The active LLM session to interact with.",
                    "required": True
                },
                {
                    "name": "prompt",
                    "type": "str",
                    "description": "The message to send to the LLM.",
                    "required": True
                },
                {
                    "name": "history",
                    "type": "int",
                    "description": "The number of past interactions to consider in the conversation.",
                    "default": 5
                },
                {
                    "name": "rewrite",
                    "type": "bool",
                    "description": "Whether to rewrite the prompt before sending.",
                    "default": False
                },
                {
                    "name": "expect_json",
                    "type": "int",
                    "description": "Whether to expect a JSON response from the LLM.",
                    "default": 0
                }
            ],
        })

    def perform(
        self,
        *args,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        session: Session,
        prompt: Optional[str] = None,
        messages: Optional[List[Dict]] = None,
        history: int = 5,
        rewrite: bool = False,
        expect_json: int = 0,
        **kwargs: Any
    ) -> Union[str, Any]:
        """Run one chat turn.

        Either `messages` (explicit conversation) or `prompt` (appended to
        the session history) must be provided.

        Returns:
            The reply content string, or the structured value itself when
            the session returns a dict/list.

        Raises:
            ValueError: If neither `prompt` nor `messages` is given.
        """
        chat_messages = None
        if messages:
            chat_messages = [
                ChatMessage(role=msg["role"], content=msg["content"])
                for msg in messages
            ]
        elif prompt is not None:
            session.add(ChatMessage(role="user", content=prompt))
        else:
            raise ValueError("prompt or message required.")

        m = session.run(
            messages=chat_messages,
            history=history,
            rewrite=rewrite,
            expect_json=expect_json,
            **kwargs
        )
        # Structured results are returned unchanged; otherwise unwrap content.
        if isinstance(m, (dict, list)):
            return m
        return m.content if m is not None else None
157 |
158 |
class ReactAction(Action):
    """Action ``llm.react``: run a ReAct-style reasoning turn against an
    LLM session and return the reply content."""

    def __init__(self) -> None:
        super().__init__()

        self.spec = ActionSpec.from_dict({
            "name": "llm.react",
            "description": "Perform reactive reasoning with an LLM.",
            "arguments": [
                {
                    "name": "session",
                    "type": "Session",
                    "description": "The active LLM session to interact with.",
                    "required": True
                },
                {
                    "name": "prompt",
                    "type": "str",
                    "description": "The message to send to the LLM for reactive reasoning.",
                    "required": True
                },
                {
                    "name": "history",
                    "type": "int",
                    "description": "The number of past interactions to consider in the conversation.",
                    "default": 5
                },
                {
                    "name": "rewrite",
                    "type": "bool",
                    "description": "Whether to rewrite the prompt before sending.",
                    "default": False
                },
                {
                    "name": "log",
                    "type": "bool",
                    "description": "Whether to log the reasoning process.",
                    "default": False
                },
                {
                    "name": "max_steps",
                    "type": "int",
                    "description": "The maximum number of reasoning steps to perform.",
                    "default": 3
                }
            ],
        })

    def perform(
        self,
        *args,
        executor: Optional[Executor] = None,
        playbook: Optional[Playbook] = None,
        session: Session,
        prompt: Optional[str] = None,
        messages: Optional[List[Dict]] = None,
        history: int = 5,
        rewrite: bool = False,
        log: bool = False,
        max_steps: int = 3,
        **kwargs: Any
    ) -> str:
        """Run one ReAct turn; requires either `messages` or `prompt`."""
        chat_messages = None
        if messages:
            chat_messages = [
                ChatMessage(role=item["role"], content=item["content"])
                for item in messages
            ]
        elif prompt is not None:
            session.add(ChatMessage(role="user", content=prompt))
        else:
            raise ValueError("prompt or message required.")

        reply = session.react(messages=chat_messages, history=history, rewrite=rewrite, log=log, **kwargs)
        return reply.content
235 |
236 |
def register_actions():
    """Register the built-in LLM actions with the global action loader."""
    builtin = {
        "llm.session": CreateSessionAction(),
        "llm.chat": ChatAction(),
        "llm.react": ReactAction()
    }
    loader.register(builtin)
243 |
--------------------------------------------------------------------------------
/iauto/llms/chatglm.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from typing import Iterator, List, Optional
4 |
5 | import chatglm_cpp
6 |
7 | from ..actions import ActionSpec
8 | from ..log import DEBUG, get_logger
9 | from .llm import LLM, ChatMessage, Function, Message, ToolCall
10 |
11 | _model_cache = {}
12 |
13 |
class ChatGLM(LLM):
    """LLM backed by a local chatglm.cpp ggml model.

    Pipelines are cached per model path so repeated construction with the
    same file reuses the loaded model.
    """

    def __init__(self, model_path) -> None:
        """Load (or reuse a cached) chatglm.cpp pipeline.

        Args:
            model_path: Path to a ggml model file.

        Raises:
            ValueError: If `model_path` is not an existing file.
        """
        super().__init__()
        if not os.path.isfile(model_path):
            raise ValueError(f"model_path must be a ggml file: {model_path}")

        self._model = model_path

        self._llm = _model_cache.get(self._model)
        if self._llm is None:
            model = chatglm_cpp.Pipeline(model_path=self._model)
            _model_cache[self._model] = model
            self._llm = model

        self._log = get_logger("ChatGLM")

    def generate(self, instructions: str, **kwargs) -> Message:
        """Run raw text completion for `instructions`."""
        text = self._llm.generate(prompt=instructions, stream=False, **kwargs)
        if not isinstance(text, str):
            raise ValueError("Invalid generated result.")
        return Message(content=text)

    def _function_call_retry(self, messages: List[chatglm_cpp.ChatMessage], retries=3, **kwargs):
        """Chat with retries until the model emits a non-empty function name.

        Returns the last reply even if no valid function name was produced.
        """
        r = None
        for i in range(retries):
            r = self._llm.chat(messages=messages, stream=False, **kwargs)
            if isinstance(r, Iterator):
                raise ValueError(f"Invalid chat result: {r}")
            if r.tool_calls is not None:
                for t in r.tool_calls:
                    if t.function.name is not None and t.function.name != "":
                        return r
                    else:
                        # Fix: Logger.warn is deprecated; use warning().
                        self._log.warning(f"function_name is null, retry: {i + 1}")
        return r

    def chat(self, messages: Optional[List[ChatMessage]] = None, tools: Optional[List[ActionSpec]] = None, **kwargs) -> ChatMessage:
        """Chat with the model, optionally emulating tool calling.

        When tools are given, their specs are injected as a system message
        inserted before the last entry of `messages` (note: a caller-provided
        list is modified in place).

        Fix: the previous signature used a shared mutable default
        (``messages=[]``) that this method mutates via ``insert`` — system
        prompts would accumulate in the default list across calls.
        """
        if messages is None:
            messages = []

        use_tools = tools is not None and len(tools) > 0

        if use_tools:
            tools_description = [t.oai_spec() for t in tools]
            system_instructions = """
Answer the following questions as best as you can. You have access to the following tools:\n
"""
            system_instructions += json.dumps(tools_description, ensure_ascii=False, indent=4)

            messages.insert(-1, ChatMessage(
                role="system",
                content=system_instructions
            ))

        # chatglm.cpp has no "tool" role; fold those into user messages.
        chatglm_messages = []
        for m in messages:
            role = m.role
            if role == "tool":
                role = "user"
            chatglm_messages.append(chatglm_cpp.ChatMessage(role=role, content=m.content))
        if self._log.isEnabledFor(DEBUG):
            self._log.debug(chatglm_messages)
        if use_tools:
            r = self._function_call_retry(messages=chatglm_messages, **kwargs)
        else:
            r = self._llm.chat(messages=chatglm_messages, stream=False, **kwargs)

        if not isinstance(r, chatglm_cpp.ChatMessage):
            raise ValueError(f"invalid message type: {r}, expected: ChatMessage")

        resp = ChatMessage(role=r.role, content=r.content)

        tool_calls = r.tool_calls

        if tool_calls and use_tools:
            def tool_call(**kwargs):
                return kwargs

            resp.tool_calls = []

            for tc in tool_calls:
                func_name = tc.function.name
                if not func_name:
                    continue
                self._log.debug(f"Function to call: {func_name}")

                # NOTE(security): `eval` executes model-produced text. Only
                # `tool_call(...)` is exposed through the globals dict, but
                # this is still risky with untrusted model output.
                func_args = eval(tc.function.arguments, dict(tool_call=tool_call))

                resp.tool_calls.append(
                    ToolCall(
                        # NOTE(review): the function name doubles as the
                        # tool-call id here — confirm downstream consumers
                        # don't require unique ids.
                        id=func_name,
                        type=tc.type,
                        function=Function(
                            name=func_name,
                            arguments=json.dumps(func_args, ensure_ascii=False)
                        )
                    )
                )
        return resp

    @property
    def model(self) -> str:
        """The model path this instance was constructed with."""
        return self._model
115 |
--------------------------------------------------------------------------------
/iauto/llms/llama.py:
--------------------------------------------------------------------------------
1 | from typing import Iterator, List, Optional
2 |
3 | import llama_cpp
4 | from llama_cpp.llama_chat_format import LlamaChatCompletionHandlerRegistry
5 |
6 | from ..actions import ActionSpec
7 | from ..log import get_logger
8 | from ._qwen import qwen_chat_handler
9 | from .llm import LLM, ChatMessage, Function, Message, ToolCall
10 |
11 | _model_cache = {}
12 |
13 |
class LLaMA(LLM):
    """
    LLM backed by llama.cpp via llama-cpp-python.

    llama.cpp: https://github.com/ggerganov/llama.cpp
    llama-cpp-python: https://github.com/abetlen/llama-cpp-python

    Models are cached per model path so repeated construction reuses the
    loaded weights.
    """

    def __init__(self, **kwargs) -> None:
        """Create (or reuse a cached) llama.cpp model.

        Defaults applied when not provided: verbose=False, n_ctx=0
        (from model), n_gpu_layers=-1 (offload all layers).
        """
        super().__init__()
        if "verbose" not in kwargs:
            kwargs["verbose"] = False

        if "n_ctx" not in kwargs:
            kwargs["n_ctx"] = 0

        if "n_gpu_layers" not in kwargs:
            kwargs["n_gpu_layers"] = -1

        self._model = kwargs.get("model_path", "LLaMA")

        self._llm = _model_cache.get(self._model)
        if self._llm is None:
            model = llama_cpp.Llama(**kwargs)
            _model_cache[self._model] = model
            self._llm = model

        self._log = get_logger("LLaMA")

        # QWen models need a dedicated chat handler for function calling.
        if "qwen" in self._model.lower():
            self.register_qwen_fn()

    def generate(self, instructions: str, **kwargs) -> Message:
        """Run raw text completion for `instructions`."""
        r = self._llm.create_completion(prompt=instructions, **kwargs)
        # Fix: removed stray debug `print(instructions)` that wrote every
        # prompt to stdout; use the logger instead.
        self._log.debug(instructions)
        if isinstance(r, Iterator):
            raise ValueError(f"Invalid response: {r}")
        return Message(content=r["choices"][0]["text"])

    def chat(self, messages: List[ChatMessage] = [], tools: Optional[List[ActionSpec]] = None, **kwargs) -> ChatMessage:
        """Chat with the model, forwarding optional tool specs.

        Args:
            messages: Conversation history.
            tools: Optional tool specs converted to OpenAI format.
            **kwargs: Passed through to `create_chat_completion`.

        Returns:
            ChatMessage: The first choice's message, with `tool_calls`
            populated when present.
        """
        tools_description = []
        tool_choice = "auto"

        if tools:
            tools_description = [t.oai_spec() for t in tools]

        msgs = [m.model_dump() for m in messages]
        r = self._llm.create_chat_completion(
            messages=msgs,
            tools=tools_description,
            tool_choice=tool_choice,
            **kwargs
        )

        m = r["choices"][0]["message"]

        resp = ChatMessage(role=m["role"], content=m["content"] or "")

        tool_calls = m.get("tool_calls")
        if tool_calls:
            resp.tool_calls = []
            for tool_call in tool_calls:
                func_name = tool_call["function"]["name"]
                func_args = tool_call["function"]["arguments"]
                resp.tool_calls.append(
                    ToolCall(
                        id=tool_call["id"],
                        type=tool_call["type"],
                        function=Function(
                            name=func_name,
                            arguments=func_args
                        )
                    )
                )
        return resp

    @property
    def model(self) -> str:
        """The model path (or "LLaMA" when none was given)."""
        return self._model

    def register_qwen_fn(self):
        """Register the QWen function-calling chat handler once per process."""
        REGISTER_FLAG = "llama_qwen_chat_handler_registered"
        if REGISTER_FLAG not in globals():
            registry = LlamaChatCompletionHandlerRegistry()
            registry.register_chat_completion_handler(name="qwen-fn", chat_handler=qwen_chat_handler, overwrite=True)
            globals()[REGISTER_FLAG] = True
99 |
--------------------------------------------------------------------------------
/iauto/llms/llm.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Dict, List, Optional
3 |
4 | from pydantic import BaseModel
5 |
6 | from ..actions import ActionSpec
7 |
8 |
class Function(BaseModel):
    """
    Represents a function call with optional arguments.

    Attributes:
        name (str): The name of the function being called.
        arguments (Optional[str]): The arguments to be passed to the function,
            if any. NOTE: producers in this package serialize arguments as a
            JSON-encoded string.
    """
    name: str
    arguments: Optional[str] = None
19 |
20 |
class ToolCall(BaseModel):
    """
    Represents a call to a specific tool with an optional function call.

    Attributes:
        id (str): The unique identifier for the tool call.
        type (str): The type of the tool (e.g. "function" in OpenAI-style
            responses).
        function (Optional[Function]): An optional Function instance representing the function call associated with \
the tool, if any.
    """

    id: str
    type: str
    function: Optional[Function] = None
35 |
36 |
class Message(BaseModel):
    """Base message type: a plain text content payload."""
    content: str
39 |
40 |
class Usage(BaseModel):
    """
    Represents token usage for one LLM exchange.

    Attributes:
        input_tokens (int): The number of tokens in the input message.
        output_tokens (int): The number of tokens in the generated response message.
    """
    input_tokens: int
    output_tokens: int
51 |
52 |
class ChatMessage(Message):
    """
    A chat message carrying role metadata and optional tool-call information.

    Attributes:
        role (str): The role of the entity sending the message (e.g., "user", "system", "assistant").
        tool_calls (Optional[List[ToolCall]]): A list of ToolCall instances representing the tool calls associated \
with this message, if any.
        tool_call_id (Optional[str]): The identifier of the tool call associated with this message, if any.
        name (Optional[str]): The name of the tool or function called.
        usage (Optional[Usage]): The token usage.
    """

    role: str
    tool_calls: Optional[List[ToolCall]] = None
    tool_call_id: Optional[str] = None
    name: Optional[str] = None
    usage: Optional[Usage] = None

    @staticmethod
    def from_dict(d: Dict) -> "ChatMessage":
        """
        Build a ChatMessage from a plain dictionary.

        Missing 'role'/'content' keys fall back to empty strings, other
        missing keys to None. The 'tool_calls' sub-dictionaries are
        converted into ToolCall instances; the result always carries a
        list (possibly empty), never None.

        Args:
            d (Dict): The dictionary containing the ChatMessage data.

        Returns:
            ChatMessage: The populated instance.
        """
        calls = [
            ToolCall(
                id=tc["id"],
                type=tc["type"],
                function=Function(
                    name=tc["function"]["name"],
                    arguments=tc["function"]["arguments"]
                )
            )
            for tc in d.get("tool_calls") or []
        ]
        return ChatMessage(
            role=d.get("role") or "",
            content=d.get("content") or "",
            tool_call_id=d.get("tool_call_id"),
            name=d.get("name"),
            tool_calls=calls
        )
108 |
109 |
class LLM(ABC):
    """Abstract base class for a Language Model (LLM) that defines the interface for generating messages and handling \
chat interactions."""

    def __init__(self) -> None:
        """Initialize the LLM instance."""
        super().__init__()

    @abstractmethod
    def generate(self, instructions: str, **kwargs) -> Message:
        """
        Generate a message based on the given instructions (raw text
        completion, no chat framing).

        Args:
            instructions (str): The instructions or prompt to generate the message from.
            **kwargs: Additional keyword arguments that the concrete implementation may use.

        Returns:
            Message: The generated message as a Message instance.
        """

    @abstractmethod
    def chat(self, messages: List[ChatMessage], tools: Optional[List[ActionSpec]] = None, **kwargs) -> ChatMessage:
        """
        Conduct a chat interaction by processing a list of ChatMessage instances and optionally using tools.

        Args:
            messages (List[ChatMessage]): A list of ChatMessage instances representing the conversation history.
            tools (Optional[List[ActionSpec]]): An optional list of ActionSpec instances representing tools that can be used in the chat.
            **kwargs: Additional keyword arguments that the concrete implementation may use.

        Returns:
            ChatMessage: The response as a ChatMessage instance after processing the interaction.
        """  # noqa: E501

    @property
    @abstractmethod
    def model(self) -> str:
        """Abstract property that should return the model identifier for the LLM instance.

        Returns:
            str: The model identifier (e.g. a model name or file path).
        """
--------------------------------------------------------------------------------
/iauto/llms/llm_factory.py:
--------------------------------------------------------------------------------
1 | from .llm import LLM
2 | from .openai import OpenAI
3 |
4 |
def create_llm(provider: str = "openai", **kwargs) -> LLM:
    """
    Create a language model instance based on the specified provider.

    This factory function supports creating instances of different language
    models by specifying a provider. Currently supported providers are 'openai',
    'llama', and 'chatglm'. Depending on the provider, additional keyword
    arguments may be required or optional.

    Parameters:
    - provider (str): The name of the provider for the LLM. Defaults to 'openai'.
      None or unknown values raise ValueError.
    - **kwargs: Additional keyword arguments specific to the chosen LLM provider.

    Returns:
    - LLM: An instance of the specified language model.

    Raises:
    - ImportError: If the required module for the specified provider is not installed.
    - ValueError: If an invalid provider name is given.
    """

    # Normalize once; treats None the same as an empty (invalid) provider.
    provider = (provider or "").lower()

    if provider == "openai":
        # BUG FIX: kwargs.get("model", "") returns None when the caller passes
        # model=None explicitly, which used to crash on .lower(); coerce first.
        model = (kwargs.get("model") or "").lower()
        if "qwen" in model:
            # Qwen served through an OpenAI-compatible endpoint needs its own client.
            from ._openai_qwen import QWen
            return QWen(**kwargs)
        return OpenAI(**kwargs)
    elif provider == "llama":
        try:
            from .llama import LLaMA
            return LLaMA(**kwargs)
        except ImportError as e:
            raise ImportError(
                "Could not create LLaMA. "
                "Please install it with `pip install llama-cpp-python`."
            ) from e
    elif provider == "chatglm":
        try:
            from .chatglm import ChatGLM
            return ChatGLM(**kwargs)
        except ImportError as e:
            raise ImportError(
                "Could not create ChatGLM. "
                "Please install it with `pip install chatglm_cpp`."
            ) from e
    else:
        raise ValueError(f"Invalid LLM provider: {provider}")
54 |
--------------------------------------------------------------------------------
/iauto/llms/openai.py:
--------------------------------------------------------------------------------
1 | import json
2 | import re
3 | from types import SimpleNamespace
4 | from typing import List, Optional
5 |
6 | import openai
7 |
8 | from .. import log
9 | from ..actions import ActionSpec
10 | from .llm import LLM, ChatMessage, Function, Message, ToolCall, Usage
11 |
12 |
class OpenAI(LLM):
    """LLM implementation backed by the OpenAI completions/chat APIs.

    For models without native function calling, tool use is emulated by
    injecting a tool-description prompt into the conversation and parsing
    the JSON call back out of the model's text reply.
    """

    def __init__(self, model: Optional[str] = None, **kwargs) -> None:
        """Create an OpenAI-backed LLM.

        Args:
            model: Model name; defaults to "gpt-3.5-turbo".
            **kwargs: Forwarded to ``openai.OpenAI`` (e.g. api_key, base_url).
        """
        super().__init__()

        self._model = model or "gpt-3.5-turbo"
        self._openai = openai.OpenAI(**kwargs)
        self._log = log.get_logger("OpenAI")

    def generate(self, instructions: str, **kwargs) -> Message:
        """Generate a plain text completion for the given prompt."""
        kwargs.setdefault("model", self._model)

        r = self._openai.completions.create(
            prompt=instructions,
            stream=False,
            **kwargs
        )
        return Message(content=r.choices[0].text)

    def chat(
        self,
        messages: Optional[List[ChatMessage]] = None,
        tools: Optional[List[ActionSpec]] = None,
        **kwargs
    ) -> ChatMessage:
        """Run a chat completion, optionally with tool calling.

        Args:
            messages: Conversation history. Defaults to an empty history.
                (Previously a mutable default ``[]``; behavior is unchanged.)
            tools: Tool specs, converted to the OpenAI tool format.
            **kwargs: Extra arguments for ``chat.completions.create``.

        Returns:
            ChatMessage: The assistant reply, including any tool calls and
            token usage when the API reports it.
        """
        messages = messages or []
        kwargs.setdefault("model", self._model)

        tools_description = None
        tool_choice = "auto"

        if tools:
            tools_description = [t.oai_spec() for t in tools]

        native_tool_call = self.native_tool_call()
        # Models without native function calling get the tool descriptions
        # injected as an extra user message instead of API-level tools.
        use_tool_call_prompt = tools and not native_tool_call

        msgs = []
        for m in messages:
            msg = {
                # Models lacking native tool support don't accept the "tool" role.
                "role": "user" if not native_tool_call and m.role == "tool" else m.role,
                "content": m.content
            }
            if m.tool_call_id:
                msg["tool_call_id"] = m.tool_call_id
            if m.name:
                msg["name"] = m.name
            if m.tool_calls:
                msg["tool_calls"] = [t.model_dump() for t in m.tool_calls]
            msgs.append(msg)

        if use_tool_call_prompt:
            # Insert the tool-calling instructions just before the last message.
            msgs.insert(-1, {
                "role": "user",
                "content": self.tool_call_prompt(tools=tools_description)
            })

        if self._log.isEnabledFor(log.DEBUG):
            self._log.debug("Request: " + json.dumps({
                "messages": msgs,
                "tools": tools_description
            }, ensure_ascii=False, indent=4))

        if tools_description and not use_tool_call_prompt:
            kwargs["tools"] = tools_description
            kwargs["tool_choice"] = tool_choice

        r = self._openai.chat.completions.create(
            messages=msgs,
            **kwargs
        )

        if self._log.isEnabledFor(log.DEBUG):
            self._log.debug("Response: " + json.dumps(r.model_dump(), ensure_ascii=False, indent=4))

        m = r.choices[0].message

        resp = ChatMessage(role=m.role, content=m.content or "")
        if r.usage:
            resp.usage = Usage(
                input_tokens=r.usage.prompt_tokens,
                output_tokens=r.usage.completion_tokens
            )

        # Prefer an emulated tool call parsed from the text reply; avoid
        # mutating the SDK response object (previously m.tool_calls was
        # overwritten in place).
        tool_calls = m.tool_calls
        if use_tool_call_prompt:
            parsed = self.parse_tool_call(m.content)
            if parsed is not None:
                tool_calls = [parsed]

        if tool_calls:
            resp.tool_calls = [
                ToolCall(
                    id=tc.id,
                    type=tc.type,
                    function=Function(
                        name=tc.function.name,
                        arguments=tc.function.arguments
                    )
                )
                for tc in tool_calls
            ]

        return resp

    @property
    def model(self) -> str:
        """str: The configured model name."""
        return self._model

    def native_tool_call(self):
        """Check if the model supports native function calling."""
        supported_prefixes = (
            "gpt-3.5",
            "gpt-4",
            "qwen"
        )
        return self._model.lower().startswith(supported_prefixes)

    def tool_call_prompt(self, tools):
        """Build the prompt describing the available tools.

        Args:
            tools: OpenAI-format tool specs (as produced by ``oai_spec()``).

        Returns:
            str: FUNC_INSTRUCTION with one TOOL_DESC line per tool.
        """
        tools_texts = []
        for tool in tools:
            tools_texts.append(TOOL_DESC.format(
                name=tool["function"]["name"],
                description=tool["function"]["description"],
                parameters=json.dumps(tool["function"]["parameters"])
            ))

        tools_text = "\n".join(tools_texts)

        return FUNC_INSTRUCTION.format(tools_text=tools_text)

    def parse_tool_call(self, content):
        """Parse an emulated tool call out of the model's text reply.

        Tries the whole reply as JSON first, then each ``{...}`` block found
        in it. Returns an object shaped like an OpenAI tool call, or None.
        """
        def _parse(s):
            ret = SimpleNamespace(
                id="dummy_function_call_id",
                type="function",
                function=SimpleNamespace(
                    name=None,
                    arguments=None
                )
            )
            try:
                j = json.loads(s)
                if "name" in j:
                    ret.function.name = j["name"]
                else:
                    return None
                if "parameters" in j and isinstance(j["parameters"], dict):
                    ret.function.arguments = json.dumps(j["parameters"], ensure_ascii=False)
                return ret
            except Exception:
                return None

        func_call = _parse(content)
        if func_call is not None:
            # BUG FIX: a successful top-level parse used to be discarded and
            # the method fell through returning None.
            return func_call

        json_blocks = re.findall('({.*})', content, re.MULTILINE | re.DOTALL)
        for b in json_blocks:
            func_call = _parse(b)
            if func_call is not None:
                return func_call
        return None
179 |
180 |
# One-line description of a single tool, rendered into FUNC_INSTRUCTION.
TOOL_DESC = (
    'name: `{name}`; Description: `{description}`; Parameters: {parameters}'
)

# Prompt used to emulate function calling for models without native tool
# support. `{tools_text}` is filled with one TOOL_DESC line per tool; literal
# braces in the JSON example are escaped as {{ }}. The closing "skip API
# selection" sentence previously appeared twice; the duplicate was removed.
FUNC_INSTRUCTION = """You have access to the following APIs:

{tools_text}

You need to decide whether to call an API to generate response based on the conversation.

If you choose to call an API, follow these steps:
1. Evaluate the actual parameters of the API as a JSON dict according to your needs.
2. Generate API call in the format within markdown code block without any additional Notes or Explanations.

API call like this:

```json
{{
    "name": "",
    "parameters":
}}
```

If there is no API that matches the conversation, you will skip API selection.
"""
208 |
--------------------------------------------------------------------------------
/iauto/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | DEBUG = logging.DEBUG
5 | INFO = logging.INFO
6 | WARN = logging.WARN
7 | FATAL = logging.FATAL
8 |
9 |
def get_level(name: str):
    """Translate a textual log level into a ``logging`` constant.

    Args:
        name (str): Level name, case-insensitive, surrounding whitespace
            ignored. ``None`` maps to INFO.

    Returns:
        int: The matching level, or ``logging.NOTSET`` for unknown names.
    """

    if name is None:
        return logging.INFO

    level_map = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARN': logging.WARN,
        'WARNING': logging.WARN,
        'ERROR': logging.ERROR,
        'FATAL': logging.FATAL,
        'CRITICAL': logging.FATAL,
    }
    return level_map.get(name.strip().upper(), logging.NOTSET)
36 |
37 |
def get_logger(name, level=None):
    """Get a logger with the given name and level.

    Args:
        name (str): The name of the logger.
        level (str | int, optional): Desired level; falls back to the
            IA_LOG_LEVEL environment variable, then "INFO".

    Returns:
        logging.Logger: Logger configured with a single stream handler.
    """
    requested = level or os.environ.get("IA_LOG_LEVEL") or "INFO"
    # NOTSET (0) is falsy, so unknown names fall back to INFO here.
    resolved = get_level(requested) or logging.INFO

    result = logging.getLogger(name=name)
    result.setLevel(level=resolved)

    # Attach a handler only on first use to avoid duplicated log lines.
    if not result.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s'))
        handler.setLevel(level=resolved)
        result.addHandler(handler)

    return result
60 |
61 |
# Package-wide default logger; level taken from IA_LOG_LEVEL (default INFO).
logger = get_logger("ia", level=os.environ.get("IA_LOG_LEVEL") or "INFO")
"""Default logger."""
64 |
--------------------------------------------------------------------------------
/iauto/playground/Agents.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import tempfile
4 |
5 | import streamlit as st
6 |
7 | import iauto
8 | from iauto.llms import ChatMessage
9 | from iauto.playground import st_widgets, utils
10 |
11 | try:
12 | from yaml import CDumper as yaml_dumper
13 | except ImportError:
14 | from yaml import Dumper as yaml_dumper
15 |
16 | from yaml import dump as yaml_dump
17 |
st.set_page_config(
    page_title='Agents',
    page_icon='🦾',
    layout='wide'
)

utils.logo()

# Playbooks bundled with the playground live next to this script.
here = os.path.dirname(__file__)
playbooks_dir = os.path.abspath(os.path.join(here, "playbooks"))

# Initialize session state
if "messages" not in st.session_state:
    st.session_state.messages = []

# Chat history shared by the rendering code and callbacks below.
messages = st.session_state.messages

# None until create_agent() has been run from the sidebar.
agent_executor = st.session_state.get("agent_executor")

# Initialize agent
38 |
39 |
def print_received(message, sender, receiver):
    """Record and render a message one agent sent to another.

    Appends the formatted message to the shared chat history and draws it
    as an assistant chat bubble; JSON-looking content is also rendered as
    a collapsible JSON widget.
    """
    if isinstance(message, str):
        content = message
    else:
        content = message["content"]
        if "tool_calls" in message:
            call = message["tool_calls"][0]["function"]
            func_name = call["name"]
            func_args = call["arguments"]
            content = content + f"\nFunction call:\n```python\n{func_name}({func_args})\n```"

    rendered = f"**{sender.name}** (to {receiver.name})"
    json_obj = None
    content = content.strip()
    if content.startswith(("{", "[")):
        rendered = f"{rendered}\n\n```json\n{content}\n```"
        try:
            json_obj = json.loads(content)
        except Exception:
            pass
    else:
        rendered = f"{rendered}\n\n{content}"

    messages.append({"role": "assistant", "content": rendered, "json": json_obj})

    with st.chat_message("assistant"):
        st.markdown(rendered)
        if json_obj:
            st.json(json_obj, expanded=False)
69 |
70 |
def _dump_playbook_yaml(playbook):
    """Serialize a playbook dict to normalized YAML (shared dump settings)."""
    return yaml_dump(
        playbook,
        Dumper=yaml_dumper,
        sort_keys=False,
        indent=2,
        allow_unicode=True,
        default_flow_style=False,
        explicit_start=True,
        explicit_end=False
    ).strip()


def create_agent(options):
    """Create and launch an agent executor from the sidebar options.

    Builds a playbook dict (llm.session -> agents.create -> agents.executor),
    writes it to a temporary YAML file, executes it to obtain the executor,
    and stores both the generated YAML (with a REPL section appended, for
    display only) and the executor in the Streamlit session state.

    Args:
        options (dict): Options collected from the sidebar widgets.

    Returns:
        tuple: (playbook_yml, agent_executor).
    """
    reset()

    playbook = {
        "playbook": {
            "actions": [
                {
                    "llm.session": {
                        "args": {
                            "provider": options["provider"],
                            "llm_args": options["oai_args"] if options["provider"] == "openai" else options["llama_args"],  # noqa: E501
                            "tools": options["tools"]
                        },
                        "actions": [
                            {
                                "playbook": options["playbooks"]
                            }
                        ],
                        "result": "$llm_session"
                    }
                }
            ]
        }
    }

    agents = []
    agents_vars = []

    # Guarantee at least one default agent.
    if len(options["agents"]) == 0:
        options["agents"].append({
            "name": "Assistant",
            "instructions": None,
            "description": None
        })
    for idx, agent in enumerate(options["agents"]):
        v = f"$agent_{idx}"
        agents.append({
            "agents.create": {
                "args": {
                    "session": "$llm_session",
                    "name": f"Assistant-{idx}" if agent["name"] == "" else agent["name"],
                    "instructions": agent["instructions"] if agent["instructions"] != "" else None,
                    "description": agent["description"] if agent["description"] != "" else None,
                    "react": options["agent_react"]
                },
                "result": v
            }
        })
        agents_vars.append(v)

    playbook["playbook"]["actions"].extend(agents)
    playbook["playbook"]["actions"].append({
        "agents.executor": {
            "args": {
                "session": "$llm_session",
                "instructions": options["instructions"],
                "react": options["agent_react"],
                "agents": agents_vars
            },
            "result": "$agent_executor"
        }
    })
    repl = {
        "repeat": {
            "actions": [
                {
                    "shell.prompt": {
                        "args": "Human: ",
                        "result": "$prompt"
                    }
                },
                {
                    "agents.run": {
                        "args": {
                            "agent_executor": "$agent_executor",
                            "message": "$prompt"
                        },
                        "result": "$message"
                    }
                },
                {
                    "shell.print": {
                        "args": {
                            "message": "AI: {$message}",
                            "color": "green"
                        }
                    }
                }
            ]
        }
    }

    playbook_yml = _dump_playbook_yaml(playbook)

    # Execute the playbook from a temp file, then remove it.
    # BUG FIX: the file was created with delete=False and never cleaned up.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".yaml") as f:
        f.write(playbook_yml.encode("utf-8"))
        playbook_path = f.name

    try:
        agent_executor = iauto.execute(
            playbook=playbook_path
        )
    finally:
        try:
            os.remove(playbook_path)
        except OSError:
            pass

    agent_executor.register_print_received(print_received)
    agent_executor.set_human_input_mode("NEVER")

    # The REPL actions are only appended to the YAML shown to the user.
    playbook["playbook"]["actions"].append(repl)
    playbook_yml = _dump_playbook_yaml(playbook)

    st.session_state.playbook_yml = playbook_yml
    st.session_state.agent_executor = agent_executor

    return playbook_yml, agent_executor
201 |
202 |
def clear():
    """Wipe the chat history; also reset the live executor when present."""
    executor = agent_executor
    if executor is not None:
        executor.session.messages.clear()
        executor.reset()
    messages.clear()
209 |
def reset():
    """Clear all chat state and discard the current agent executor."""
    clear()
    st.session_state.agent_executor = None
213 |
214 |
def get_model():
    """Return the active session's model name, or None before launch."""
    if agent_executor is None:
        return None
    return agent_executor.session.llm.model
218 |
219 |
# Sidebar
with st.sidebar:
    button_label = "Reload" if agent_executor else "Launch"
    options = st_widgets.options(button_label=button_label, func=create_agent)

# Main container
# st.session_state
if "playbook_yml" in st.session_state:
    with st.expander("Generated playbook"):
        st.markdown(f"```yaml\n{st.session_state.playbook_yml}\n```")

if agent_executor:
    mode = options["mode"]
    mode_name = st_widgets.mode_options[mode]
    st.markdown(f"#### {mode_name}")

    # Seed the history with a greeting on first render.
    if len(messages) == 0:
        greeting = "Hello! How can I help you today?"
        st.session_state.messages.append({"role": "assistant", "content": greeting})

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        role = message["role"]
        content = message["content"]
        with st.chat_message(role):
            if message["role"] == "user":
                content = f"{content}"
            st.markdown(content)
            if message.get("json"):
                st.json(message["json"], expanded=False)

    # Accept user input
    if prompt := st.chat_input("What is up?"):
        messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(f"{prompt}")

        agent_executor.session.add(ChatMessage(role="user", content=prompt))

        # Dispatch on the selected mode: plain chat, ReAct, or multi-agent.
        chat_args = options["chat_args"]
        use_tools = options["use_tools"]
        resp_message = None
        if mode == "chat":
            with st.spinner("Generating..."):
                resp = agent_executor.session.run(**chat_args, use_tools=use_tools)
                resp_message = resp.content
        elif mode == "react":
            with st.spinner("Reacting..."):
                resp = agent_executor.session.react(**chat_args, use_tools=use_tools)
                resp_message = resp.content
        elif mode == "agent":
            with st.status("Agents Conversation", expanded=True):
                resp = agent_executor.run(message=ChatMessage(role="user", content=prompt), clear_history=False)
                resp_message = resp["summary"]

        with st.chat_message("assistant"):
            st.markdown(resp_message)
        messages.append({"role": "assistant", "content": resp_message})

    # Only offer "Clear" once there is more than the greeting.
    if len(messages) > 1:
        st.button("Clear", type="secondary", help="Clear history", on_click=clear)

    model = get_model()
    st.markdown(f"```MODEL: {model}```")
else:
    st.warning("You need to launch a model first.")
286 |
--------------------------------------------------------------------------------
/iauto/playground/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/iauto/playground/__init__.py
--------------------------------------------------------------------------------
/iauto/playground/pages/100_Settings.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 |
3 | from iauto.playground import runner, utils
4 |
st.set_page_config(
    page_title='Settings',
    page_icon='⚙️',
    layout='wide',
    initial_sidebar_state="expanded"
)

utils.logo()

st.title("Settings")

# Render the playground runner's environment dict as read-only JSON.
st.json(runner.env)
17 |
--------------------------------------------------------------------------------
/iauto/playground/pages/1_Playbooks.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | import streamlit as st
5 |
6 | import iauto
7 | from iauto.playground import utils
8 |
st.set_page_config(
    page_title='Playbooks',
    page_icon='📚',
    layout='centered',
    initial_sidebar_state="expanded"
)

utils.logo()

st.title("Playbooks")

# Maps playbook file path -> Future of its background run.
if "runs" not in st.session_state:
    st.session_state.runs = {}
22 |
23 |
def run_playbook(playbook, args):
    """Start a playbook in a background thread and remember its future."""
    st.session_state.runs[playbook] = iauto.execute_in_thread(playbook, variables=args)
27 |
28 |
def stop_run(playbook):
    """Cancel a playbook's background run and wait for it to settle.

    Args:
        playbook (str): Playbook path used as the key in session_state.runs.
    """
    from concurrent.futures import CancelledError

    future = st.session_state.runs.get(playbook)
    if future:
        future.cancel()
        try:
            # Wait for completion if the run could not be cancelled.
            # BUG FIX: result() raises CancelledError when cancel() succeeds;
            # previously this propagated and crashed the page.
            future.result()
        except CancelledError:
            pass
    st.session_state.runs[playbook] = None
35 |
36 |
def display_arguments(playbook, playbook_file):
    """Render text inputs for a playbook's declared arguments.

    A value of the form "$NAME" is resolved from the environment when a
    matching variable is set.

    Returns:
        dict | None: Argument name -> value, or None when the playbook
        declares no arguments.
    """
    spec = playbook.spec
    if spec is None or spec.arguments is None:
        return None

    collected = {}
    with st.expander("Arguments", expanded=False):
        for arg in spec.arguments:
            value = st.text_input(arg.name, help=arg.description, key=f"{playbook_file}_{arg.name}")
            env_key = value.replace("$", "")
            if os.environ.get(env_key):
                value = os.environ[env_key]
            collected[arg.name] = value
    return collected
49 |
50 |
playbooks = utils.list_playbooks()

# One card per playbook: description, argument inputs, run/stop button, result.
for k, playbook in playbooks.items():
    desc = k[1]
    f = k[0]

    with st.container(border=True):
        st.write(desc)

        col1, col2 = st.columns((100, 18))
        with col1:
            # Arguments
            args = display_arguments(playbook, f)
        with col2:
            future = st.session_state.runs.get(f)
            running = future is not None and future.running()

            # NOTE(review): the button is disabled while running, so the
            # stop_run branch below appears unreachable — confirm intent.
            if st.button("Running" if running else "Run", disabled=running, key=f):
                if running:
                    stop_run(f)
                else:
                    run_playbook(f, args=args)
                st.rerun()

        # Result
        if future and future.done():
            with st.spinner("Status"):
                with st.expander("Result", expanded=False):
                    try:
                        result = future.result()
                        st.write(result or "```Done with nothing return.```")
                    except Exception as e:
                        st.error(e)

# While any run is active, poll and rerun so the UI reflects progress.
for run in st.session_state.runs.values():
    if run and run.running():
        time.sleep(0.5)
        st.rerun()
89 |
--------------------------------------------------------------------------------
/iauto/playground/pages/3_Developer.py.hide:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | import json
3 | from iauto import llms
4 | from iauto.llms.llm import ChatMessage
5 | from iauto.playground import llm_options, utils
6 |
st.set_page_config(
    page_title='Developer',
    page_icon='👨🏻💻',
    layout='wide'
)

utils.logo()

# Serialize every registered action spec so it can be embedded in the
# system prompt below.
actions = utils.list_actions()
action_specs = []
for action in actions:
    action_specs.append(json.dumps(action.dict()))
action_specs = '\n'.join(action_specs)
20 |
# System prompt teaching the LLM the playbook format. The example spec below
# previously said `required: tru`; fixed to `required: true` so the model is
# not shown invalid YAML.
instructions = f"""
`iauto` is an automation framework that uses YAML files to describe workflows, with each workflow referred to as a playbook.
You are a helpful assistant in developing playbooks.

Playbook specification:

1. Playbook: A YAML file that describes a workflow.
2. Variable: Values that can be evaluated at runtime, starting with $,like: $myvar
3. Action: An action defines a single operation that can be performed. A playbook contains zero or more actions.
4. Action arguments: Arguments for an action can be keyword arguments or positional arguments. For example:

Keyword arguments:
```
action_name:
  args:
    arg_name_1: arg_value_1
    arg_name_2: arg_value_2
```

Positional arguments:
```
action_name:
  args:
    - first_arg
    - second_arg
```
5. Condition statements:
not: Represents "not true", usage: {{"not": $var}}
all: Represents "all true", usage: {{"all": [other conditions]}}
any: Represents "any is true", usage: {{"any": [other conditions]}}
lt: Represents "less than", usage: {{"lt": [left_value, right_value]}}
le: Represents "less than or equal to", usage: {{"le": [left_value, right_value]}}
eq: Represents "equal to", usage: {{"eq": [left_value, right_value]}}
ne: Represents "not equal to", usage: {{"ne": [left_value, right_value]}}
ge: Represents "greater than or equal to", usage: {{"ge": [left_value, right_value]}}
gt: Represents "greater than", usage: {{"gt": [left_value, right_value]}}

6. Control flow:
when: Execute actions when the conditions are met
repeat: Loop through actions when the conditions are met
each: Iterate through lists and dictionaries
7. Playbook spec: Playbooks can be used as functions called by LLM. The spec is used to describe the definition for calling the LLM function, which includes the name, description, and arguments.

You can only use the following pre-defined Actions:
```
{action_specs}
```

A playbook example:
```yaml
playbook:
  description: "Playbook description"
  spec:
    name: function_name_for_llm_function_calling
    description: the description for llm function calling
    arguments:
      - name: playbook variable name
        type: argument data type
        description: argument description
        required: true
  actions:
    - list.append: [$list, 0]
    - playbook: ./other_playbook.yaml
    - repeat:
        description: do forever
        actions:
          - time.now:
              result: $now
          - math.mod:
              args: [$now, 2]
              result: $tick
          - when:
              args:
                eq:
                  - $tick
                  - 0
              actions:
                - log: "tick: {{$tick}}"
          - log:
              args:
                now: $now
                tick: $tick
          - time.wait: 3
```

Write or modify playbook:
"""
108 |
with st.sidebar:
    # NOTE(review): this rebinds the imported `llm_options` module name to
    # its render() result — works, but shadows the module.
    llm_options = llm_options.render()

llm_options["llm_chat_args"]["instructions"] = instructions

# "Reload" drops the cached session so a fresh LLM is created below.
if st.button(label="Reload", type="primary"):
    st.session_state["llm_session"] = None

llm_session = st.session_state.get("llm_session")
if llm_session is None:
    llm = llms.create_llm(
        provider=llm_options["llm_provider"],
        **llm_options["llm_args"]
    )

    llm_session = llms.Session(llm=llm)
    st.session_state.llm_session = llm_session


# Show a greeting until the first real message arrives.
if len(llm_session.messages) == 0:
    greeting = "I can help you develop a playbook. You can give me a task, and the task description should be as clear as possible."  # noqa: E501
    # llm_session.add(llms.ChatMessage(role="assistant", content=greeting))
    with st.chat_message("assistant"):
        st.markdown(greeting)

# Display chat messages from history on app rerun
for message in llm_session.messages:
    role = message.role
    content = message.content
    with st.chat_message(role):
        if role == "user":
            content = f"{content}"
        st.markdown(content)

# Accept user input
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(f"{prompt}")

    llm_session.add(llms.ChatMessage(role="user", content=prompt))
    with st.spinner("Generating..."):
        resp = llm_session.run(**llm_options["llm_chat_args"])

    with st.chat_message("assistant"):
        content = resp.content if isinstance(resp, ChatMessage) else resp
        st.markdown(content)


def reset():
    """Clear the LLM session's message history."""
    llm_session.messages.clear()


if len(llm_session.messages) > 1:
    st.button("Clear", type="secondary", help="Clear history", on_click=reset)

model = llm_session.llm.model
st.markdown(f"```MODEL: {model}```")
167 |
--------------------------------------------------------------------------------
/iauto/playground/pages/4_Actions.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 |
3 | from iauto.playground import utils
4 |
st.set_page_config(
    page_title='Actions Reference',
    page_icon='🎉',
    layout='centered',
    initial_sidebar_state="expanded"
)

utils.logo()

st.title("Actions Reference")

actions = utils.list_actions()

# One card per registered action: name, description, and its arguments.
for a in actions:
    with st.container(border=True):
        st.markdown(f"**{a.name}**")
        st.caption(a.description)
        args = [arg.dict() for arg in a.arguments] if a.arguments is not None else []
        if len(args) > 0:
            with st.expander("Arguments", expanded=False):
                st.json(args, expanded=True)
26 |
--------------------------------------------------------------------------------
/iauto/playground/pages/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/iauto/playground/pages/__init__.py
--------------------------------------------------------------------------------
/iauto/playground/playbooks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/iauto/playground/playbooks/__init__.py
--------------------------------------------------------------------------------
/iauto/playground/playbooks/agents.yaml:
--------------------------------------------------------------------------------
# Builds a single-assistant agent executor ($agent_executor) on top of the
# shared LLM session ($session) created by llm_chat.yaml.
playbook:
  actions:
    - playbook: llm_chat.yaml
    - agents.create:
        args:
          session: $session
          react: $react
          name: GeneralAssistant
        result: $general_assistant
    - agents.executor:
        args:
          session: $session
          react: $react
          agents:
            - $general_assistant
        result: $agent_executor
17 |
--------------------------------------------------------------------------------
/iauto/playground/playbooks/llm_chat.yaml:
--------------------------------------------------------------------------------
# Creates an LLM session ($session) from playground options ($llm_provider,
# $llm_args, $tools) and loads the selected playbooks ($playbooks) as tools.
playbook:
  actions:
    - llm.session:
        args:
          provider: $llm_provider
          llm_args: $llm_args
          tools: $tools
        actions:
          - playbook:
              args: $playbooks
        result: $session
12 |
--------------------------------------------------------------------------------
/iauto/playground/runner.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | import streamlit as st
5 | from streamlit.runtime.scriptrunner import script_runner
6 | from streamlit.web.cli import main as streamlit_main
7 |
8 | from .. import log
9 |
# Environment/configuration values shown on the Settings page.
env = {}
11 |
12 |
def run(app=None, playbook_dir=None):
    """Launch a playground Streamlit app.

    Args:
        app (str, optional): App script name (without ".py") inside this
            package. Defaults to "Agents".
        playbook_dir (str, optional): Directory containing user playbooks;
            defaults to the current working directory. Exposed to the app
            through the IA_PLAYBOOK_DIR environment variable.
    """
    here = os.path.dirname(__file__)

    if app is None:
        app = "Agents"

    app_py = os.path.join(here, f"{app}.py")

    if playbook_dir is None:
        # Use os.getcwd() instead of manually decoding os.getcwdb() with a
        # hard-coded "UTF-8"; getcwd() honors the filesystem encoding.
        playbook_dir = os.getcwd()

    os.environ["IA_PLAYBOOK_DIR"] = playbook_dir

    # Wrap Streamlit's uncaught-exception hook so errors are shown quietly
    # unless debug logging is enabled.
    handle_uncaught_app_exception = script_runner.handle_uncaught_app_exception

    def exception_handler(e):
        # Custom error handling
        if os.getenv("IA_LOG_LEVEL", "INFO").lower() == "debug":
            handle_uncaught_app_exception(e)
        else:
            log.logger.warning(e)
            st.error(e)

    script_runner.handle_uncaught_app_exception = exception_handler

    # streamlit_main() reads the CLI arguments from sys.argv.
    sys.argv = [
        "streamlit",
        "run",
        f"{app_py}",
        "--theme.base=dark",
        "--client.showErrorDetails=False",
        "--client.toolbarMode=minimal",
        "--server.enableStaticServing=true"
    ]
    streamlit_main()
48 |
--------------------------------------------------------------------------------
/iauto/playground/st_widgets.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import uuid as _uuid
4 |
5 | import streamlit as st
6 |
7 | from . import utils
8 |
# Process-wide namespace so generated ids differ across processes.
_namespace = _uuid.uuid1()


def uuid():
    """Generate a unique 32-character hex identifier.

    Combines a fresh time-based UUID1 with the process-wide UUID5
    namespace above.
    """
    seed = _uuid.uuid1().hex
    return _uuid.uuid5(_namespace, seed).hex
14 |
15 |
# Maps internal mode keys to their display labels in the UI.
mode_options = {
    "chat": "Chat",
    "react": "React",
    "agent": "Multi-agent"
}
21 |
22 |
def options(button_label, func):
    """Render the LLM options panel (Design + JSON tabs) and return the options.

    The options dict is kept in ``st.session_state.llm_options`` and mirrored
    into per-widget ``opts_*`` session keys so widget state survives reruns.

    Args:
        button_label: Label of the primary action button on the Design tab.
        func: Callback invoked as ``func(options=opts)`` when that button is
            clicked; the app reruns afterwards.

    Returns:
        The current options dict.
    """
    tab1, tab2 = st.tabs(["Design", "JSON"])

    # Initialize (or restore) the canonical options dict in session state.
    if "llm_options" not in st.session_state:
        opts = {
            "instructions": None,
            "mode": "chat",
            "agent_react": False,
            "provider": "openai",
            "oai_args": {},
            "llama_args": {},
            "chat_args": {},
            "use_tools": False,
            "tools": [],
            "playbooks": [],
            "agents": []
        }
        st.session_state.llm_options = opts
    else:
        opts = st.session_state.llm_options

    # Seed the per-widget session keys from opts without overwriting
    # values the user already changed in this session.
    def _set_key(key, value):
        if key not in st.session_state:
            st.session_state[key] = value

    def _remove_all_keys():
        # Drop every widget key so a freshly loaded JSON config re-seeds them.
        keys = list(st.session_state.keys())
        for k in keys:
            if k.startswith("opts_"):
                st.session_state.pop(k)

    def _set_dict_opts(key, d):
        # Flatten nested dict options into "opts_<section>_<field>" keys.
        for k, v in d.items():
            _set_key(f"opts_{key}_{k}", v)

    for k, v in opts.items():
        if k == "agents":
            for idx, agent in enumerate(v):
                _set_key(f"opts_agent_name_{idx}", agent["name"])
                _set_key(f"opts_agent_inst_{idx}", agent["instructions"])
                _set_key(f"opts_agent_desc_{idx}", agent["description"])
        elif isinstance(v, dict):
            _set_dict_opts(k, v)
        else:
            _set_key(f"opts_{k}", v)

    opts["mode"] = tab1.radio(
        "Mode",
        options=mode_options.keys(),
        format_func=lambda x: mode_options[x],
        key="opts_mode"
    )

    with tab1.expander("Multi-agent Options"):
        opts["agent_react"] = st.checkbox(
            "Enable ReAct",
            key="opts_agent_react",
            help="Agents react"
        )

        opts["instructions"] = st.text_area(
            "Instructions",
            placeholder="You are a useful assistant.",
            key="opts_instructions",
            value=opts["instructions"]
        )

        st.markdown("Agents")

        opts["agent_nums"] = st.slider('Number of agents', min_value=1, max_value=5, key="opts_agent_nums")
        opts["agents"] = [None] * opts["agent_nums"]

        # One name/instructions/description row per agent.
        for idx in range(opts["agent_nums"]):
            st.text(f"# {idx+1}")
            name = st.text_input(
                "Name",
                key=f"opts_agent_name_{idx}",
                placeholder="Role name",
                label_visibility="collapsed"
            )
            inst = st.text_input(
                "Instructions",
                key=f"opts_agent_inst_{idx}",
                placeholder="For role instruction",
                label_visibility="collapsed"
            )
            desc = st.text_input(
                "Description",
                key=f"opts_agent_desc_{idx}",
                placeholder="For role selection",
                label_visibility="collapsed"
            )
            opts["agents"][idx] = {"name": name, "instructions": inst, "description": desc}

    # provider id -> (display label, caption)
    provider_options = {
        "openai": ("OpenAI", "OpenAI compatible API"),
        "llama": ("Local", "GGUF models"),
    }

    opts["provider"] = tab1.radio(
        "LLM",
        options=provider_options.keys(),
        captions=[x[1] for x in provider_options.values()],
        format_func=lambda x: provider_options[x][0],
        key="opts_provider"
    )

    with tab1.expander("Options"):
        provider = opts["provider"]
        if provider == "openai":
            api_key = st.text_input(
                "API Key",
                value="$OPENAI_API_KEY",
                key="opts_oai_args_api_key"
            )
            opts["oai_args"]["api_key"] = api_key

            base_url = st.text_input(
                "API Base URL",
                value=os.environ.get("OPENAI_API_BASE") or None,
                key="opts_oai_args_base_url"
            )
            # Normalize an empty field to None so the client uses its default.
            if base_url == "":
                base_url = None
            opts["oai_args"]["base_url"] = base_url

            model = st.text_input(
                "Model",
                value=os.environ.get("OPENAI_MODEL_NAME") or "gpt-3.5-turbo",
                key="opts_oai_args_model"
            )
            opts["oai_args"]["model"] = model
        elif provider == "llama":
            model = st.text_input(
                "Model path",
                value=os.environ.get("MODEL_PATH") or None,
                placeholder="GGUF model path",
                key="opts_llama_args_model_path"
            )
            opts["llama_args"]["model_path"] = model

            chat_format = st.text_input(
                "Chat format",
                placeholder="auto",
                value=os.environ.get("CHAT_FORMAT") or None,
                key="opts_llama_args_chat_format"
            )
            opts["llama_args"]["chat_format"] = chat_format

            repeat_penalty = st.number_input(
                "repeat_penalty",
                placeholder="",
                key="opts_chat_args_repeat_penalty"
            )

            if repeat_penalty:
                opts["chat_args"]["repeat_penalty"] = repeat_penalty

        # top_k only applies to non-OpenAI providers; remove any stale value.
        if provider != "openai":
            top_k = st.number_input("top_k", value=2, key="opts_chat_args_top_k")
            opts["chat_args"]["top_k"] = top_k
        else:
            if "top_k" in opts["chat_args"]:
                opts["chat_args"].pop("top_k")

        temperature = st.number_input(
            "temperature",
            value=0.75,
            key="opts_chat_args_temperature"
        )
        opts["chat_args"]["temperature"] = temperature

    opts["use_tools"] = tab1.checkbox("Use tools", key="opts_use_tools")

    # Tools
    actions = utils.list_actions()
    options_action = [a.name for a in actions]
    opts["tools"] = tab1.multiselect(
        "Actions",
        options=options_action,
        placeholder="Select actions",
        key="opts_tools"
    )

    # Playbook keys are (file_path, description) tuples, so they convert
    # directly into a path -> description mapping for display.
    playbooks = utils.list_playbooks()
    options_playbook = dict(playbooks.keys())

    llm_playbook_tools = tab1.multiselect(
        "Playbooks",
        options=options_playbook.keys(),
        format_func=lambda x: options_playbook[x],
        placeholder="Select playbooks",
        key="opts_playbooks"
    )
    opts["playbooks"] = llm_playbook_tools

    if tab1.button(label=button_label, type="primary", use_container_width=True):
        func(options=opts)
        st.rerun()

    # JSON tab: show the current options and allow loading an edited config.
    opts_json = tab2.text_area(
        "JSON",
        label_visibility="collapsed",
        height=300,
        value=json.dumps(opts, indent=4, ensure_ascii=True)
    )
    if tab2.button("Load", key="load_json"):
        opts = json.loads(opts_json)
        st.session_state.llm_options = opts
        _remove_all_keys()
        st.rerun()

    return opts
236 |
--------------------------------------------------------------------------------
/iauto/playground/static/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/iauto/playground/static/__init__.py
--------------------------------------------------------------------------------
/iauto/playground/static/ia.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/iauto/playground/static/ia.png
--------------------------------------------------------------------------------
/iauto/playground/utils.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 |
4 | import streamlit as st
5 |
6 | import iauto
7 |
8 |
def list_actions():
    """Return the specs of all loaded actions, ordered by action name."""
    specs = [action.spec for action in iauto.actions.loader.actions]
    return sorted(specs, key=lambda spec: spec.name)
13 |
14 |
def list_playbooks():
    """Scan the IA_PLAYBOOK_DIR directory for YAML playbooks and load them.

    Returns:
        A dict mapping ``(file_path, description)`` tuples to loaded playbook
        objects. Empty when ``IA_PLAYBOOK_DIR`` is unset or not a directory;
        files that fail to load are silently skipped.
    """
    playbooks = {}

    # .get() so a missing env var yields {} instead of raising KeyError;
    # the guard below already handles the falsy case.
    playbooks_dir = os.environ.get("IA_PLAYBOOK_DIR")

    if playbooks_dir and os.path.isdir(playbooks_dir):
        files = glob.glob("**/*.y*ml", root_dir=playbooks_dir, recursive=True)
        files = [f for f in files if f.endswith((".yml", ".yaml"))]
        for name in files:
            f = os.path.join(playbooks_dir, name)
            if not os.path.isfile(f):
                continue
            try:
                playbook = iauto.load(f)
            except Exception:
                # Best-effort listing: skip unparsable playbook files.
                continue

            # Prefer the playbook's own description, then its spec's, falling
            # back to the file name without its YAML extension.
            playbook_desc = name.replace(".yaml", "").replace(".yml", "")
            if playbook.description:
                playbook_desc = playbook.description
            elif playbook.spec and playbook.spec.description:
                playbook_desc = playbook.spec.description

            playbooks[(f, playbook_desc)] = playbook

    return playbooks
41 |
42 |
def logo():
    """Render the playground logo; currently a deliberate no-op."""
    pass
45 |
46 |
47 | def _logo():
48 | st.markdown(
49 | """
50 |
67 | """,
68 | unsafe_allow_html=True,
69 | )
70 |
--------------------------------------------------------------------------------
/playbooks/agents.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | description: Build an AgentExecutor with multi agents
3 | actions:
4 | - playbook: llm_chat.yaml
5 | - agents.create:
6 | args:
7 | session: $session
8 | react: $react
9 | name: GeneralAssistant
10 | result: $general_assistant
11 | - agents.create:
12 | args:
13 | session: $session
14 | react: $react
15 | name: Journalist
16 | description: A journalist who is skilled in digging up, analyzing, and writing news.
17 | result: $journalist
18 | - agents.create:
19 | args:
20 | session: $session
21 | react: $react
22 | name: FinacialAnalyst
23 | description: A financial analyst specializing in analyzing financial markets and investment opportunities.
24 | result: $finacial_analyst
25 | - agents.executor:
26 | args:
27 | session: $session
28 | #react: $react
29 | agents:
30 | - $general_assistant
31 | #- $journalist
32 | #- $finacial_analyst
33 | result: $agent_executor
34 |
--------------------------------------------------------------------------------
/playbooks/agents_repl.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | description: Multi-Agent (REPL)
3 | actions:
4 | - playbook: agents.yaml
5 | - repeat:
6 | actions:
7 | - shell.prompt:
8 | args: "Human: "
9 | result: $prompt
10 | - agents.run:
11 | args:
12 | agent_executor: $agent_executor
13 | message: $prompt
14 | result: $message
15 | - shell.print:
16 | args:
17 | message: "AI: {$message}"
18 | color: green
19 |
--------------------------------------------------------------------------------
/playbooks/bing.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | spec:
3 | name: bing_search
4 | description: Search the web via bing.com
5 | arguments:
6 | - name: keywords
7 | type: string
8 | description: Keywords used for searching the web
9 | required: true
10 | actions:
11 | - log: "Bing search: {$keywords}"
12 | - browser.open:
13 | args:
14 | exec: /Applications/Google Chrome.app/Contents/MacOS/Google Chrome
15 | headless: true
16 | user_data_dir: /tmp/.chrome
17 | result: $browser
18 | - browser.goto:
19 | args:
20 | browser: $browser
21 | url: "https://bing.com/search?q={$keywords}&mkt=zh-CN&ensearch=1"
22 | timeout: 120000
23 | result: $page
24 | - time.wait: 10
25 | - browser.eval:
26 | args:
27 | page: $page
28 | javascript: |
29 |
30 | [{{fact: document.querySelector("#b_results .b_ans")?.innerText,
31 | items: [...document.querySelectorAll("#b_results .b_algo")].map(v => {{
32 | title = v.querySelector("h2")?.innerText || v.querySelector(".b_title")?.innerText;
33 | link = v.querySelector("h2 a")?.href || v.querySelector(".b_title a")?.href;
34 | caption = v.querySelector(".b_caption")?.innerText;
35 | return {{title, link, caption}}
36 | }}).filter(v => v.title && v.link)
37 | }}]
38 |
39 | result: $results
40 | - browser.close: $browser
41 | #- log: $results
42 | - json.dumps:
43 | args: $results
44 | result: $results
45 | - echo: $results
46 |
--------------------------------------------------------------------------------
/playbooks/browser.yaml:
--------------------------------------------------------------------------------
1 | # python -m iauto ./playbooks/browser.yaml --kwargs url=https://bing.com
2 | playbook:
3 | spec:
4 | description: Open browser and goto the URL specified
5 | arguments:
6 | - name: url
7 | type: string
      description: HTTP URL
9 | required: false
10 | actions:
11 | - browser.open:
12 | args:
13 | exec: /Applications/Google Chrome.app/Contents/MacOS/Google Chrome
14 | headless: false
15 | result: $browser
16 | - browser.goto:
17 | args:
18 | browser: $browser
19 | url: $url
20 | result: $page
21 | - browser.eval:
22 | args:
23 | page: $page
24 | javascript: new Date()
25 | result: $now
26 | - log: $now
27 | - time.wait: 2
28 | - browser.close: $browser
29 |
--------------------------------------------------------------------------------
/playbooks/caixin.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | spec:
3 | name: caixin_news
4 | description: Get latest news from Caixin website
5 | actions:
6 | - browser.open:
7 | args:
8 | exec: /Applications/Google Chrome.app/Contents/MacOS/Google Chrome
9 | headless: true
10 | user_data_dir: /tmp/.chrome
11 | size: 1024,768
12 | result: $browser
13 | - list.append: [$entries, "https://www.caixin.com/?HOLDZH"]
14 | - list.append: [$entries, "https://en.caixin.com"]
15 | - each:
16 | args: $entries
17 | actions:
18 | - log: $_
19 | - browser.goto:
20 | args:
21 | browser: $browser
22 | url: $_
23 | timeout: 60000
24 | result: $page
25 | - repeat:
26 | args: 1
27 | actions:
28 | - browser.locator:
29 | args:
30 | page: $page
31 | selector: "#moreArticle"
32 | result: $load_more
33 | - browser.click:
34 | args:
35 | locator: $load_more
36 | - time.wait: 3
37 | #- log: click
38 | - browser.eval:
39 | args:
40 | page: $page
41 | javascript: |
42 | [...document.querySelectorAll("a")].map(v => {{
43 | title = v.innerText?.trim()
44 | link = v.href
45 | date = link.match(/\d{{4}}-\d{{2}}-\d{{2}}/)
46 | date = date ? date[0] : null
47 | source = "caixin"
48 | return {{title, link, date, source}}
49 | }}).filter(v => v.title.length > 5 && v.date)
50 | result: $news
51 | - browser.close: $browser
52 | - echo: $news
53 |
--------------------------------------------------------------------------------
/playbooks/collections.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | description: "Example for collections: list, dict"
3 | actions:
4 | - list.append: [$list, 0]
5 | - list.append: [$list, 1]
6 | - log: $list
7 | - dict.set: [$dict, k1, v1]
8 | - dict.set: [$dict, k2, v2]
9 | - log: $dict
10 |
--------------------------------------------------------------------------------
/playbooks/control_flow.yaml:
--------------------------------------------------------------------------------
1 | # Playbook example
2 | #
3 | playbook:
4 | description: "Example: Control Flows"
5 | actions:
6 | - repeat:
7 | description: do forever
8 | actions:
9 | - time.now:
10 | result: $now
11 | - math.mod:
12 | args: [$now, 2]
13 | result: $tick
14 | - when:
15 | args:
16 | eq:
17 | - $tick
18 | - 0
19 | actions:
20 | - log: "tick: {$tick}"
21 | - log:
22 | args:
23 | now: $now
24 | tick: $tick
25 | - time.wait: 3
26 |
--------------------------------------------------------------------------------
/playbooks/fetch_links_from_url.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | spec:
3 | name: fetch_links_from_url
4 | description: Fetch titles and links from a URL
5 | arguments:
6 | - name: url
7 | type: string
8 | description: HTTP URL
9 | required: true
10 | actions:
11 | - browser.open:
12 | args:
13 | exec: /Applications/Google Chrome.app/Contents/MacOS/Google Chrome
14 | headless: true
15 | user_data_dir: /tmp/.chrom
16 | result: $browser
17 | - log: "Fetch Link: {$url}"
18 | - browser.goto:
19 | args:
20 | browser: $browser
21 | url: $url
22 | result: $page
23 | - time.wait: 3
24 | - browser.scroll:
25 | args:
26 | page: $page
27 | y: 150000
28 | - time.wait: 3
29 | - browser.eval:
30 | args:
31 | page: $page
32 | javascript: |
33 | [...document.querySelectorAll("a")].map(v => {{
34 | title = v.innerText?.trim()
35 | link = v.href
36 | return {{title, link}}
37 | }}).filter(v => v.title && v.title.length > 5)
38 | result: $links
39 | - browser.close: $browser
40 | - echo: $links
41 |
--------------------------------------------------------------------------------
/playbooks/get_readability_text_from_url.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | spec:
3 | name: get_readability_text_from_url
4 | description: Get web content suitable for humans to read
5 | arguments:
6 | - name: url
7 | type: string
8 | description: HTTP URL
9 | required: true
10 | actions:
11 | - browser.open:
12 | args:
13 | exec: /Applications/Google Chrome.app/Contents/MacOS/Google Chrome
14 | headless: true
15 | user_data_dir: /tmp/.chrom
16 | result: $browser
17 | - log: "Readability: {$url}"
18 | - browser.goto:
19 | args:
20 | browser: $browser
21 | url: $url
22 | result: $page
23 | #- browser.scroll:
24 | # args:
25 | # page: $page
26 | # y: 150000
27 | #- time.wait: 3
28 | - browser.content:
29 | args:
30 | page: $page
31 | result: $html
32 | - browser.readability:
33 | args:
34 | content: $html
35 | result: $text
36 | - browser.close: $browser
37 | - echo: $text
38 |
--------------------------------------------------------------------------------
/playbooks/google_flights_search.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | spec:
3 | name: google_flights_search
4 | description: Google flights search
5 | arguments:
6 | - name: from
7 | type: string
8 | description: Departure
9 | required: true
10 | - name: to
11 | type: string
12 | description: Destination
13 | required: true
14 | - name: date
15 | type: string
      description: "Departure date, format like: Apr 1"
17 | required: true
18 | actions:
19 | - browser.open:
20 | args:
21 | exec: /Applications/Google Chrome.app/Contents/MacOS/Google Chrome
22 | headless: false
23 | slow_mo: 1000
24 | result: $browser
25 | - browser.replay:
26 | args:
27 | browser: $browser
28 | script: ./google_flights_search_replay_script.json
29 | - time.wait: 3
30 | - browser.content:
31 | args:
32 | browser: $browser
33 | selector: ".pIav2d .JMc5Xc"
34 | result: $text
35 | - browser.close: $browser
36 | - echo: $text
37 |
--------------------------------------------------------------------------------
/playbooks/google_flights_search_replay_script.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Recording 2/28/2024 at 1:05:30 PM",
3 | "steps": [
4 | {
5 | "type": "setViewport",
6 | "width": 1369,
7 | "height": 1213,
8 | "deviceScaleFactor": 1,
9 | "isMobile": false,
10 | "hasTouch": false,
11 | "isLandscape": false
12 | },
13 | {
14 | "type": "navigate",
15 | "url": "https://www.google.com/travel/flights",
16 | "assertedEvents": [
17 | {
18 | "type": "navigation",
19 | "url": "https://www.google.com/travel/flights",
20 | "title": "Google Flights - Find Cheap Flight Options & Track Prices"
21 | }
22 | ]
23 | },
24 | {
25 | "type": "click",
26 | "target": "main",
27 | "selectors": [
28 | [
29 | "aria/Change ticket type. Round trip"
30 | ],
31 | [
32 | "div.RLVa8 div.VfPpkd-TkwUic"
33 | ],
34 | [
35 | "xpath///*[@id=\"yDmH0d\"]/c-wiz[2]/div/div[2]/c-wiz/div[1]/c-wiz/div[2]/div[1]/div[1]/div[1]/div/div[1]/div[1]/div/div/div/div[1]"
36 | ],
37 | [
38 | "pierce/div.RLVa8 div.VfPpkd-TkwUic"
39 | ]
40 | ],
41 | "offsetY": 21,
42 | "offsetX": 56.5
43 | },
44 | {
45 | "type": "click",
46 | "target": "main",
47 | "selectors": [
48 | [
49 | "aria/One way"
50 | ],
51 | [
52 | "div.RLVa8 li:nth-of-type(2)"
53 | ],
54 | [
55 | "xpath///*[@id=\"yDmH0d\"]/c-wiz[2]/div/div[2]/c-wiz/div[1]/c-wiz/div[2]/div[1]/div[1]/div[1]/div/div[1]/div[1]/div/div/div/div[2]/ul/li[2]"
56 | ],
57 | [
58 | "pierce/div.RLVa8 li:nth-of-type(2)"
59 | ]
60 | ],
61 | "offsetY": 40,
62 | "offsetX": 75.5
63 | },
64 | {
65 | "type": "click",
66 | "target": "main",
67 | "selectors": [
68 | [
69 | "aria/Where from?"
70 | ],
71 | [
72 | "div.BGeFcf div.cQnuXe input"
73 | ],
74 | [
75 | "xpath///*[@id=\"i21\"]/div[1]/div/div/div[1]/div/div/input"
76 | ],
77 | [
78 | "pierce/div.BGeFcf div.cQnuXe input"
79 | ]
80 | ],
81 | "offsetY": 20,
82 | "offsetX": 70.5
83 | },
84 | {
85 | "type": "change",
86 | "value": "{$from}",
87 | "selectors": [
88 | [
89 | "aria/Where else?"
90 | ],
91 | [
92 | "#i21 > div.ZGEB9c input"
93 | ],
94 | [
95 | "xpath///*[@id=\"i21\"]/div[6]/div[2]/div[2]/div[1]/div/input"
96 | ],
97 | [
98 | "pierce/#i21 > div.ZGEB9c input"
99 | ]
100 | ],
101 | "target": "main"
102 | },
103 | {
104 | "type": "keyDown",
105 | "target": "main",
106 | "key": "Enter"
107 | },
108 | {
109 | "type": "keyUp",
110 | "key": "Enter",
111 | "target": "main"
112 | },
113 | {
114 | "type": "keyDown",
115 | "target": "main",
116 | "key": "Tab"
117 | },
118 | {
119 | "type": "keyUp",
120 | "key": "Tab",
121 | "target": "main"
122 | },
123 | {
124 | "type": "change",
125 | "value": "{$to}",
126 | "selectors": [
127 | [
128 | "aria/Where to? "
129 | ],
130 | [
131 | "div.vxNK6d div.cQnuXe input"
132 | ],
133 | [
134 | "xpath///*[@id=\"i21\"]/div[4]/div/div/div[1]/div/div/input"
135 | ],
136 | [
137 | "pierce/div.vxNK6d div.cQnuXe input"
138 | ]
139 | ],
140 | "target": "main"
141 | },
142 | {
143 | "type": "keyDown",
144 | "target": "main",
145 | "key": "Enter"
146 | },
147 | {
148 | "type": "keyUp",
149 | "key": "Enter",
150 | "target": "main"
151 | },
152 | {
153 | "type": "keyDown",
154 | "target": "main",
155 | "key": "Tab"
156 | },
157 | {
158 | "type": "keyUp",
159 | "key": "Tab",
160 | "target": "main"
161 | },
162 | {
163 | "type": "change",
164 | "target": "main",
165 | "selectors": [
166 | "div.ZGEB9c div.kStSsc input",
167 | "xpath///*[@id=\"ow83\"]/div[2]/div/div[2]/div[1]/div[1]/div[1]/div/input",
168 | "pierce/div.ZGEB9c div.kStSsc input"
169 | ],
170 | "value": "{$date}"
171 | },
172 | {
173 | "type": "keyDown",
174 | "target": "main",
175 | "key": "Enter"
176 | },
177 | {
178 | "type": "keyUp",
179 | "key": "Enter",
180 | "target": "main"
181 | },
182 | {
183 | "type": "waitForElement",
184 | "selectors": [
185 | "pierce/div.akjk5c span"
186 | ]
187 | },
188 | {
189 | "type": "click",
190 | "timeout": 10000,
191 | "target": "main",
192 | "selectors": [
193 | "div.akjk5c span",
194 | "xpath///*[@id=\"ow83\"]/div[2]/div/div[3]/div[3]/div/button/span",
195 | "pierce/div.akjk5c span"
196 | ],
197 | "offsetX": 9.484375,
198 | "offsetY": 8,
199 | "duration": 3000
200 | },
201 | {
202 | "type": "click",
203 | "target": "main",
204 | "selectors": [
205 | "div.MXvFbd span.VfPpkd-vQzf8d",
206 | "xpath///*[@id=\"yDmH0d\"]/c-wiz[2]/div/div[2]/c-wiz/div[1]/c-wiz/div[2]/div[1]/div[1]/div[2]/div/button/span[2]",
207 | "pierce/div.MXvFbd span.VfPpkd-vQzf8d",
208 | "text/Search"
209 | ],
210 | "offsetX": 21.453125,
211 | "offsetY": 11,
212 | "duration": 1000
213 | }
214 | ]
215 | }
216 |
--------------------------------------------------------------------------------
/playbooks/google_news.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | spec:
3 | name: google_news
4 | description: Search google news
5 | arguments:
6 | - name: keywords
7 | type: string
8 | description: Keywords used for searching news topics
9 | required: true
10 | actions:
11 | - when:
12 | args: { eq: [$keywords, null] }
13 | actions:
14 | - setvar: [url, https://news.google.com]
15 | - when:
16 | args: { ne: [$keywords, null] }
17 | actions:
18 | - setvar: [url, "https://news.google.com/search?q={$keywords}"]
19 | - log: "Visit GoogleNews: {$url}"
20 | - browser.open:
21 | args:
22 | exec: /Applications/Google Chrome.app/Contents/MacOS/Google Chrome
23 | headless: true
24 | result: $browser
25 | - browser.goto:
26 | args:
27 | browser: $browser
28 | url: $url
29 | timeout: 120000
30 | result: $page
31 | - browser.eval:
32 | args:
33 | page: $page
34 | javascript: |
35 | [...document.querySelectorAll("a")].map(v => {{
36 | title = v.innerText;
37 | link = v.href;
38 | return {{title, link}}
39 | }}).filter(v => v.title !== '' && v.link.startsWith("https://news.google.com/articles/"))
40 | result: $news
41 | - browser.close: $browser
42 | - each:
43 | args: $news
44 | actions:
45 | - dict.get:
46 | args: [$_, title]
47 | result: $title
48 | #- log: $title
49 | - list.append: [$news_titles, $title]
50 | - echo: $news_titles
51 |
--------------------------------------------------------------------------------
/playbooks/llama_repl.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | description: Chat to LLaMA via llama.cpp (REPL)
3 | actions:
4 | - llm.session:
5 | args:
6 | provider: llama
7 | llm_args:
8 | model_path: /Volumes/Workspaces/models/Qwen-1_8B-Chat/ggml-model-q4_0.gguf
9 | chat_format: qwen-fn
10 | result: $session
11 | - repeat:
12 | actions:
13 | - shell.prompt:
14 | args: "Human: "
15 | result: $prompt
16 | - llm.chat:
17 | args:
18 | session: $session
19 | prompt: $prompt
20 | top_k: 2
21 | repeat_penalty: 1.2
22 | max_tokens: 500
23 | result: $message
24 | - shell.print:
25 | args:
26 | message: "AI: {$message}"
27 | color: green
28 |
--------------------------------------------------------------------------------
/playbooks/llm_chat.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | description: Create a LLM chat session
3 | actions:
4 | - llm.session:
5 | args:
6 | provider: $llm_provider
7 | llm_args: $llm_args
8 | tools:
9 | - shell.cmd
10 | actions:
11 | - playbook:
12 | args:
13 | - ./bing.yaml
14 | - ./fetch_links_from_url.yaml
15 | - ./get_readability_text_from_url.yaml
16 | result: $session
17 |
--------------------------------------------------------------------------------
/playbooks/llm_function_calling.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | description: "Example: Function calling (REPL)"
3 | actions:
4 | - llm.session:
5 | args:
6 | provider: llama
7 | llm_args:
8 | model_path: /Volumes/Workspaces/models/Qwen-1_8B-Chat/ggml-model-q4_0.gguf
9 | chat_format: qwen-fn
10 | actions:
11 | - playbook: ./bing.yaml
12 | result: $session
13 | - repeat:
14 | actions:
15 | - shell.prompt:
16 | args: "Human: "
17 | result: $prompt
18 | - llm.chat:
19 | args:
20 | session: $session
21 | prompt: "{$prompt}"
22 | result: $message
23 | - shell.print: "AI: {$message}"
24 |
--------------------------------------------------------------------------------
/playbooks/llm_react_repl.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | description: LLM ReAct reasoning (REPL)
3 | actions:
4 | - llm.session:
5 | args:
6 | provider: chatglm
7 | llm_args:
8 | model_path: /Volumes/Workspaces/models/chatglm3-6b/chatglm3-6b-ggml.bin
9 | #model_path: /Volumes/Workspaces/models/Qwen-1_8B-Chat/ggml-model-q4_0.gguf
10 | #chat_format: qwen-fn
11 | actions:
12 | - playbook:
13 | args:
14 | - ./bing.yaml
15 | - ./get_readability_text_from_url.yaml
16 | result: $session
17 | - repeat:
18 | actions:
19 | - shell.prompt:
20 | args: "Human: "
21 | result: $prompt
22 | - llm.react:
23 | args:
24 | session: $session
25 | prompt: $prompt
26 | rewrite: True
27 | log: False
28 | history: 1
29 | top_k: 2
30 | #repeat_penalty: 1.2
31 | temperature: 0
32 | result: $message
33 | - shell.print:
34 | args:
35 | message: "AI: {$message}"
36 | color: green
37 |
--------------------------------------------------------------------------------
/playbooks/openai_repl.yaml:
--------------------------------------------------------------------------------
1 | playbook:
2 | description: Chat to OpenAI-compatible API (REPL)
3 | actions:
4 | - llm.session:
5 | #args:
6 | # llm_args:
7 | # api_key: sk-xxx
8 | # base_url: https:///v1
9 | # model: gpt-4
10 | result: $session
11 | - repeat:
12 | actions:
13 | - shell.prompt:
14 | args: "Human: "
15 | result: $prompt
16 | - llm.chat:
17 | args:
18 | session: $session
19 | prompt: $prompt
20 | result: $message
21 | - shell.print: "AI: {$message}"
22 |
--------------------------------------------------------------------------------
/playbooks/webdriver.yaml:
--------------------------------------------------------------------------------
1 | # python -m iauto ./playbooks/webdriver.yaml --kwargs keywords=LLM
2 | playbook:
3 | spec:
4 | name: search_engine
5 | description: "Example for appium webdriver"
6 | arguments:
7 | - name: keywords
8 | type: string
      description: keywords to search for
10 | required: true
11 | actions:
12 | - wd.connect:
13 | args:
14 | server: http://192.168.64.7:4723
15 | caps:
16 | platformName: Windows
17 | automationName: Chromium
18 | browserName: chrome
19 | useSystemExecutable: true
20 | newCommandTimeout: 2
21 | goog:chromeOptions:
22 | args:
23 | - user-data-dir=C:\\Users\\shellc\\AppData\\Local\\Google\\Chrome\\User Data
24 | - app=https://bing.com
25 | noReset: true
26 | verbose: false
27 | unicodeKeyboard: true
28 | resetKeyboard: true
29 | result: $wd
30 | - wd.get_element:
31 | args:
32 | webdriver: $wd
33 | selector: "[id='sb_form_q']"
34 | result: $search_input
35 | - wd.send_keys:
36 | args:
37 | element: $search_input
38 | content: "{$keywords}"
39 | - wd.get_element:
40 | args:
41 | webdriver: $wd
42 | selector: "[id='search_icon']"
43 | result: $search_btn
44 | - wd.click: $search_btn
45 | - wd.get_elements:
46 | args:
47 | webdriver: $wd
48 | selector: "[id='b_results'] .b_algo .tilk"
49 | result: $search_results
50 | - each:
51 | args: $search_results
52 | actions:
53 | - wd.get_attr:
54 | args:
55 | element: $_
56 | name: href
57 | result: $link
58 | - wd.text:
59 | args: $_
60 | result: $text
61 | - log:
62 | - $link
63 | - $text
64 | - wd.execute_script:
65 | args:
66 | webdriver: $wd
67 | javascript: "return new Date().toString()"
68 | result: $now
69 | - log: $now
70 |
--------------------------------------------------------------------------------
/pydoc-markdown.yaml:
--------------------------------------------------------------------------------
1 | loaders:
2 | - type: python
3 | search_path: [.]
4 | renderer:
5 | # https://github.com/NiklasRosenstein/pydoc-markdown/blob/develop/src/pydoc_markdown/contrib/renderers/markdown.py
6 | type: markdown
7 | render_page_title: false
8 | render_toc: true
9 | render_toc_title: "iauto Python API Reference"
10 | toc_maxdepth: 1
11 | processors:
12 | # #https://github.com/NiklasRosenstein/pydoc-markdown/blob/develop/src/pydoc_markdown/contrib/processors/filter.py
13 | - type: filter
14 | expression: not name.startswith('_') and '.buildin.' not in name and '.distrib.' not in name and default()
15 | documented_only: true
16 | exclude_private: true
17 | do_not_filter_modules: false
18 | #skip_empty_modules: true
19 | - type: smart
20 | - type: crossref
21 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", 'cython', "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [tool.pyright]
6 | venvPath = "."
7 | venv = ".venv"
8 |
9 | [tool.yaml]
10 | validate = false
11 |
--------------------------------------------------------------------------------
/release.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ./build.sh
4 | python -m twine upload --repository pypi dist/*$1*.whl
5 |
--------------------------------------------------------------------------------
/requirements-appium.txt:
--------------------------------------------------------------------------------
1 | Appium-Python-Client
--------------------------------------------------------------------------------
/requirements-basic.txt:
--------------------------------------------------------------------------------
1 | pydantic
2 | PyYAML
3 |
4 | python-dotenv
5 | prompt_toolkit
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | build
2 | twine
3 | flake8
4 | autopep8
5 | isort
6 | pre-commit
7 | pydoc-markdown
8 |
--------------------------------------------------------------------------------
/requirements-llm-local.txt:
--------------------------------------------------------------------------------
1 | llama-cpp-python
--------------------------------------------------------------------------------
/requirements-llm.txt:
--------------------------------------------------------------------------------
1 | openai
2 | pyautogen
--------------------------------------------------------------------------------
/requirements-playground.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | watchdog
--------------------------------------------------------------------------------
/requirements-playwright.txt:
--------------------------------------------------------------------------------
1 | playwright
2 | Appium-Python-Client
--------------------------------------------------------------------------------
/requirements-sql.txt:
--------------------------------------------------------------------------------
1 | SQLAlchemy
2 | pandas
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Basic
2 | pydantic
3 | PyYAML
4 |
5 | python-dotenv
6 | prompt_toolkit
7 |
8 | # Server
9 | uvicorn
10 | fastapi
11 |
12 | # LLM
13 | openai
14 | pyautogen
15 | #llama-cpp-python
16 |
17 | # Appium & Playwright
18 | playwright
19 | Appium-Python-Client
20 |
21 | # SQL
22 | SQLAlchemy
23 | pandas
24 |
25 | # Playground
26 | streamlit
27 | watchdog
28 |
29 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = iauto
3 | version = attr: iauto.VERSION
4 | author = shellc
5 | author_email = shenggong.wang@gmail.com
6 | description = iauto is a low-code engine for building and deploying AI agents
7 | long_description_content_type = text/markdown
8 | long_description = file: README.md
9 | license = MIT
10 | url = https://github.com/shellc/iauto
11 | keywords = AI, Automation, LLM, RPA
12 | classifiers =
13 | Programming Language :: Python :: 3
14 |
15 | [options]
16 | python_requires = >=3.8
17 | packages = find:
18 | install_requires = file: requirements-basic.txt
19 | include_package_data = True
20 |
21 | [options.package_data]
22 | * = *.json, *.yml, *.yaml, *.png
23 |
24 | [options.entry_points]
25 | console_scripts =
26 | ia = iauto.__main__:main
27 |
28 | [options.extras_require]
29 | dev = file: requirements-dev.txt
30 | all = file: requirements.txt
31 | playground = file: requirements-playground.txt
32 | sql = file: requirements-sql.txt
33 | appium = file: requirements-appium.txt
34 | playwright = file: requirements-playwright.txt
35 | llm = file: requirements-llm.txt
36 | llm-local = file: requirements-llm-local.txt
37 |
38 | [options.packages.find]
39 | exclude =
40 | tests/*
41 |
42 | [flake8]
43 | per-file-ignores =
44 | # imported but unused
45 | __init__.py: F401
46 | _buildin.py: F401
47 | max-line-length = 120
48 |
49 | [isort]
50 | multi_line_output = 3
51 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 |
3 | setuptools.setup()
4 |
--------------------------------------------------------------------------------
/test.sh:
--------------------------------------------------------------------------------
1 | python -m unittest discover tests
2 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/tests/__init__.py
--------------------------------------------------------------------------------
/tests/data/playbooks/playbook_load_test.json:
--------------------------------------------------------------------------------
1 | {
2 | "actions": [
3 | {
4 | "actions": [
5 | {
6 | "metadata": {
7 | "__root__": "./tests/data/playbooks"
8 | },
9 | "name": "time.now",
10 | "result": "$now"
11 | },
12 | {
13 | "args": [
14 | "$now",
15 | 2
16 | ],
17 | "metadata": {
18 | "__root__": "./tests/data/playbooks"
19 | },
20 | "name": "math.mod",
21 | "result": "$tick"
22 | },
23 | {
24 | "actions": [
25 | {
26 | "args": [
27 | "tick: {$tick}"
28 | ],
29 | "metadata": {
30 | "__root__": "./tests/data/playbooks"
31 | },
32 | "name": "log"
33 | }
34 | ],
35 | "args": {
36 | "eq": [
37 | "$tick",
38 | 0
39 | ]
40 | },
41 | "metadata": {
42 | "__root__": "./tests/data/playbooks"
43 | },
44 | "name": "when"
45 | },
46 | {
47 | "args": {
48 | "now": "$now",
49 | "tick": "$tick"
50 | },
51 | "metadata": {
52 | "__root__": "./tests/data/playbooks"
53 | },
54 | "name": "log"
55 | },
56 | {
57 | "args": [
58 | 3
59 | ],
60 | "metadata": {
61 | "__root__": "./tests/data/playbooks"
62 | },
63 | "name": "time.wait"
64 | }
65 | ],
66 | "description": "do forever",
67 | "metadata": {
68 | "__root__": "./tests/data/playbooks"
69 | },
70 | "name": "repeat"
71 | }
72 | ],
73 | "description": "playbook_load_test",
74 | "metadata": {},
75 | "name": "playbook"
76 | }
--------------------------------------------------------------------------------
/tests/data/playbooks/playbook_load_test.yaml:
--------------------------------------------------------------------------------
1 | # Playbook example
2 | #
3 | playbook:
4 | description: "playbook_load_test"
5 | actions:
6 | - repeat:
7 | description: do forever
8 | actions:
9 | - time.now:
10 | result: $now
11 | - math.mod:
12 | args: [$now, 2]
13 | result: $tick
14 | - when:
15 | args:
16 | eq:
17 | - $tick
18 | - 0
19 | actions:
20 | - log: "tick: {$tick}"
21 | - log:
22 | args:
23 | now: $now
24 | tick: $tick
25 | - time.wait: 3
26 |
--------------------------------------------------------------------------------
/tests/iauto/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/tests/iauto/__init__.py
--------------------------------------------------------------------------------
/tests/iauto/actions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shellc/iauto/13d0f4f34ff54a05d94a8f48adb307f89d840100/tests/iauto/actions/__init__.py
--------------------------------------------------------------------------------
/tests/iauto/actions/test_json.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from iauto.actions.buildin import json
4 |
5 |
class TestJson(unittest.TestCase):
    """Round-trip tests for the built-in JSON action helpers."""

    def test_json_dumps(self):
        """Serialize a dict, parse it back, and check the value survives."""
        original = {"k": "v"}
        serialized = json.dumps(original)
        restored = json.loads(serialized)
        self.assertEqual(restored["k"], "v")
--------------------------------------------------------------------------------
/tests/iauto/actions/test_playbook.py:
--------------------------------------------------------------------------------
1 |
2 | import unittest
3 |
4 | from iauto.actions import playbook
5 |
6 |
class TestPlaybookLoading(unittest.TestCase):
    """Verify that playbook definitions load from every supported format."""

    def test_playbook_loading(self):
        """Load the same playbook fixture from YAML and then from JSON."""
        for fixture in (
            "./tests/data/playbooks/playbook_load_test.yaml",
            "./tests/data/playbooks/playbook_load_test.json",
        ):
            playbook.load(fixture)
--------------------------------------------------------------------------------