├── .coveragerc
├── .devcontainer
│   ├── Dockerfile
│   ├── devcontainer.json
│   └── docker-compose.yml
├── .dockerignore
├── .env.template
├── .envrc
├── .flake8
├── .gitattributes
├── .github
│   ├── CODEOWNERS
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── 1.bug.yml
│   │   └── 2.feature.yml
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── workflows
│       ├── ci.yml
│       ├── scripts
│       │   ├── docker-ci-summary.sh
│       │   └── docker-release-summary.sh
│       └── sponsors_readme.yml
├── .gitignore
├── .isort.cfg
├── .pre-commit-config.yaml
├── .sourcery.yaml
├── BULLETIN.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── README.md
├── autogpt
│   ├── __init__.py
│   ├── __main__.py
│   ├── agent
│   │   ├── __init__.py
│   │   ├── agent.py
│   │   └── agent_manager.py
│   ├── app.py
│   ├── cli.py
│   ├── commands
│   │   ├── __init__.py
│   │   ├── analyze_code.py
│   │   ├── audio_text.py
│   │   ├── command.py
│   │   ├── execute_code.py
│   │   ├── file_operations.py
│   │   ├── git_operations.py
│   │   ├── google_search.py
│   │   ├── image_gen.py
│   │   ├── improve_code.py
│   │   ├── review_pr.py
│   │   ├── task_statuses.py
│   │   ├── times.py
│   │   ├── twitter.py
│   │   ├── web_playwright.py
│   │   ├── web_requests.py
│   │   ├── web_selenium.py
│   │   └── write_tests.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── ai_config.py
│   │   ├── config.py
│   │   └── prompt_config.py
│   ├── configurator.py
│   ├── js
│   │   └── overlay.js
│   ├── json_utils
│   │   ├── __init__.py
│   │   ├── json_fix_general.py
│   │   ├── json_fix_llm.py
│   │   ├── llm_response_format_1.json
│   │   └── utilities.py
│   ├── llm
│   │   ├── __init__.py
│   │   ├── api_manager.py
│   │   ├── base.py
│   │   ├── chat.py
│   │   ├── llm_utils.py
│   │   ├── modelsinfo.py
│   │   ├── providers
│   │   │   ├── __init__.py
│   │   │   └── openai.py
│   │   └── token_counter.py
│   ├── log_cycle
│   │   ├── __init__.py
│   │   ├── json_handler.py
│   │   └── log_cycle.py
│   ├── logs.py
│   ├── main.py
│   ├── memory
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── local.py
│   │   ├── milvus.py
│   │   ├── no_memory.py
│   │   ├── pinecone.py
│   │   ├── redismem.py
│   │   └── weaviate.py
│   ├── memory_management
│   │   ├── store_memory.py
│   │   └── summary_memory.py
│   ├── models
│   │   └── base_open_ai_plugin.py
│   ├── plugins.py
│   ├── processing
│   │   ├── __init__.py
│   │   ├── html.py
│   │   └── text.py
│   ├── prompts
│   │   ├── __init__.py
│   │   ├── default_prompts.py
│   │   ├── generator.py
│   │   └── prompt.py
│   ├── setup.py
│   ├── singleton.py
│   ├── speech
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── brian.py
│   │   ├── eleven_labs.py
│   │   ├── gtts.py
│   │   ├── macos_tts.py
│   │   └── say.py
│   ├── spinner.py
│   ├── url_utils
│   │   ├── __init__.py
│   │   └── validators.py
│   ├── utils.py
│   └── workspace
│       ├── __init__.py
│       └── workspace.py
├── azure.yaml.template
├── benchmark
│   ├── __init__.py
│   └── benchmark_entrepreneur_gpt_with_difficult_user.py
├── codecov.yml
├── data
│   └── .keep
├── data_ingestion.py
├── docker-compose.yml
├── docs
│   ├── challenges
│   │   ├── beat.md
│   │   ├── building_challenges.md
│   │   ├── challenge_template.md
│   │   ├── information_retrieval
│   │   │   ├── challenge_a.md
│   │   │   └── introduction.md
│   │   ├── introduction.md
│   │   ├── list.md
│   │   ├── memory
│   │   │   ├── challenge_a.md
│   │   │   ├── challenge_b.md
│   │   │   ├── challenge_c.md
│   │   │   └── introduction.md
│   │   └── submit.md
│   ├── code-of-conduct.md
│   ├── configuration
│   │   ├── imagegen.md
│   │   ├── memory.md
│   │   ├── search.md
│   │   └── voice.md
│   ├── contributing.md
│   ├── imgs
│   │   └── openai-api-key-billing-paid-account.png
│   ├── index.md
│   ├── plugins.md
│   ├── setup.md
│   ├── testing.md
│   └── usage.md
├── main.py
├── mkdocs.yml
├── mypy.ini
├── plugin.png
├── plugins
│   └── __PUT_PLUGIN_ZIPS_HERE__
├── prompt_settings.yaml
├── pyproject.toml
├── requirements.txt
├── run.bat
├── run.sh
├── run_continuous.bat
├── run_continuous.sh
├── scripts
│   ├── __init__.py
│   ├── check_requirements.py
│   └── install_plugin_deps.py
├── tests.py
└── tests
    ├── __init__.py
    ├── conftest.py
    ├── context.py
    ├── integration
    │   ├── __init__.py
    │   ├── agent_factory.py
    │   ├── cassettes
    │   │   ├── test_llm_utils
    │   │   │   ├── test_get_ada_embedding.yaml
    │   │   │   └── test_get_ada_embedding_large_context.yaml
    │   │   ├── test_local_cache
    │   │   │   └── test_get_relevant.yaml
    │   │   ├── test_memory_management
    │   │   │   └── test_save_memory_trimmed_from_context_window.yaml
    │   │   └── test_setup
    │   │       ├── test_generate_aiconfig_automatic_default.yaml
    │   │       ├── test_generate_aiconfig_automatic_fallback.yaml
    │   │       └── test_generate_aiconfig_automatic_typical.yaml
    │   ├── challenges
    │   │   ├── __init__.py
    │   │   ├── basic_abilities
    │   │   │   ├── __init__.py
    │   │   │   ├── cassettes
    │   │   │   │   ├── test_browse_website
    │   │   │   │   │   └── test_browse_website.yaml
    │   │   │   │   └── test_write_file
    │   │   │   │       └── test_write_file.yaml
    │   │   │   ├── goal_oriented_tasks.md
    │   │   │   ├── test_browse_website.py
    │   │   │   └── test_write_file.py
    │   │   ├── conftest.py
    │   │   ├── information_retrieval
    │   │   │   └── test_information_retrieval_challenge_a.py
    │   │   ├── kubernetes
    │   │   │   └── test_kubernetes_template_challenge_a.py
    │   │   ├── memory
    │   │   │   ├── __init__.py
    │   │   │   ├── cassettes
    │   │   │   │   ├── test_memory_challenge_a
    │   │   │   │   │   └── test_memory_challenge_a.yaml
    │   │   │   │   ├── test_memory_challenge_b
    │   │   │   │   │   └── test_memory_challenge_b.yaml
    │   │   │   │   └── test_memory_challenge_c
    │   │   │   │       └── test_memory_challenge_c.yaml
    │   │   │   ├── test_memory_challenge_a.py
    │   │   │   ├── test_memory_challenge_b.py
    │   │   │   └── test_memory_challenge_c.py
    │   │   ├── pr_review
    │   │   │   ├── base.py
    │   │   │   ├── test_basic_pr_review.py
    │   │   │   └── test_basic_pr_review_variable.py
    │   │   ├── test_challenge_should_be_formatted_properly.py
    │   │   └── utils.py
    │   ├── conftest.py
    │   ├── goal_oriented
    │   │   └── __init__.py
    │   ├── memory_tests.py
    │   ├── milvus_memory_tests.py
    │   ├── test_commands.py
    │   ├── test_execute_code.py
    │   ├── test_git_commands.py
    │   ├── test_google_search.py
    │   ├── test_llm_utils.py
    │   ├── test_local_cache.py
    │   ├── test_memory_management.py
    │   ├── test_setup.py
    │   └── weaviate_memory_tests.py
    ├── milvus_memory_test.py
    ├── mocks
    │   ├── __init__.py
    │   └── mock_commands.py
    ├── test_agent.py
    ├── test_agent_manager.py
    ├── test_ai_config.py
    ├── test_api_manager.py
    ├── test_commands.py
    ├── test_config.py
    ├── test_image_gen.py
    ├── test_logs.py
    ├── test_prompt_config.py
    ├── test_prompt_generator.py
    ├── test_token_counter.py
    ├── test_utils.py
    ├── test_workspace.py
    ├── unit
    │   ├── __init__.py
    │   ├── _test_json_parser.py
    │   ├── data
    │   │   └── test_plugins
    │   │       └── Auto-GPT-Plugin-Test-master.zip
    │   ├── models
    │   │   └── test_base_open_api_plugin.py
    │   ├── test_browse_scrape_links.py
    │   ├── test_browse_scrape_text.py
    │   ├── test_chat.py
    │   ├── test_file_operations.py
    │   ├── test_get_self_feedback.py
    │   ├── test_json_parser.py
    │   ├── test_json_utils_llm.py
    │   ├── test_llm_utils.py
    │   ├── test_plugins.py
    │   ├── test_spinner.py
    │   ├── test_url_validation.py
    │   └── test_web_selenium.py
    ├── utils.py
    └── vcr
        ├── __init__.py
        ├── openai_filter.py
        └── vcr_filter.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | relative_files = true
--------------------------------------------------------------------------------
/.devcontainer/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use an official Python base image from the Docker Hub
2 | FROM python:3.10
3 |
4 | # Install browsers
5 | RUN apt-get update && apt-get install -y \
6 | chromium-driver firefox-esr \
7 | ca-certificates
8 |
9 | # Install utilities
10 | RUN apt-get install -y curl jq wget git
11 |
12 | # Declare working directory
13 | WORKDIR /workspace/Auto-GPT
14 |
--------------------------------------------------------------------------------
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "dockerComposeFile": "./docker-compose.yml",
3 | "service": "auto-gpt",
4 | "workspaceFolder": "/workspace/Auto-GPT",
5 | "shutdownAction": "stopCompose",
6 | "features": {
7 | "ghcr.io/devcontainers/features/common-utils:2": {
8 | "installZsh": "true",
9 | "username": "vscode",
10 | "userUid": "6942",
11 | "userGid": "6942",
12 | "upgradePackages": "true"
13 | },
14 | "ghcr.io/devcontainers/features/desktop-lite:1": {},
15 | "ghcr.io/devcontainers/features/python:1": "none",
16 | "ghcr.io/devcontainers/features/node:1": "none",
17 | "ghcr.io/devcontainers/features/git:1": {
18 | "version": "latest",
19 | "ppa": "false"
20 | }
21 | },
22 | // Configure tool-specific properties.
23 | "customizations": {
24 | // Configure properties specific to VS Code.
25 | "vscode": {
26 | // Set *default* container specific settings.json values on container create.
27 | "settings": {
28 | "python.defaultInterpreterPath": "/usr/local/bin/python"
29 | }
30 | }
31 | },
32 | // Use 'forwardPorts' to make a list of ports inside the container available locally.
33 | // "forwardPorts": [],
34 |
35 | // Use 'postCreateCommand' to run commands after the container is created.
36 | // "postCreateCommand": "pip3 install --user -r requirements.txt",
37 |
38 | // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
39 | "remoteUser": "vscode"
40 | }
41 |
--------------------------------------------------------------------------------
/.devcontainer/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # To boot the app run the following:
2 | # docker-compose run auto-gpt
3 | version: '3.9'
4 |
5 | services:
6 | auto-gpt:
7 | depends_on:
8 | - redis
9 | build:
10 | dockerfile: .devcontainer/Dockerfile
11 | context: ../
12 | tty: true
13 | environment:
14 | MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
15 | REDIS_HOST: ${REDIS_HOST:-redis}
16 | volumes:
17 | - ../:/workspace/Auto-GPT
18 | redis:
19 | image: 'redis/redis-stack-server:latest'
20 |
--------------------------------------------------------------------------------
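Note: the `${VAR:-default}` expansions in the compose file above fall back to a default when the variable is unset, so the devcontainer talks to the bundled `redis` service out of the box. For reference, the equivalent lookup on the Python side would be the usual `os.getenv` pattern (a sketch; the variable names are the ones wired up above):

    import os

    # Mirrors ${MEMORY_BACKEND:-redis} and ${REDIS_HOST:-redis} above.
    memory_backend = os.getenv("MEMORY_BACKEND", "redis")
    redis_host = os.getenv("REDIS_HOST", "redis")
    print(memory_backend, redis_host)  # "redis redis" when neither is set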
/.dockerignore:
--------------------------------------------------------------------------------
1 | .*
2 | *.template
3 | *.yaml
4 | *.yml
5 |
6 | *.md
7 | *.png
8 | !BULLETIN.md
9 |
--------------------------------------------------------------------------------
/.envrc:
--------------------------------------------------------------------------------
1 | # Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards.
2 | # Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use Auto-GPT.
3 |
4 | [[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt
5 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 88
3 | select = "E303, W293, W291, W292, E305, E231, E302"
4 | exclude =
5 | .tox,
6 | __pycache__,
7 | *.pyc,
8 | .env
9 | venv*/*,
10 | .venv/*,
11 | reports/*,
12 | dist/*,
13 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Exclude VCR cassettes from stats
2 | tests/**/cassettes/**.y*ml linguist-generated
3 |
4 | # Mark documentation as such
5 | docs/**.md linguist-documentation
6 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | .github/workflows/ @Significant-Gravitas/Auto-GPT-Source
2 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: Torantulino
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/2.feature.yml:
--------------------------------------------------------------------------------
1 | name: Feature request 🚀
2 | description: Suggest a new idea for Auto-GPT!
3 | labels: ['status: needs triage']
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing)
9 | Please provide a searchable summary of the issue in the title above ⬆️.
10 | - type: checkboxes
11 | attributes:
12 | label: Duplicates
13 | description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
14 | options:
15 | - label: I have searched the existing issues
16 | required: true
17 | - type: textarea
18 | attributes:
19 | label: Summary 💡
20 | description: Describe how it should work.
21 | - type: textarea
22 | attributes:
23 | label: Examples 🌈
24 | description: Provide a link to other implementations, or screenshots of the expected behavior.
25 | - type: textarea
26 | attributes:
27 | label: Motivation 🔦
28 | description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
29 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
7 |
8 |
20 |
21 | ### Background
22 |
23 |
24 | ### Changes
25 |
26 |
27 | ### Documentation
28 |
29 |
30 | ### Test Plan
31 |
32 |
33 | ### PR Quality Checklist
34 | - [ ] My pull request is atomic and focuses on a single change.
35 | - [ ] I have thoroughly tested my changes with multiple different prompts.
36 | - [ ] I have considered potential risks and mitigations for my changes.
37 | - [ ] I have documented my changes clearly and comprehensively.
38 | - [ ] I have not snuck in any "extra" small tweaks or changes.
39 | - [ ] I have run `black .` and `isort .` against my code to ensure it passes our linter.
40 |
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: Challenges
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 | pull_request_target:
7 | branches: [ master, stable ]
8 |
9 | concurrency:
10 | group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
11 | cancel-in-progress: ${{ github.event_name == 'pull_request_target' }}
12 |
13 | jobs:
14 | lint:
15 | runs-on: ubuntu-latest
16 | env:
17 | min-python-version: "3.10"
18 |
19 | steps:
20 | - name: Checkout repository
21 | uses: actions/checkout@v3
22 | with:
23 | fetch-depth: 0
24 | ref: ${{ github.event.pull_request.head.ref }}
25 | repository: ${{ github.event.pull_request.head.repo.full_name }}
26 |
27 | - name: Set up Python ${{ env.min-python-version }}
28 | uses: actions/setup-python@v2
29 | with:
30 | python-version: ${{ env.min-python-version }}
31 |
32 | - name: Install dependencies
33 | run: |
34 | python -m pip install --upgrade pip
35 | pip install -r requirements.txt
36 |
37 | - name: Lint with flake8
38 | run: flake8
39 |
40 | - name: Check black formatting
41 | run: black . --check
42 | if: success() || failure()
43 |
44 | - name: Check isort formatting
45 | run: isort . --check
46 | if: success() || failure()
47 |
48 | pr_review:
49 | permissions:
50 | # Gives the action the necessary permissions for publishing new
51 | # comments in pull requests.
52 | pull-requests: write
53 | # Gives the action the necessary permissions for pushing data to the
54 | # python-coverage-comment-action branch, and for editing existing
55 | # comments (to avoid publishing multiple comments in the same PR)
56 | contents: write
57 | runs-on: ubuntu-latest
58 | strategy:
59 | matrix:
60 | python-version: [ "3.10" ]
61 |
62 | steps:
63 | - name: Check out repository
64 | uses: actions/checkout@v3
65 | with:
66 | fetch-depth: 0
67 | ref: ${{ github.event.pull_request.head.ref }}
68 | repository: ${{ github.event.pull_request.head.repo.full_name }}
69 |
70 | - name: Set up Python ${{ matrix.python-version }}
71 | uses: actions/setup-python@v2
72 | with:
73 | python-version: ${{ matrix.python-version }}
74 |
75 | - name: Install dependencies
76 | run: |
77 | python -m pip install --upgrade pip
78 | pip install -r requirements.txt
79 |
80 | - name: Extract team member name
81 | run: |
82 | TEAM_MEMBER_NAME=$(echo "${{ github.event.pull_request.head.repo.full_name }}" | awk -F'/' '{print $1}')
83 | echo "TEAM_MEMBER_NAME=$TEAM_MEMBER_NAME" >> $GITHUB_ENV
84 |
85 | - name: Run unittest tests with coverage
86 | run: |
87 | pytest -s tests/integration/challenges/pr_review -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term
88 | env:
89 | CI: true
90 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
91 | GITHUB_PAT: ${{ secrets.PAT }}
92 | TEAM_MEMBER_NAME: ${{ env.TEAM_MEMBER_NAME }}
93 |
94 | - name: Upload coverage reports to Codecov
95 | uses: codecov/codecov-action@v3
96 |
--------------------------------------------------------------------------------
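Note: the `concurrency.group` expression above is dense: `a && b || c` emulates a ternary in GitHub's expression syntax, yielding `ci-pr-<number>` for pull requests and `ci-<sha>` for pushes. A rough Python rendering of the same logic (function and parameter names invented for illustration):

    def concurrency_group(head_ref: str | None, pr_number: int | None, sha: str) -> str:
        # github.head_ref && format('pr-{0}', pr_number) || github.sha
        suffix = f"pr-{pr_number}" if head_ref else sha
        return f"ci-{suffix}"

    print(concurrency_group("fix/typo", 123, "deadbeef"))  # ci-pr-123
    print(concurrency_group(None, None, "deadbeef"))       # ci-deadbeef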
/.github/workflows/scripts/docker-ci-summary.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')
3 | head_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$current_ref/" <<< $compare_url_template)
4 | ref_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$commit_hash/" <<< $compare_url_template)
5 |
6 | EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
7 |
8 | cat << $EOF
9 | # Docker Build summary 🔨
10 |
11 | **Source:** branch \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
12 |
13 | **Build type:** \`$build_type\`
14 |
15 | **Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
16 |
17 | ## Image details
18 |
19 | **Tags:**
20 | $(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
21 |
22 |
23 | Layers
24 |
25 | | Age | Size | Created by instruction |
26 | | --------- | ------ | ---------------------- |
27 | $(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
28 | | grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
29 | | cut -f-3 `# yeet Comment column`\
30 | | sed 's/ ago//' `# fix Layer age`\
31 | | sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
32 | | sed 's/\$/\\$/g' `# escape variable and shell expansions`\
33 | | sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
34 | | column -t -s$'\t' -o' | ' `# align columns and add separator`\
35 | | sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
36 |
37 |
38 |
39 | ENV
40 |
41 | | Variable | Value |
42 | | -------- | -------- |
43 | $(jq -r \
44 | '.Config.Env
45 | | map(
46 | split("=")
47 | | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
48 | )
49 | | map("| \(.) |")
50 | | .[]' <<< $meta
51 | )
52 |
53 |
54 |
55 | Raw metadata
56 |
57 | \`\`\`JSON
58 | $meta
59 | \`\`\`
60 |
61 |
62 | ## Build details
63 | **Build trigger:** $push_forced_label $event_name \`$event_ref\`
64 |
65 |
66 | github context
67 |
68 | \`\`\`JSON
69 | $github_context_json
70 | \`\`\`
71 |
72 |
73 | ### Source
74 | **HEAD:** [$repository@\`${commit_hash:0:7}\`]($source_url) on branch [$current_ref]($ref_compare_url)
75 |
76 | **Diff with previous HEAD:** $head_compare_url
77 |
78 | #### New commits
79 | $(jq -r 'map([
80 | "**Commit [`\(.id[0:7])`](\(.url)) by \(if .author.username then "@"+.author.username else .author.name end):**",
81 | .message,
82 | (if .committer.name != .author.name then "\n> **Committer:** \(.committer.name) <\(.committer.email)>" else "" end),
83 | "**Timestamp:** \(.timestamp)"
84 | ] | map("> \(.)\n") | join("")) | join("\n")' <<< $new_commits_json)
85 |
86 | ### Job environment
87 |
88 | #### \`vars\` context:
89 | \`\`\`JSON
90 | $vars_json
91 | \`\`\`
92 |
93 | #### \`env\` context:
94 | \`\`\`JSON
95 | $job_env_json
96 | \`\`\`
97 |
98 | $EOF
99 |
--------------------------------------------------------------------------------
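Note: the jq filter that renders the ENV table above splits each `KEY=value` entry from `.Config.Env`, collapses runs of whitespace in the value, and emits a Markdown row. A sketch of the same transformation in Python, for readability (the sample entries are invented):

    meta_env = ["PIP_NO_CACHE_DIR=yes", "PYTHONUNBUFFERED=1"]  # stand-in for .Config.Env
    for entry in meta_env:
        key, value = entry.split("=", 1)
        collapsed = " ".join(value.split())  # like gsub("\\s+"; " ")
        print(f"| {key} | `{collapsed}` |")
    # | PIP_NO_CACHE_DIR | `yes` |
    # | PYTHONUNBUFFERED | `1` |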
/.github/workflows/scripts/docker-release-summary.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')
3 |
4 | EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
5 |
6 | cat << $EOF
7 | # Docker Release Build summary 🚀🔨
8 |
9 | **Source:** $ref_type \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
10 |
11 | **Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
12 |
13 | ## Image details
14 |
15 | **Tags:**
16 | $(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
17 |
18 |
19 | Layers
20 |
21 | | Age | Size | Created by instruction |
22 | | --------- | ------ | ---------------------- |
23 | $(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
24 | | grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
25 | | cut -f-3 `# yeet Comment column`\
26 | | sed 's/ ago//' `# fix Layer age`\
27 | | sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
28 | | sed 's/\$/\\$/g' `# escape variable and shell expansions`\
29 | | sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
30 | | column -t -s$'\t' -o' | ' `# align columns and add separator`\
31 | | sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
32 |
33 |
34 |
35 | ENV
36 |
37 | | Variable | Value |
38 | | -------- | -------- |
39 | $(jq -r \
40 | '.Config.Env
41 | | map(
42 | split("=")
43 | | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
44 | )
45 | | map("| \(.) |")
46 | | .[]' <<< $meta
47 | )
48 |
49 |
50 |
51 | Raw metadata
52 |
53 | \`\`\`JSON
54 | $meta
55 | \`\`\`
56 |
57 |
58 | ## Build details
59 | **Build trigger:** $event_name \`$current_ref\`
60 |
61 | | Parameter | Value |
62 | | -------------- | ------------ |
63 | | \`no_cache\` | \`$inputs_no_cache\` |
64 |
65 |
66 | github context
67 |
68 | \`\`\`JSON
69 | $github_context_json
70 | \`\`\`
71 |
72 |
73 | ### Job environment
74 |
75 | #### \`vars\` context:
76 | \`\`\`JSON
77 | $vars_json
78 | \`\`\`
79 |
80 | #### \`env\` context:
81 | \`\`\`JSON
82 | $job_env_json
83 | \`\`\`
84 |
85 | $EOF
86 |
--------------------------------------------------------------------------------
/.github/workflows/sponsors_readme.yml:
--------------------------------------------------------------------------------
1 | name: Generate Sponsors README
2 |
3 | on:
4 | workflow_dispatch:
5 | schedule:
6 | - cron: '0 */12 * * *'
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout 🛎️
13 | uses: actions/checkout@v3
14 |
15 | - name: Generate Sponsors 💖
16 | uses: JamesIves/github-sponsors-readme-action@v1
17 | with:
18 | token: ${{ secrets.README_UPDATER_PAT }}
19 | file: 'README.md'
20 | minimum: 2500
21 | maximum: 99999
22 |
23 | - name: Deploy to GitHub Pages 🚀
24 | uses: JamesIves/github-pages-deploy-action@v4
25 | with:
26 | branch: master
27 | folder: '.'
28 | token: ${{ secrets.README_UPDATER_PAT }}
29 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Original ignores
2 | autogpt/keys.py
3 | autogpt/*json
4 | autogpt/node_modules/
5 | autogpt/__pycache__/keys.cpython-310.pyc
6 | autogpt/auto_gpt_workspace
7 | package-lock.json
8 | *.pyc
9 | auto_gpt_workspace/*
10 | *.mpeg
11 | .env
12 | azure.yaml
13 | ai_settings.yaml
14 | last_run_ai_settings.yaml
15 | .vscode
16 | .idea/*
17 | auto-gpt.json
18 | log.txt
19 | log-ingestion.txt
20 | logs
21 | *.log
22 | *.mp3
23 | mem.sqlite3
24 |
25 | # Byte-compiled / optimized / DLL files
26 | __pycache__/
27 | *.py[cod]
28 | *$py.class
29 |
30 | # C extensions
31 | *.so
32 |
33 | # Distribution / packaging
34 | .Python
35 | build/
36 | develop-eggs/
37 | dist/
38 | plugins/
39 | downloads/
40 | eggs/
41 | .eggs/
42 | lib/
43 | lib64/
44 | parts/
45 | sdist/
46 | var/
47 | wheels/
48 | pip-wheel-metadata/
49 | share/python-wheels/
50 | *.egg-info/
51 | .installed.cfg
52 | *.egg
53 | MANIFEST
54 |
55 | # PyInstaller
56 | # Usually these files are written by a python script from a template
57 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
58 | *.manifest
59 | *.spec
60 |
61 | # Installer logs
62 | pip-log.txt
63 | pip-delete-this-directory.txt
64 |
65 | # Unit test / coverage reports
66 | htmlcov/
67 | .tox/
68 | .nox/
69 | .coverage
70 | .coverage.*
71 | .cache
72 | nosetests.xml
73 | coverage.xml
74 | *.cover
75 | *.py,cover
76 | .hypothesis/
77 | .pytest_cache/
78 |
79 | # Translations
80 | *.mo
81 | *.pot
82 |
83 | # Django stuff:
84 | *.log
85 | local_settings.py
86 | db.sqlite3
87 | db.sqlite3-journal
88 |
89 | # Flask stuff:
90 | instance/
91 | .webassets-cache
92 |
93 | # Scrapy stuff:
94 | .scrapy
95 |
96 | # Sphinx documentation
97 | docs/_build/
98 | site/
99 |
100 | # PyBuilder
101 | target/
102 |
103 | # Jupyter Notebook
104 | .ipynb_checkpoints
105 |
106 | # IPython
107 | profile_default/
108 | ipython_config.py
109 |
110 | # pyenv
111 | .python-version
112 |
113 | # pipenv
114 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
115 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
116 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
117 | # install all needed dependencies.
118 | #Pipfile.lock
119 |
120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
121 | __pypackages__/
122 |
123 | # Celery stuff
124 | celerybeat-schedule
125 | celerybeat.pid
126 |
127 | # SageMath parsed files
128 | *.sage.py
129 |
130 | # Environments
131 | .direnv/
132 | .env
133 | .venv
134 | env/
135 | venv*/
136 | ENV/
137 | env.bak/
138 |
139 | # Spyder project settings
140 | .spyderproject
141 | .spyproject
142 |
143 | # Rope project settings
144 | .ropeproject
145 |
146 | # mkdocs documentation
147 | /site
148 |
149 | # mypy
150 | .mypy_cache/
151 | .dmypy.json
152 | dmypy.json
153 |
154 | # Pyre type checker
155 | .pyre/
156 | llama-*
157 | vicuna-*
158 |
159 | # mac
160 | .DS_Store
161 |
162 | openai/
163 |
164 | # news
165 | CURRENT_BULLETIN.md
--------------------------------------------------------------------------------
/.isort.cfg:
--------------------------------------------------------------------------------
1 | [settings]
2 | profile = black
3 | multi_line_output = 3
4 | include_trailing_comma = true
5 | force_grid_wrap = 0
6 | use_parentheses = true
7 | ensure_newline_before_comments = true
8 | line_length = 88
9 | sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
10 | skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist
11 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.4.0
4 | hooks:
5 | - id: check-added-large-files
6 | args: ['--maxkb=500']
7 | - id: check-byte-order-marker
8 | - id: check-case-conflict
9 | - id: check-merge-conflict
10 | - id: check-symlinks
11 | - id: debug-statements
12 |
13 | - repo: https://github.com/pycqa/isort
14 | rev: 5.12.0
15 | hooks:
16 | - id: isort
17 | language_version: python3.10
18 |
19 | - repo: https://github.com/psf/black
20 | rev: 23.3.0
21 | hooks:
22 | - id: black
23 | language_version: python3.10
24 | - repo: https://github.com/pre-commit/mirrors-mypy
25 | rev: 'v1.3.0'
26 | hooks:
27 | - id: mypy
28 |
29 | - repo: local
30 | hooks:
31 | - id: pytest-check
32 | name: pytest-check
33 | entry: pytest --cov=autogpt --without-integration --without-slow-integration
34 | language: system
35 | pass_filenames: false
36 | always_run: true
37 |
--------------------------------------------------------------------------------
/.sourcery.yaml:
--------------------------------------------------------------------------------
1 | # 🪄 This is your project's Sourcery configuration file.
2 |
3 | # You can use it to get Sourcery working in the way you want, such as
4 | # ignoring specific refactorings, skipping directories in your project,
5 | # or writing custom rules.
6 |
7 | # 📚 For a complete reference to this file, see the documentation at
8 | # https://docs.sourcery.ai/Configuration/Project-Settings/
9 |
10 | # This file was auto-generated by Sourcery on 2023-02-25 at 21:07.
11 |
12 | version: '1' # The schema version of this config file
13 |
14 | ignore: # A list of paths or files which Sourcery will ignore.
15 | - .git
16 | - venv
17 | - .venv
18 | - build
19 | - dist
20 | - env
21 | - .env
22 | - .tox
23 |
24 | rule_settings:
25 | enable:
26 | - default
27 | - gpsg
28 | disable: [] # A list of rule IDs Sourcery will never suggest.
29 | rule_types:
30 | - refactoring
31 | - suggestion
32 | - comment
33 | python_version: '3.10' # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.
34 |
35 | # rules: # A list of custom rules Sourcery will include in its analysis.
36 | # - id: no-print-statements
37 | # description: Do not use print statements in the test directory.
38 | # pattern: print(...)
39 | # language: python
40 | # replacement:
41 | # condition:
42 | # explanation:
43 | # paths:
44 | # include:
45 | # - test
46 | # exclude:
47 | # - conftest.py
48 | # tests: []
49 | # tags: []
50 |
51 | # rule_tags: {} # Additional rule tags.
52 |
53 | # metrics:
54 | # quality_threshold: 25.0
55 |
56 | # github:
57 | # labels: []
58 | # ignore_labels:
59 | # - sourcery-ignore
60 | # request_review: author
61 | # sourcery_branch: sourcery/{base_branch}
62 |
63 | # clone_detection:
64 | # min_lines: 3
65 | # min_duplicates: 2
66 | # identical_clones_only: false
67 |
68 | # proxy:
69 | # url:
70 | # ssl_certs_file:
71 | # no_ssl_verify: false
72 |
--------------------------------------------------------------------------------
/BULLETIN.md:
--------------------------------------------------------------------------------
1 | plz
2 |
3 | # Website and Documentation Site 📰📖
4 | Check out *https://agpt.co*, the official news & updates site for Auto-GPT!
5 | The documentation also has a place here, at *https://docs.agpt.co*
6 |
7 | # 🚀 v0.3.1 Release 🚀
8 | Over a week and 47 pull requests have passed since v0.3.0, and we are happy to announce
9 | the release of v0.3.1!
10 |
11 | Highlights and notable changes in this release:
12 |
13 | ## Changes to Docker configuration 🐋
14 | The workdir has been changed from */home/appuser* to */app*.
15 | Be sure to update any volume mounts accordingly!
16 |
17 | ## ⚠️ Command `send_tweet` is DEPRECATED, and will be removed in v0.4.0 ⚠️
18 | Twitter functionality (and more) is now covered by plugins, see [Plugin support 🔌]
19 |
20 | ## Documentation
21 | - Docker-compose 1.29.0 is now required, as documented.
22 | - Path to the workspace directory in the setup guide has been corrected.
23 | - Memory setup links have been updated.
24 |
25 | ## Logs
26 | - Log functionality has been improved for better understanding and easier summarization.
27 | - User input is now logged in the logs/Debug Folder.
28 |
29 | ## Other
30 | - Edge browser support has been added using EdgeChromiumDriverManager.
31 | - Users now have the ability to disable commands via the .env file.
32 | - Run scripts for both Windows (.bat) and Unix (.sh) have been updated.
33 |
34 | ## BugFix
35 | - DuckDuckGo dependency has been updated, with a minimum version set to 2.9.5.
36 | - Package versions parsing has been enabled for forced upgrades.
37 | - Docker volume mounts have been fixed.
38 | - A fix was made to the plugin.post_planning call.
39 | - A selenium driver object reference bug in the browsing results was fixed.
40 | - JSON error in summary_memory.py has been handled.
41 | - Dockerfile has been updated to add missing scripts and plugins directories.
42 |
43 | ## CI
44 | - The CI pipeline has been tightened up for improved performance.
45 | - pytest-xdist Plugin has been integrated for parallel and concurrent testing.
46 | - Tests have been conducted for a new CI pipeline.
47 | - A code owners policy has been added.
48 | - Test against Python 3.10 (not 3.10 + 3.11) to halve the number of tests that are executed.
49 |
50 | ## Plugin support 🔌
51 | Auto-GPT now has support for plugins! With plugins, you can extend Auto-GPT's abilities,
52 | adding support for third-party services and more.
53 | See https://github.com/Significant-Gravitas/Auto-GPT-Plugins for instructions and available plugins.
54 | Denylist handling for plugins is now available.
55 |
56 | *From now on, we will be focusing on major improvements* rather
57 | than bugfixes, as we feel stability has reached a reasonable level. Most remaining
58 | issues relate to limitations in prompt generation and the memory system, which will be
59 | the focus of our efforts for the next release.
60 |
61 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct for Auto-GPT
2 |
3 | ## 1. Purpose
4 |
5 | The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
6 |
7 | ## 2. Scope
8 |
9 | This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
10 |
11 | ## 3. Our Standards
12 |
13 | We encourage the following behavior:
14 |
15 | * Being respectful and considerate to others
16 | * Actively seeking diverse perspectives
17 | * Providing constructive feedback and assistance
18 | * Demonstrating empathy and understanding
19 |
20 | We discourage the following behavior:
21 |
22 | * Harassment or discrimination of any kind
23 | * Disrespectful, offensive, or inappropriate language or content
24 | * Personal attacks or insults
25 | * Unwarranted criticism or negativity
26 |
27 | ## 4. Reporting and Enforcement
28 |
29 | If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary.
30 |
31 | Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations.
32 |
33 | ## 5. Acknowledgements
34 |
35 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).
36 |
37 | ## 6. Contact
38 |
39 | If you have any questions or concerns, please contact the project maintainers.
40 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | This document now lives at https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
2 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # 'dev' or 'release' container build
2 | ARG BUILD_TYPE=dev
3 |
4 | # Use an official Python base image from the Docker Hub
5 | FROM python:3.10-slim AS autogpt-base
6 |
7 | # Install browsers
8 | RUN apt-get update && apt-get install -y \
9 | chromium-driver firefox-esr \
10 | ca-certificates
11 |
12 | # Install utilities
13 | RUN apt-get install -y curl jq wget git
14 |
15 | # Set environment variables
16 | ENV PIP_NO_CACHE_DIR=yes \
17 | PYTHONUNBUFFERED=1 \
18 | PYTHONDONTWRITEBYTECODE=1
19 |
20 | # Install the required python packages globally
21 | ENV PATH="$PATH:/root/.local/bin"
22 | COPY requirements.txt .
23 |
24 | # Set the entrypoint
25 | ENTRYPOINT ["python", "-m", "autogpt", "--install-plugin-deps"]
26 |
27 | # dev build -> include everything
28 | FROM autogpt-base as autogpt-dev
29 | RUN pip install --no-cache-dir -r requirements.txt
30 | WORKDIR /app
31 | ONBUILD COPY . ./
32 |
33 | # release build -> include bare minimum
34 | FROM autogpt-base as autogpt-release
35 | RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
36 | pip install --no-cache-dir -r requirements.txt
37 | WORKDIR /app
38 | ONBUILD COPY autogpt/ ./autogpt
39 | ONBUILD COPY scripts/ ./scripts
40 | ONBUILD COPY plugins/ ./plugins
41 |
42 | FROM autogpt-${BUILD_TYPE} AS auto-gpt
43 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Toran Bruce Richards
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | deleting README
2 |
--------------------------------------------------------------------------------
/autogpt/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import sys
4 |
5 | from dotenv import load_dotenv
6 |
7 | if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
8 | print("Setting random seed to 42")
9 | random.seed(42)
10 |
11 | # Load the users .env file into environment variables
12 | load_dotenv(verbose=True, override=True)
13 |
14 | del load_dotenv
15 |
--------------------------------------------------------------------------------
/autogpt/__main__.py:
--------------------------------------------------------------------------------
1 | """Auto-GPT: A GPT powered AI Assistant"""
2 | import autogpt.cli
3 |
4 | if __name__ == "__main__":
5 | autogpt.cli.main()
6 |
--------------------------------------------------------------------------------
/autogpt/agent/__init__.py:
--------------------------------------------------------------------------------
1 | from autogpt.agent.agent import Agent
2 | from autogpt.agent.agent_manager import AgentManager
3 |
4 | __all__ = ["Agent", "AgentManager"]
5 |
--------------------------------------------------------------------------------
/autogpt/commands/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/autogpt/commands/__init__.py
--------------------------------------------------------------------------------
/autogpt/commands/analyze_code.py:
--------------------------------------------------------------------------------
1 | """Code evaluation module."""
2 | from __future__ import annotations
3 |
4 | from autogpt.commands.command import command
5 | from autogpt.llm import call_ai_function
6 |
7 |
8 | @command(
9 | "analyze_code",
10 | "Analyze Code",
11 |     '"code": "<full_code_string>"',
12 | )
13 | def analyze_code(code: str) -> list[str]:
14 | """
15 | A function that takes in a string and returns a response from create chat
16 | completion api call.
17 |
18 | Parameters:
19 | code (str): Code to be evaluated.
20 | Returns:
21 | A result string from create chat completion. A list of suggestions to
22 | improve the code.
23 | """
24 |
25 | function_string = "def analyze_code(code: str) -> list[str]:"
26 | args = [code]
27 | description_string = (
28 | "Analyzes the given code and returns a list of suggestions for improvements."
29 | )
30 |
31 | return call_ai_function(function_string, args, description_string)
32 |
--------------------------------------------------------------------------------
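Note: `call_ai_function` (from autogpt.llm) wraps a chat completion that asks the model to behave as if it were the declared Python function, so the return value here is whatever the model produces for the `list[str]` signature. A usage sketch, assuming an OpenAI API key is configured; the output shown is invented:

    from autogpt.commands.analyze_code import analyze_code

    suggestions = analyze_code("def add(a, b):\n    return a + b")
    # Raw LLM output for the declared list[str] signature, e.g.:
    # ["Add type hints to the signature.", "Add a docstring."]
    print(suggestions)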
/autogpt/commands/audio_text.py:
--------------------------------------------------------------------------------
1 | """Commands for converting audio to text."""
2 | import json
3 |
4 | import requests
5 |
6 | from autogpt.commands.command import command
7 | from autogpt.config import Config
8 |
9 | CFG = Config()
10 |
11 |
12 | @command(
13 | "read_audio_from_file",
14 | "Convert Audio to text",
15 |     '"filename": "<filename>"',
16 | CFG.huggingface_audio_to_text_model,
17 | "Configure huggingface_audio_to_text_model.",
18 | )
19 | def read_audio_from_file(filename: str) -> str:
20 | """
21 | Convert audio to text.
22 |
23 | Args:
24 | filename (str): The path to the audio file
25 |
26 | Returns:
27 | str: The text from the audio
28 | """
29 | with open(filename, "rb") as audio_file:
30 | audio = audio_file.read()
31 | return read_audio(audio)
32 |
33 |
34 | def read_audio(audio: bytes) -> str:
35 | """
36 | Convert audio to text.
37 |
38 | Args:
39 | audio (bytes): The audio to convert
40 |
41 | Returns:
42 | str: The text from the audio
43 | """
44 | model = CFG.huggingface_audio_to_text_model
45 | api_url = f"https://api-inference.huggingface.co/models/{model}"
46 | api_token = CFG.huggingface_api_token
47 | headers = {"Authorization": f"Bearer {api_token}"}
48 |
49 | if api_token is None:
50 | raise ValueError(
51 | "You need to set your Hugging Face API token in the config file."
52 | )
53 |
54 | response = requests.post(
55 | api_url,
56 | headers=headers,
57 | data=audio,
58 | )
59 |
60 | text = json.loads(response.content.decode("utf-8"))["text"]
61 | return f"The audio says: {text}"
62 |
--------------------------------------------------------------------------------
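Note: a usage sketch, assuming the HuggingFace model and API token are configured (via HUGGINGFACE_AUDIO_TO_TEXT_MODEL and HUGGINGFACE_API_TOKEN in the environment) and that speech.flac is a hypothetical local recording:

    from autogpt.commands.audio_text import read_audio_from_file

    # Posts the raw bytes to the HuggingFace inference API and returns
    # "The audio says: <transcript>".
    print(read_audio_from_file("speech.flac"))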
/autogpt/commands/git_operations.py:
--------------------------------------------------------------------------------
1 | """Git operations for autogpt"""
2 | from git.repo import Repo
3 |
4 | from autogpt.commands.command import command
5 | from autogpt.config import Config
6 | from autogpt.url_utils.validators import validate_url
7 |
8 | CFG = Config()
9 |
10 |
11 | @command(
12 | "clone_repository",
13 | "Clone Repository",
14 |     '"url": "<repository_url>", "clone_path": "<clone_path>"',
15 | CFG.github_username and CFG.github_api_key,
16 | "Configure github_username and github_api_key.",
17 | )
18 | @validate_url
19 | def clone_repository(url: str, clone_path: str) -> str:
20 | """Clone a GitHub repository locally.
21 |
22 | Args:
23 | url (str): The URL of the repository to clone.
24 | clone_path (str): The path to clone the repository to.
25 |
26 | Returns:
27 | str: The result of the clone operation.
28 | """
29 |     # Build an authenticated clone URL by inserting credentials after the scheme
30 | split_url = url.split("//")
31 | auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
32 | try:
33 | Repo.clone_from(url=auth_repo_url, to_path=clone_path)
34 | return f"""Cloned {url} to {clone_path}"""
35 | except Exception as e:
36 | return f"Error: {str(e)}"
37 |
--------------------------------------------------------------------------------
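Note: the credential-injection line is easy to misread: splitting on `//` and re-joining with `//username:key@` rewrites the URL at the scheme boundary. A quick worked example with placeholder credentials:

    url = "https://github.com/owner/repo"
    split_url = url.split("//")          # ['https:', 'github.com/owner/repo']
    auth_repo_url = "//user:token@".join(split_url)
    print(auth_repo_url)                 # https://user:token@github.com/owner/repo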
/autogpt/commands/improve_code.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | from autogpt.commands.command import command
6 | from autogpt.llm import call_ai_function
7 |
8 |
9 | @command(
10 | "improve_code",
11 | "Get Improved Code",
12 |     '"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
13 | )
14 | def improve_code(suggestions: list[str], code: str) -> str:
15 | """
16 | A function that takes in code and suggestions and returns a response from create
17 | chat completion api call.
18 |
19 | Parameters:
20 | suggestions (list): A list of suggestions around what needs to be improved.
21 | code (str): Code to be improved.
22 | Returns:
23 | A result string from create chat completion. Improved code in response.
24 | """
25 |
26 | function_string = (
27 | "def generate_improved_code(suggestions: list[str], code: str) -> str:"
28 | )
29 | args = [json.dumps(suggestions), code]
30 | description_string = (
31 | "Improves the provided code based on the suggestions"
32 | " provided, making no other changes."
33 | )
34 |
35 | return call_ai_function(function_string, args, description_string)
36 |
--------------------------------------------------------------------------------
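Note: as with `analyze_code`, the suggestions are JSON-encoded and handed to the model together with the code; the result is the model's rewrite. A usage sketch (the output depends entirely on the model):

    from autogpt.commands.improve_code import improve_code

    better = improve_code(
        ["Add type hints to the signature."],
        "def add(a, b):\n    return a + b",
    )
    print(better)  # ideally the rewritten function, returned as plain text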
/autogpt/commands/review_pr.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import requests
4 |
5 | from autogpt.commands.command import command
6 | from autogpt.config import Config
7 | from autogpt.llm import create_chat_completion
8 |
9 |
10 | @command(
11 | "review_pr",
12 | "Review PR",
13 |     '"pr_link": "<pr_link>"',
14 | )
15 | def review_diff(pr_link: str) -> str:
16 |     """
17 |     Review the diff of a pull request and judge whether it is acceptable
18 |     under the project's contribution guidelines.
19 |
20 |     To get the diff, ".diff" is appended to the PR link and the result is
21 |     fetched with an HTTP request.
22 |
23 |     Possible follow-ups (not implemented here): make a review comment on
24 |     the pull request, or request changes / approve the PR via the GitHub
25 |     API.
26 |
27 |     Parameters:
28 |         pr_link (str): Link to the pull request to review.
29 |     Returns:
30 |         A result string with the outcome of the review.
31 |     """
32 | # use requests to get the pr diff
33 | diff_link = pr_link + '.diff'
34 | response = requests.get(diff_link)
35 | if response.status_code != 200:
36 | raise ValueError(f'Invalid response status: {response.status_code}. '
37 | f'Response text is: {response.text} ')
38 | diff = response.text
39 |
40 |     # now make an LLM call to evaluate the diff
41 |     response = _process_diff(diff)
42 |
43 |
44 |     return f"Successfully reviewed PR: {response}"
45 |
46 | def _process_diff(diff):
47 | """
48 |     Given a diff, ask the LLM to review it against the contribution guidelines and return its verdict.
49 | """
50 | system_prompt = """
51 | Instructions:
52 | You are a github diff reviewer. Below are the contribution guidelines for the project you are reviewing.
53 |
54 | The user is going to provide you with a diff to review. Your job is to determine if the diff is acceptable or not. You have very high standards for accepting a diff.
55 |
56 | If the diff is acceptable, respond with "Acceptable". If the diff is not acceptable, respond with "Request Changes" and explain the needed changes.
57 |
58 | Below are guidelines for acceptable PRs.
59 |
60 | - Your pull request should be atomic and focus on a single change.
61 | - Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status)
62 | - You should have thoroughly tested your changes with multiple different prompts.
63 | - You should have considered potential risks and mitigations for your changes.
64 | - You should have documented your changes clearly and comprehensively.
65 | - You should not include any unrelated or "extra" small tweaks or changes.
66 | """
67 | cfg = Config()
68 | model = cfg.smart_llm_model
69 | # parse args to comma separated string
70 | messages = [
71 | {
72 | "role": "system",
73 | "content": system_prompt,
74 | },
75 | {"role": "user", "content": diff},
76 | ]
77 |
78 | response = create_chat_completion(model=model, messages=messages, temperature=0)
79 | return response
80 |
--------------------------------------------------------------------------------
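Note: the `.diff` trick relies on GitHub serving a plain-text diff when ".diff" is appended to a pull-request URL. A minimal check of just that step (the PR number here is hypothetical):

    import requests

    pr_link = "https://github.com/Significant-Gravitas/Auto-GPT/pull/1"  # hypothetical
    diff = requests.get(pr_link + ".diff").text
    print(diff.splitlines()[0])  # first line looks like "diff --git a/... b/..."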
/autogpt/commands/task_statuses.py:
--------------------------------------------------------------------------------
1 | """Task Statuses module."""
2 | from __future__ import annotations
3 |
4 | from typing import NoReturn
5 |
6 | from autogpt.commands.command import command
7 | from autogpt.logs import logger
8 |
9 |
10 | @command(
11 | "task_complete",
12 | "Task Complete (Shutdown)",
13 |     '"reason": "<reason>"',
14 | )
15 | def task_complete(reason: str) -> NoReturn:
16 | """
17 | A function that takes in a string and exits the program
18 |
19 | Parameters:
20 | reason (str): The reason for shutting down.
21 | Returns:
22 |         NoReturn: This function exits the program, so it never actually
23 |         returns.
24 | """
25 | logger.info(title="Shutting down...\n", message=reason)
26 | quit()
27 |
--------------------------------------------------------------------------------
/autogpt/commands/times.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
3 |
4 | def get_datetime() -> str:
5 | """Return the current date and time
6 |
7 | Returns:
8 | str: The current date and time
9 | """
10 | return "Current date and time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
11 |
--------------------------------------------------------------------------------
/autogpt/commands/twitter.py:
--------------------------------------------------------------------------------
1 | """A module that contains a command to send a tweet."""
2 | import os
3 |
4 | import tweepy
5 |
6 | from autogpt.commands.command import command
7 |
8 |
9 | @command(
10 | "send_tweet",
11 | "Send Tweet",
12 |     '"tweet_text": "<tweet_text>"',
13 | )
14 | def send_tweet(tweet_text: str) -> str:
15 | """
16 |     A function that takes in a string and posts it to Twitter as a
17 |     tweet.
18 |
19 | Args:
20 | tweet_text (str): Text to be tweeted.
21 |
22 | Returns:
23 | A result from sending the tweet.
24 | """
25 | consumer_key = os.environ.get("TW_CONSUMER_KEY")
26 | consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
27 | access_token = os.environ.get("TW_ACCESS_TOKEN")
28 | access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
29 | # Authenticate to Twitter
30 | auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
31 | auth.set_access_token(access_token, access_token_secret)
32 |
33 | # Create API object
34 | api = tweepy.API(auth)
35 |
36 | # Send tweet
37 | try:
38 | api.update_status(tweet_text)
39 | return "Tweet sent successfully!"
40 | except tweepy.TweepyException as e:
41 | return f"Error sending tweet: {e.reason}"
42 |
--------------------------------------------------------------------------------
/autogpt/commands/web_playwright.py:
--------------------------------------------------------------------------------
1 | """Web scraping commands using Playwright"""
2 | from __future__ import annotations
3 |
4 | from autogpt.logs import logger
5 |
6 | try:
7 | from playwright.sync_api import sync_playwright
8 | except ImportError:
9 | logger.info(
10 | "Playwright not installed. Please install it with 'pip install playwright' to use."
11 | )
12 | from bs4 import BeautifulSoup
13 |
14 | from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
15 |
16 |
17 | def scrape_text(url: str) -> str:
18 | """Scrape text from a webpage
19 |
20 | Args:
21 | url (str): The URL to scrape text from
22 |
23 | Returns:
24 | str: The scraped text
25 | """
26 | with sync_playwright() as p:
27 | browser = p.chromium.launch()
28 | page = browser.new_page()
29 |
30 | try:
31 | page.goto(url)
32 | html_content = page.content()
33 | soup = BeautifulSoup(html_content, "html.parser")
34 |
35 | for script in soup(["script", "style"]):
36 | script.extract()
37 |
38 | text = soup.get_text()
39 | lines = (line.strip() for line in text.splitlines())
40 |         chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
41 | text = "\n".join(chunk for chunk in chunks if chunk)
42 |
43 | except Exception as e:
44 | text = f"Error: {str(e)}"
45 |
46 | finally:
47 | browser.close()
48 |
49 | return text
50 |
51 |
52 | def scrape_links(url: str) -> str | list[str]:
53 | """Scrape links from a webpage
54 |
55 | Args:
56 | url (str): The URL to scrape links from
57 |
58 | Returns:
59 | Union[str, List[str]]: The scraped links
60 | """
61 | with sync_playwright() as p:
62 | browser = p.chromium.launch()
63 | page = browser.new_page()
64 |
65 | try:
66 | page.goto(url)
67 | html_content = page.content()
68 | soup = BeautifulSoup(html_content, "html.parser")
69 |
70 | for script in soup(["script", "style"]):
71 | script.extract()
72 |
73 | hyperlinks = extract_hyperlinks(soup, url)
74 | formatted_links = format_hyperlinks(hyperlinks)
75 |
76 | except Exception as e:
77 | formatted_links = f"Error: {str(e)}"
78 |
79 | finally:
80 | browser.close()
81 |
82 | return formatted_links
83 |
--------------------------------------------------------------------------------
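Note: the text-cleanup pipeline in `scrape_text` strips each line, splits on double spaces to separate run-together phrases, and drops empty chunks. A worked example of just that transformation:

    text = "  Title  \n\n  First phrase.  Second phrase.  \n"
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    print("\n".join(chunk for chunk in chunks if chunk))
    # Title
    # First phrase.
    # Second phrase.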
/autogpt/commands/write_tests.py:
--------------------------------------------------------------------------------
1 | """A module that contains a function to generate test cases for the submitted code."""
2 | from __future__ import annotations
3 |
4 | import json
5 |
6 | from autogpt.commands.command import command
7 | from autogpt.llm import call_ai_function
8 |
9 |
10 | @command(
11 | "write_tests",
12 | "Write Tests",
13 |     '"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
14 | )
15 | def write_tests(code: str, focus: list[str]) -> str:
16 | """
17 | A function that takes in code and focus topics and returns a response from create
18 | chat completion api call.
19 |
20 | Parameters:
21 |         focus (list): A list of areas for the generated tests to focus on.
22 | code (str): Code for test cases to be generated against.
23 | Returns:
24 | A result string from create chat completion. Test cases for the submitted code
25 | in response.
26 | """
27 |
28 | function_string = (
29 | "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
30 | )
31 | args = [code, json.dumps(focus)]
32 | description_string = (
33 | "Generates test cases for the existing code, focusing on"
34 | " specific areas if required."
35 | )
36 |
37 | return call_ai_function(function_string, args, description_string)
38 |
--------------------------------------------------------------------------------
/autogpt/config/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains the configuration classes for AutoGPT.
3 | """
4 | from autogpt.config.ai_config import AIConfig
5 | from autogpt.config.config import Config, check_openai_api_key
6 |
7 | __all__ = [
8 | "check_openai_api_key",
9 | "AIConfig",
10 | "Config",
11 | ]
12 |
--------------------------------------------------------------------------------
/autogpt/config/prompt_config.py:
--------------------------------------------------------------------------------
1 | # sourcery skip: do-not-use-staticmethod
2 | """
3 | A module that contains the PromptConfig class object that contains the configuration
4 | """
5 | import yaml
6 | from colorama import Fore
7 |
8 | from autogpt import utils
9 | from autogpt.config.config import Config
10 | from autogpt.logs import logger
11 |
12 | CFG = Config()
13 |
14 |
15 | class PromptConfig:
16 | """
17 | A class object that contains the configuration information for the prompt, which will be used by the prompt generator
18 |
19 | Attributes:
20 | constraints (list): Constraints list for the prompt generator.
21 | resources (list): Resources list for the prompt generator.
22 | performance_evaluations (list): Performance evaluation list for the prompt generator.
23 | """
24 |
25 | def __init__(
26 | self,
27 | config_file: str = CFG.prompt_settings_file,
28 | ) -> None:
29 | """
30 | Initialize a class instance with parameters (constraints, resources, performance_evaluations) loaded from
31 | yaml file if yaml file exists,
32 | else raises error.
33 |
34 | Parameters:
35 | constraints (list): Constraints list for the prompt generator.
36 | resources (list): Resources list for the prompt generator.
37 | performance_evaluations (list): Performance evaluation list for the prompt generator.
38 | Returns:
39 | None
40 | """
41 | # Validate file
42 | (validated, message) = utils.validate_yaml_file(config_file)
43 | if not validated:
44 | logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
45 | logger.double_check()
46 | exit(1)
47 |
48 | with open(config_file, encoding="utf-8") as file:
49 | config_params = yaml.load(file, Loader=yaml.FullLoader)
50 |
51 | self.constraints = config_params.get("constraints", [])
52 | self.resources = config_params.get("resources", [])
53 | self.performance_evaluations = config_params.get("performance_evaluations", [])
54 |
--------------------------------------------------------------------------------
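As a sketch of the YAML shape `PromptConfig` expects (key names taken from the `config_params.get` calls above; the example values are made up):

```python
import yaml

# Hypothetical prompt settings mirroring prompt_settings.yaml's structure.
raw = """\
constraints:
  - ~4000 word limit for short term memory.
resources:
  - Internet access for searches and information gathering.
performance_evaluations:
  - Continuously review and analyze your actions.
"""

config_params = yaml.load(raw, Loader=yaml.FullLoader)
assert len(config_params["constraints"]) == 1
```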
/autogpt/js/overlay.js:
--------------------------------------------------------------------------------
1 | const overlay = document.createElement('div');
2 | Object.assign(overlay.style, {
3 | position: 'fixed',
4 | zIndex: 999999,
5 | top: 0,
6 | left: 0,
7 | width: '100%',
8 | height: '100%',
9 | background: 'rgba(0, 0, 0, 0.7)',
10 | color: '#fff',
11 | fontSize: '24px',
12 | fontWeight: 'bold',
13 | display: 'flex',
14 | justifyContent: 'center',
15 | alignItems: 'center',
16 | });
17 | const textContent = document.createElement('div');
18 | Object.assign(textContent.style, {
19 | textAlign: 'center',
20 | });
21 | textContent.textContent = 'AutoGPT Analyzing Page';
22 | overlay.appendChild(textContent);
23 | document.body.append(overlay);
24 | document.body.style.overflow = 'hidden';
25 | let dotCount = 0;
26 | setInterval(() => {
27 | textContent.textContent = 'AutoGPT Analyzing Page' + '.'.repeat(dotCount);
28 | dotCount = (dotCount + 1) % 4;
29 | }, 1000);
30 |
--------------------------------------------------------------------------------
/autogpt/json_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/autogpt/json_utils/__init__.py
--------------------------------------------------------------------------------
/autogpt/json_utils/llm_response_format_1.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "http://json-schema.org/draft-07/schema#",
3 | "type": "object",
4 | "properties": {
5 | "thoughts": {
6 | "type": "object",
7 | "properties": {
8 | "text": {"type": "string"},
9 | "reasoning": {"type": "string"},
10 | "plan": {"type": "string"},
11 | "criticism": {"type": "string"},
12 | "speak": {"type": "string"}
13 | },
14 | "required": ["text", "reasoning", "plan", "criticism", "speak"],
15 | "additionalProperties": false
16 | },
17 | "command": {
18 | "type": "object",
19 | "properties": {
20 | "name": {"type": "string"},
21 | "args": {
22 | "type": "object"
23 | }
24 | },
25 | "required": ["name", "args"],
26 | "additionalProperties": false
27 | }
28 | },
29 | "required": ["thoughts", "command"],
30 | "additionalProperties": false
31 | }
32 |
--------------------------------------------------------------------------------
/autogpt/json_utils/utilities.py:
--------------------------------------------------------------------------------
1 | """Utilities for the json_fixes package."""
2 | import json
3 | import os.path
4 | import re
5 |
6 | from jsonschema import Draft7Validator
7 |
8 | from autogpt.config import Config
9 | from autogpt.logs import logger
10 |
11 | CFG = Config()
12 | LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
13 |
14 |
15 | def extract_char_position(error_message: str) -> int:
16 | """Extract the character position from the JSONDecodeError message.
17 |
18 | Args:
19 | error_message (str): The error message from the JSONDecodeError
20 | exception.
21 |
22 | Returns:
23 | int: The character position.
24 | """
25 |
26 | char_pattern = re.compile(r"\(char (\d+)\)")
27 | if match := char_pattern.search(error_message):
28 | return int(match[1])
29 | else:
30 | raise ValueError("Character position not found in the error message.")
31 |
32 |
33 | def validate_json(json_object: object, schema_name: str) -> dict | None:
34 |     """Validate a JSON object against the named schema.
35 |     Args:
36 |         json_object (object): The JSON object to validate.
37 |         schema_name (str): Name of the schema file, without the .json extension.
38 |     """
39 | scheme_file = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
40 | with open(scheme_file, "r") as f:
41 | schema = json.load(f)
42 | validator = Draft7Validator(schema)
43 |
44 | if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
45 | logger.error("The JSON object is invalid.")
46 | if CFG.debug_mode:
47 | logger.error(
48 | json.dumps(json_object, indent=4)
49 |             )
50 | logger.error("The following issues were found:")
51 |
52 | for error in errors:
53 | logger.error(f"Error: {error.message}")
54 | else:
55 | logger.debug("The JSON object is valid.")
56 |
57 | return json_object
58 |
59 |
60 | def validate_json_string(json_string: str, schema_name: str) -> dict | None:
61 |     """Parse a JSON string and validate it against the named schema.
62 |     Args:
63 |         json_string (str): The JSON string to parse and validate.
64 |         schema_name (str): Name of the schema file, without the .json extension.
65 |     """
66 |
67 | try:
68 | json_loaded = json.loads(json_string)
69 | return validate_json(json_loaded, schema_name)
70 |     except Exception:
71 | return None
72 |
73 |
74 | def is_string_valid_json(json_string: str, schema_name: str) -> bool:
75 |     """Return True if the string is valid JSON that conforms to the named schema.
76 |     Args:
77 |         json_string (str): The JSON string to check.
78 |         schema_name (str): Name of the schema file, without the .json extension.
79 |     """
80 |
81 | return validate_json_string(json_string, schema_name) is not None
82 |
--------------------------------------------------------------------------------
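A minimal sketch of these helpers in use, with a reply shaped to satisfy the `llm_response_format_1` schema shown above (assumes the package's `Config` can initialize in your environment):

```python
import json

from autogpt.json_utils.utilities import (
    LLM_DEFAULT_RESPONSE_FORMAT,
    is_string_valid_json,
)

# Both top-level keys, all five "thoughts" fields, no extra properties.
reply = json.dumps({
    "thoughts": {
        "text": "I should list the files first.",
        "reasoning": "Knowing the workspace contents informs the next step.",
        "plan": "- list files\n- read the relevant one",
        "criticism": "Avoid re-reading files already summarized.",
        "speak": "Listing the workspace files.",
    },
    "command": {"name": "list_files", "args": {"directory": "."}},
})

assert is_string_valid_json(reply, LLM_DEFAULT_RESPONSE_FORMAT)
```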
/autogpt/llm/__init__.py:
--------------------------------------------------------------------------------
1 | from autogpt.llm.api_manager import ApiManager
2 | from autogpt.llm.base import (
3 | ChatModelInfo,
4 | ChatModelResponse,
5 | EmbeddingModelInfo,
6 | EmbeddingModelResponse,
7 | LLMResponse,
8 | Message,
9 | ModelInfo,
10 | )
11 | from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context
12 | from autogpt.llm.llm_utils import (
13 | call_ai_function,
14 | chunked_tokens,
15 | create_chat_completion,
16 | get_ada_embedding,
17 | )
18 | from autogpt.llm.modelsinfo import COSTS
19 | from autogpt.llm.token_counter import count_message_tokens, count_string_tokens
20 |
21 | __all__ = [
22 | "ApiManager",
23 | "Message",
24 | "ModelInfo",
25 | "ChatModelInfo",
26 | "EmbeddingModelInfo",
27 | "LLMResponse",
28 | "ChatModelResponse",
29 | "EmbeddingModelResponse",
30 | "create_chat_message",
31 | "generate_context",
32 | "chat_with_ai",
33 | "call_ai_function",
34 | "create_chat_completion",
35 | "get_ada_embedding",
36 | "chunked_tokens",
37 | "COSTS",
38 | "count_message_tokens",
39 | "count_string_tokens",
40 | ]
41 |
--------------------------------------------------------------------------------
/autogpt/llm/base.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from typing import List, Optional, TypedDict
3 |
4 |
5 | class Message(TypedDict):
6 | """OpenAI Message object containing a role and the message content"""
7 |
8 | role: str
9 | content: str
10 |
11 |
12 | @dataclass
13 | class ModelInfo:
14 | """Struct for model information.
15 |
16 | Would be lovely to eventually get this directly from APIs, but needs to be scraped from
17 | websites for now.
18 |
19 | """
20 |
21 | name: str
22 | prompt_token_cost: float
23 | completion_token_cost: float
24 | max_tokens: int
25 |
26 |
27 | @dataclass
28 | class ChatModelInfo(ModelInfo):
29 | """Struct for chat model information."""
30 |
31 | pass
32 |
33 |
34 | @dataclass
35 | class EmbeddingModelInfo(ModelInfo):
36 | """Struct for embedding model information."""
37 |
38 | embedding_dimensions: int
39 |
40 |
41 | @dataclass
42 | class LLMResponse:
43 | """Standard response struct for a response from an LLM model."""
44 |
45 | model_info: ModelInfo
46 | prompt_tokens_used: int = 0
47 | completion_tokens_used: int = 0
48 |
49 |
50 | @dataclass
51 | class EmbeddingModelResponse(LLMResponse):
52 | """Standard response struct for a response from an embedding model."""
53 |
54 | embedding: List[float] = field(default_factory=list)
55 |
56 | def __post_init__(self):
57 | if self.completion_tokens_used:
58 | raise ValueError("Embeddings should not have completion tokens used.")
59 |
60 |
61 | @dataclass
62 | class ChatModelResponse(LLMResponse):
63 |     """Standard response struct for a response from a chat model."""
64 |
65 |     content: Optional[str] = None
66 |
--------------------------------------------------------------------------------
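A quick sketch of how these structs compose; the numbers mirror the gpt-3.5-turbo entry in providers/openai.py below:

```python
from autogpt.llm.base import ChatModelInfo, ChatModelResponse

info = ChatModelInfo(
    name="gpt-3.5-turbo",
    prompt_token_cost=0.002,
    completion_token_cost=0.002,
    max_tokens=4096,
)
response = ChatModelResponse(
    model_info=info,
    prompt_tokens_used=512,
    completion_tokens_used=128,
    content="...",  # the assistant's reply text
)
```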
/autogpt/llm/modelsinfo.py:
--------------------------------------------------------------------------------
1 | COSTS = {
2 | "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
3 | "gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002},
4 | "gpt-4-0314": {"prompt": 0.03, "completion": 0.06},
5 | "gpt-4": {"prompt": 0.03, "completion": 0.06},
6 |     "gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
7 |     "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
8 |     "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
9 | }
10 | 
--------------------------------------------------------------------------------
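These figures appear to be USD per 1,000 tokens (matching OpenAI's published pricing at the time), so a rough per-call estimate looks like:

```python
from autogpt.llm.modelsinfo import COSTS

prompt_tokens, completion_tokens = 1500, 300
cost = (
    prompt_tokens * COSTS["gpt-3.5-turbo"]["prompt"]
    + completion_tokens * COSTS["gpt-3.5-turbo"]["completion"]
) / 1000
print(f"${cost:.4f}")  # $0.0036
```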
/autogpt/llm/providers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/autogpt/llm/providers/__init__.py
--------------------------------------------------------------------------------
/autogpt/llm/providers/openai.py:
--------------------------------------------------------------------------------
1 | from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo
2 |
3 | OPEN_AI_CHAT_MODELS = {
4 | "gpt-3.5-turbo": ChatModelInfo(
5 | name="gpt-3.5-turbo",
6 | prompt_token_cost=0.002,
7 | completion_token_cost=0.002,
8 | max_tokens=4096,
9 | ),
10 | "gpt-4": ChatModelInfo(
11 | name="gpt-4",
12 | prompt_token_cost=0.03,
13 | completion_token_cost=0.06,
14 | max_tokens=8192,
15 | ),
16 | "gpt-4-32k": ChatModelInfo(
17 | name="gpt-4-32k",
18 | prompt_token_cost=0.06,
19 | completion_token_cost=0.12,
20 | max_tokens=32768,
21 | ),
22 | }
23 |
24 | OPEN_AI_EMBEDDING_MODELS = {
25 | "text-embedding-ada-002": EmbeddingModelInfo(
26 | name="text-embedding-ada-002",
27 | prompt_token_cost=0.0004,
28 | completion_token_cost=0.0,
29 | max_tokens=8191,
30 | embedding_dimensions=1536,
31 | ),
32 | }
33 |
34 | OPEN_AI_MODELS = {
35 | **OPEN_AI_CHAT_MODELS,
36 | **OPEN_AI_EMBEDDING_MODELS,
37 | }
38 |
--------------------------------------------------------------------------------
/autogpt/llm/token_counter.py:
--------------------------------------------------------------------------------
1 | """Functions for counting the number of tokens in a message or string."""
2 | from __future__ import annotations
3 |
4 | from typing import List
5 |
6 | import tiktoken
7 |
8 | from autogpt.llm.base import Message
9 | from autogpt.logs import logger
10 |
11 |
12 | def count_message_tokens(
13 | messages: List[Message], model: str = "gpt-3.5-turbo-0301"
14 | ) -> int:
15 | """
16 | Returns the number of tokens used by a list of messages.
17 |
18 | Args:
19 | messages (list): A list of messages, each of which is a dictionary
20 | containing the role and content of the message.
21 | model (str): The name of the model to use for tokenization.
22 | Defaults to "gpt-3.5-turbo-0301".
23 |
24 | Returns:
25 | int: The number of tokens used by the list of messages.
26 | """
27 | try:
28 | encoding = tiktoken.encoding_for_model(model)
29 | except KeyError:
30 | logger.warn("Warning: model not found. Using cl100k_base encoding.")
31 | encoding = tiktoken.get_encoding("cl100k_base")
32 | if model == "gpt-3.5-turbo":
33 | # !Note: gpt-3.5-turbo may change over time.
34 |         # Returning num tokens assuming gpt-3.5-turbo-0301.
35 | return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
36 | elif model == "gpt-4":
37 |         # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.
38 | return count_message_tokens(messages, model="gpt-4-0314")
39 | elif model == "gpt-3.5-turbo-0301":
40 | tokens_per_message = (
41 | 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
42 | )
43 | tokens_per_name = -1 # if there's a name, the role is omitted
44 | elif model == "gpt-4-0314":
45 | tokens_per_message = 3
46 | tokens_per_name = 1
47 | else:
48 | raise NotImplementedError(
49 |             f"count_message_tokens() is not implemented for model {model}.\n"
50 | " See https://github.com/openai/openai-python/blob/main/chatml.md for"
51 | " information on how messages are converted to tokens."
52 | )
53 | num_tokens = 0
54 | for message in messages:
55 | num_tokens += tokens_per_message
56 | for key, value in message.items():
57 | num_tokens += len(encoding.encode(value))
58 | if key == "name":
59 | num_tokens += tokens_per_name
60 | num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
61 | return num_tokens
62 |
63 |
64 | def count_string_tokens(string: str, model_name: str) -> int:
65 | """
66 | Returns the number of tokens in a text string.
67 |
68 | Args:
69 | string (str): The text string.
70 | model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")
71 |
72 | Returns:
73 | int: The number of tokens in the text string.
74 | """
75 | encoding = tiktoken.encoding_for_model(model_name)
76 | return len(encoding.encode(string))
77 |
--------------------------------------------------------------------------------
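A short sketch of both counters in use (requires tiktoken; exact counts depend on the tokenizer version):

```python
from autogpt.llm.token_counter import count_message_tokens, count_string_tokens

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "How many tokens is this?"},
]
print(count_message_tokens(messages, model="gpt-3.5-turbo"))
print(count_string_tokens("How many tokens is this?", "gpt-3.5-turbo"))
```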
/autogpt/log_cycle/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/autogpt/log_cycle/__init__.py
--------------------------------------------------------------------------------
/autogpt/log_cycle/json_handler.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 |
4 |
5 | class JsonFileHandler(logging.FileHandler):
6 | def __init__(self, filename, mode="a", encoding=None, delay=False):
7 | super().__init__(filename, mode, encoding, delay)
8 |
9 | def emit(self, record):
10 | json_data = json.loads(self.format(record))
11 | with open(self.baseFilename, "w", encoding="utf-8") as f:
12 | json.dump(json_data, f, ensure_ascii=False, indent=4)
13 |
14 |
15 | class JsonFormatter(logging.Formatter):
16 |     def format(self, record):
17 |         return record.msg
18 | 
--------------------------------------------------------------------------------
/autogpt/log_cycle/log_cycle.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from typing import Any, Dict, Union
4 |
5 | from autogpt.logs import logger
6 |
7 | DEFAULT_PREFIX = "agent"
8 | FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json"
9 | CURRENT_CONTEXT_FILE_NAME = "current_context.json"
10 | NEXT_ACTION_FILE_NAME = "next_action.json"
11 | PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json"
12 | SUMMARY_FILE_NAME = "summary.txt"
13 | SUPERVISOR_FEEDBACK_FILE_NAME = "supervisor_feedback.txt"
14 | PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME = "prompt_supervisor_feedback.json"
15 | USER_INPUT_FILE_NAME = "user_input.txt"
16 |
17 |
18 | class LogCycleHandler:
19 | """
20 | A class for logging cycle data.
21 | """
22 |
23 | def __init__(self):
24 | self.log_count_within_cycle = 0
25 |
26 | @staticmethod
27 | def create_directory_if_not_exists(directory_path: str) -> None:
28 | if not os.path.exists(directory_path):
29 | os.makedirs(directory_path, exist_ok=True)
30 |
31 | def create_outer_directory(self, ai_name: str, created_at: str) -> str:
32 | log_directory = logger.get_log_directory()
33 |
34 | if os.environ.get("OVERWRITE_DEBUG") == "1":
35 | outer_folder_name = "auto_gpt"
36 | else:
37 | ai_name_short = ai_name[:15] if ai_name else DEFAULT_PREFIX
38 | outer_folder_name = f"{created_at}_{ai_name_short}"
39 |
40 | outer_folder_path = os.path.join(log_directory, "DEBUG", outer_folder_name)
41 | self.create_directory_if_not_exists(outer_folder_path)
42 |
43 | return outer_folder_path
44 |
45 | def create_inner_directory(self, outer_folder_path: str, cycle_count: int) -> str:
46 | nested_folder_name = str(cycle_count).zfill(3)
47 | nested_folder_path = os.path.join(outer_folder_path, nested_folder_name)
48 | self.create_directory_if_not_exists(nested_folder_path)
49 |
50 | return nested_folder_path
51 |
52 | def create_nested_directory(
53 | self, ai_name: str, created_at: str, cycle_count: int
54 | ) -> str:
55 | outer_folder_path = self.create_outer_directory(ai_name, created_at)
56 | nested_folder_path = self.create_inner_directory(outer_folder_path, cycle_count)
57 |
58 | return nested_folder_path
59 |
60 | def log_cycle(
61 | self,
62 | ai_name: str,
63 | created_at: str,
64 | cycle_count: int,
65 | data: Union[Dict[str, Any], Any],
66 | file_name: str,
67 | ) -> None:
68 | """
69 | Log cycle data to a JSON file.
70 |
71 | Args:
72 | data (Any): The data to be logged.
73 | file_name (str): The name of the file to save the logged data.
74 | """
75 | nested_folder_path = self.create_nested_directory(
76 | ai_name, created_at, cycle_count
77 | )
78 |
79 | json_data = json.dumps(data, ensure_ascii=False, indent=4)
80 | log_file_path = os.path.join(
81 | nested_folder_path, f"{self.log_count_within_cycle}_{file_name}"
82 | )
83 |
84 | logger.log_json(json_data, log_file_path)
85 | self.log_count_within_cycle += 1
86 |
--------------------------------------------------------------------------------
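A sketch of logging one cycle; the file name constant comes from this module, while the agent name and timestamp are made up:

```python
from autogpt.log_cycle.log_cycle import NEXT_ACTION_FILE_NAME, LogCycleHandler

handler = LogCycleHandler()
# Writes <log dir>/DEBUG/20230601_120000_Sample-GPT/001/0_next_action.json
handler.log_cycle(
    ai_name="Sample-GPT",
    created_at="20230601_120000",
    cycle_count=1,
    data={"command": "list_files", "args": {"directory": "."}},
    file_name=NEXT_ACTION_FILE_NAME,
)
```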
/autogpt/memory/__init__.py:
--------------------------------------------------------------------------------
1 | from autogpt.logs import logger
2 | from autogpt.memory.local import LocalCache
3 | from autogpt.memory.no_memory import NoMemory
4 |
5 | # List of supported memory backends
6 | # Add a backend to this list if the import attempt is successful
7 | supported_memory = ["local", "no_memory"]
8 |
9 | try:
10 | from autogpt.memory.redismem import RedisMemory
11 |
12 | supported_memory.append("redis")
13 | except ImportError:
14 | RedisMemory = None
15 |
16 | try:
17 | from autogpt.memory.pinecone import PineconeMemory
18 |
19 | supported_memory.append("pinecone")
20 | except ImportError:
21 | PineconeMemory = None
22 |
23 | try:
24 | from autogpt.memory.weaviate import WeaviateMemory
25 |
26 | supported_memory.append("weaviate")
27 | except ImportError:
28 | WeaviateMemory = None
29 |
30 | try:
31 | from autogpt.memory.milvus import MilvusMemory
32 |
33 | supported_memory.append("milvus")
34 | except ImportError:
35 | MilvusMemory = None
36 |
37 |
38 | def get_memory(cfg, init=False):
39 | memory = None
40 | if cfg.memory_backend == "pinecone":
41 | if not PineconeMemory:
42 | logger.warn(
43 | "Error: Pinecone is not installed. Please install pinecone"
44 | " to use Pinecone as a memory backend."
45 | )
46 | else:
47 | memory = PineconeMemory(cfg)
48 | if init:
49 | memory.clear()
50 | elif cfg.memory_backend == "redis":
51 | if not RedisMemory:
52 | logger.warn(
53 | "Error: Redis is not installed. Please install redis-py to"
54 | " use Redis as a memory backend."
55 | )
56 | else:
57 | memory = RedisMemory(cfg)
58 | elif cfg.memory_backend == "weaviate":
59 | if not WeaviateMemory:
60 | logger.warn(
61 | "Error: Weaviate is not installed. Please install weaviate-client to"
62 | " use Weaviate as a memory backend."
63 | )
64 | else:
65 | memory = WeaviateMemory(cfg)
66 | elif cfg.memory_backend == "milvus":
67 | if not MilvusMemory:
68 | logger.warn(
69 |                 "Error: pymilvus sdk is not installed. "
70 |                 "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
71 | )
72 | else:
73 | memory = MilvusMemory(cfg)
74 | elif cfg.memory_backend == "no_memory":
75 | memory = NoMemory(cfg)
76 |
77 | if memory is None:
78 | memory = LocalCache(cfg)
79 | if init:
80 | memory.clear()
81 | return memory
82 |
83 |
84 | def get_supported_memory_backends():
85 | return supported_memory
86 |
87 |
88 | __all__ = [
89 | "get_memory",
90 | "LocalCache",
91 | "RedisMemory",
92 | "PineconeMemory",
93 | "NoMemory",
94 | "MilvusMemory",
95 | "WeaviateMemory",
96 | ]
97 |
--------------------------------------------------------------------------------
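A sketch of backend selection, assuming a Config whose memory_backend names one of the entries in supported_memory and an OpenAI key for embeddings; unknown or uninstalled backends fall back to LocalCache:

```python
from autogpt.config import Config
from autogpt.memory import get_memory, get_supported_memory_backends

cfg = Config()
print(get_supported_memory_backends())  # e.g. ['local', 'no_memory', 'redis']

# init=True wipes Pinecone or the LocalCache fallback before use.
memory = get_memory(cfg, init=True)
memory.add("Auto-GPT stores observations between cycles.")
print(memory.get_relevant("What does Auto-GPT store?", num_relevant=1))
```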
/autogpt/memory/base.py:
--------------------------------------------------------------------------------
1 | """Base class for memory providers."""
2 | import abc
3 |
4 | from autogpt.singleton import AbstractSingleton
5 |
6 |
7 | class MemoryProviderSingleton(AbstractSingleton):
8 | @abc.abstractmethod
9 | def add(self, data):
10 | """Adds to memory"""
11 | pass
12 |
13 | @abc.abstractmethod
14 | def get(self, data):
15 | """Gets from memory"""
16 | pass
17 |
18 | @abc.abstractmethod
19 | def clear(self):
20 | """Clears memory"""
21 | pass
22 |
23 | @abc.abstractmethod
24 | def get_relevant(self, data, num_relevant=5):
25 |         """Gets relevant memory for the given data"""
26 | pass
27 |
28 | @abc.abstractmethod
29 | def get_stats(self):
30 | """Get stats from memory"""
31 | pass
32 |
--------------------------------------------------------------------------------
/autogpt/memory/no_memory.py:
--------------------------------------------------------------------------------
1 | """A class that does not store any data. This is the default memory provider."""
2 | from __future__ import annotations
3 |
4 | from typing import Any
5 |
6 | from autogpt.memory.base import MemoryProviderSingleton
7 |
8 |
9 | class NoMemory(MemoryProviderSingleton):
10 | """
11 | A class that does not store any data. This is the default memory provider.
12 | """
13 |
14 | def __init__(self, cfg):
15 | """
16 | Initializes the NoMemory provider.
17 |
18 | Args:
19 | cfg: The config object.
20 |
21 | Returns: None
22 | """
23 | pass
24 |
25 | def add(self, data: str) -> str:
26 | """
27 | Adds a data point to the memory. No action is taken in NoMemory.
28 |
29 | Args:
30 | data: The data to add.
31 |
32 | Returns: An empty string.
33 | """
34 | return ""
35 |
36 | def get(self, data: str) -> list[Any] | None:
37 | """
38 | Gets the data from the memory that is most relevant to the given data.
39 | NoMemory always returns None.
40 |
41 | Args:
42 | data: The data to compare to.
43 |
44 | Returns: None
45 | """
46 | return None
47 |
48 | def clear(self) -> str:
49 | """
50 | Clears the memory. No action is taken in NoMemory.
51 |
52 | Returns: An empty string.
53 | """
54 | return ""
55 |
56 | def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
57 | """
58 | Returns all the data in the memory that is relevant to the given data.
59 | NoMemory always returns None.
60 |
61 | Args:
62 | data: The data to compare to.
63 | num_relevant: The number of relevant data to return.
64 |
65 | Returns: None
66 | """
67 | return None
68 |
69 | def get_stats(self):
70 | """
71 | Returns: An empty dictionary as there are no stats in NoMemory.
72 | """
73 | return {}
74 |
--------------------------------------------------------------------------------
/autogpt/memory/pinecone.py:
--------------------------------------------------------------------------------
1 | import pinecone
2 | from colorama import Fore, Style
3 |
4 | from autogpt.llm import get_ada_embedding
5 | from autogpt.logs import logger
6 | from autogpt.memory.base import MemoryProviderSingleton
7 |
8 |
9 | class PineconeMemory(MemoryProviderSingleton):
10 | def __init__(self, cfg):
11 | pinecone_api_key = cfg.pinecone_api_key
12 | pinecone_region = cfg.pinecone_region
13 | pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
14 | dimension = 1536
15 | metric = "cosine"
16 | pod_type = "p1"
17 | table_name = "auto-gpt"
18 | # this assumes we don't start with memory.
19 | # for now this works.
20 | # we'll need a more complicated and robust system if we want to start with
21 | # memory.
22 | self.vec_num = 0
23 |
24 | try:
25 | pinecone.whoami()
26 | except Exception as e:
27 | logger.typewriter_log(
28 | "FAILED TO CONNECT TO PINECONE",
29 | Fore.RED,
30 | Style.BRIGHT + str(e) + Style.RESET_ALL,
31 | )
32 | logger.double_check(
33 |                 "Please ensure you have set up and configured Pinecone properly for use. "
34 | + f"You can check out {Fore.CYAN + Style.BRIGHT}"
35 | "https://docs.agpt.co/configuration/memory/#pinecone-api-key-setup"
36 | f"{Style.RESET_ALL} to ensure you've set up everything correctly."
37 | )
38 | exit(1)
39 |
40 | if table_name not in pinecone.list_indexes():
41 | logger.typewriter_log(
42 | "Connecting Pinecone. This may take some time...", Fore.MAGENTA, ""
43 | )
44 | pinecone.create_index(
45 | table_name, dimension=dimension, metric=metric, pod_type=pod_type
46 | )
47 | self.index = pinecone.Index(table_name)
48 |
49 | def add(self, data):
50 | vector = get_ada_embedding(data)
51 | # no metadata here. We may wish to change that long term.
52 | self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
53 | _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
54 | self.vec_num += 1
55 | return _text
56 |
57 | def get(self, data):
58 | return self.get_relevant(data, 1)
59 |
60 | def clear(self):
61 | self.index.delete(deleteAll=True)
62 | return "Obliviated"
63 |
64 | def get_relevant(self, data, num_relevant=5):
65 | """
66 | Returns all the data in the memory that is relevant to the given data.
67 | :param data: The data to compare to.
68 | :param num_relevant: The number of relevant data to return. Defaults to 5
69 | """
70 | query_embedding = get_ada_embedding(data)
71 | results = self.index.query(
72 | query_embedding, top_k=num_relevant, include_metadata=True
73 | )
74 | sorted_results = sorted(results.matches, key=lambda x: x.score)
75 | return [str(item["metadata"]["raw_text"]) for item in sorted_results]
76 |
77 | def get_stats(self):
78 | return self.index.describe_index_stats()
79 |
--------------------------------------------------------------------------------
/autogpt/memory_management/store_memory.py:
--------------------------------------------------------------------------------
1 | from autogpt.json_utils.utilities import (
2 | LLM_DEFAULT_RESPONSE_FORMAT,
3 | is_string_valid_json,
4 | )
5 | from autogpt.logs import logger
6 |
7 |
8 | def format_memory(assistant_reply, next_message_content):
9 |     # next_message_content stores either the user input or the command result that follows the assistant_reply
10 | result = (
11 | "None" if next_message_content.startswith("Command") else next_message_content
12 | )
13 | user_input = (
14 | "None"
15 | if next_message_content.startswith("Human feedback")
16 | else next_message_content
17 | )
18 |
19 |     return f"Assistant Reply: {assistant_reply}\nResult: {result}\nHuman Feedback: {user_input}"
20 |
21 |
22 | def save_memory_trimmed_from_context_window(
23 | full_message_history, next_message_to_add_index, permanent_memory
24 | ):
25 | while next_message_to_add_index >= 0:
26 | message_content = full_message_history[next_message_to_add_index]["content"]
27 | if is_string_valid_json(message_content, LLM_DEFAULT_RESPONSE_FORMAT):
28 | next_message = full_message_history[next_message_to_add_index + 1]
29 | memory_to_add = format_memory(message_content, next_message["content"])
30 | logger.debug(f"Storing the following memory: {memory_to_add}")
31 | permanent_memory.add(memory_to_add)
32 |
33 | next_message_to_add_index -= 1
34 |
--------------------------------------------------------------------------------
/autogpt/processing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/autogpt/processing/__init__.py
--------------------------------------------------------------------------------
/autogpt/processing/html.py:
--------------------------------------------------------------------------------
1 | """HTML processing functions"""
2 | from __future__ import annotations
3 |
4 | from bs4 import BeautifulSoup
5 | from requests.compat import urljoin
6 |
7 |
8 | def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
9 | """Extract hyperlinks from a BeautifulSoup object
10 |
11 | Args:
12 | soup (BeautifulSoup): The BeautifulSoup object
13 | base_url (str): The base URL
14 |
15 | Returns:
16 | List[Tuple[str, str]]: The extracted hyperlinks
17 | """
18 | return [
19 | (link.text, urljoin(base_url, link["href"]))
20 | for link in soup.find_all("a", href=True)
21 | ]
22 |
23 |
24 | def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
25 | """Format hyperlinks to be displayed to the user
26 |
27 | Args:
28 | hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
29 |
30 | Returns:
31 | List[str]: The formatted hyperlinks
32 | """
33 | return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
34 |
--------------------------------------------------------------------------------
/autogpt/prompts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/autogpt/prompts/__init__.py
--------------------------------------------------------------------------------
/autogpt/prompts/default_prompts.py:
--------------------------------------------------------------------------------
1 | #########################Setup.py#################################
2 |
3 | DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC = """
4 | Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.
5 |
6 | The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.
7 |
8 | Example input:
9 | Help me with marketing my business
10 |
11 | Example output:
12 | Name: CMOGPT
13 | Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more.
14 | Goals:
15 | - Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.
16 |
17 | - Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.
18 |
19 | - Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.
20 |
21 | - Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track.
22 | """
23 |
24 | DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC = (
25 | "Task: '{{user_prompt}}'\n"
26 | "Respond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n"
27 | )
28 |
29 | DEFAULT_USER_DESIRE_PROMPT = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT" # Default prompt
30 |
--------------------------------------------------------------------------------
/autogpt/singleton.py:
--------------------------------------------------------------------------------
1 | """The singleton metaclass for ensuring only one instance of a class."""
2 | import abc
3 |
4 |
5 | class Singleton(abc.ABCMeta, type):
6 | """
7 | Singleton metaclass for ensuring only one instance of a class.
8 | """
9 |
10 | _instances = {}
11 |
12 | def __call__(cls, *args, **kwargs):
13 | """Call method for the singleton metaclass."""
14 | if cls not in cls._instances:
15 | cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
16 | return cls._instances[cls]
17 |
18 |
19 | class AbstractSingleton(abc.ABC, metaclass=Singleton):
20 | """
21 | Abstract singleton class for ensuring only one instance of a class.
22 | """
23 |
24 | pass
25 |
--------------------------------------------------------------------------------
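A small demonstration of the metaclass guarantee: constructing the class twice yields one shared instance (the Counter class here is hypothetical):

```python
from autogpt.singleton import Singleton


class Counter(metaclass=Singleton):
    def __init__(self):
        self.value = 0


a = Counter()
b = Counter()
a.value += 1
assert a is b and b.value == 1  # both names point at one instance
```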
/autogpt/speech/__init__.py:
--------------------------------------------------------------------------------
1 | """This module contains the speech recognition and speech synthesis functions."""
2 | from autogpt.speech.say import say_text
3 |
4 | __all__ = ["say_text"]
5 |
--------------------------------------------------------------------------------
/autogpt/speech/base.py:
--------------------------------------------------------------------------------
1 | """Base class for all voice classes."""
2 | import abc
3 | from threading import Lock
4 |
5 | from autogpt.singleton import AbstractSingleton
6 |
7 |
8 | class VoiceBase(AbstractSingleton):
9 | """
10 | Base class for all voice classes.
11 | """
12 |
13 | def __init__(self):
14 | """
15 | Initialize the voice class.
16 | """
17 | self._url = None
18 | self._headers = None
19 | self._api_key = None
20 | self._voices = []
21 | self._mutex = Lock()
22 | self._setup()
23 |
24 | def say(self, text: str, voice_index: int = 0) -> bool:
25 | """
26 | Say the given text.
27 |
28 | Args:
29 | text (str): The text to say.
30 | voice_index (int): The index of the voice to use.
31 | """
32 | with self._mutex:
33 | return self._speech(text, voice_index)
34 |
35 | @abc.abstractmethod
36 | def _setup(self) -> None:
37 | """
38 | Setup the voices, API key, etc.
39 | """
40 | pass
41 |
42 | @abc.abstractmethod
43 | def _speech(self, text: str, voice_index: int = 0) -> bool:
44 | """
45 | Play the given text.
46 |
47 | Args:
48 | text (str): The text to play.
49 | """
50 | pass
51 |
--------------------------------------------------------------------------------
/autogpt/speech/brian.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | import requests
5 | from playsound import playsound
6 |
7 | from autogpt.speech.base import VoiceBase
8 |
9 |
10 | class BrianSpeech(VoiceBase):
11 | """Brian speech module for autogpt"""
12 |
13 | def _setup(self) -> None:
14 | """Setup the voices, API key, etc."""
15 | pass
16 |
17 | def _speech(self, text: str, _: int = 0) -> bool:
18 | """Speak text using Brian with the streamelements API
19 |
20 | Args:
21 | text (str): The text to speak
22 |
23 | Returns:
24 | bool: True if the request was successful, False otherwise
25 | """
26 |         tts_url = "https://api.streamelements.com/kappa/v2/speech"
27 |         # Pass voice and text as query params so the text is URL-encoded.
28 |         params = {"voice": "Brian", "text": text}
29 |         response = requests.get(tts_url, params=params)
30 |
31 | if response.status_code == 200:
32 | with open("speech.mp3", "wb") as f:
33 | f.write(response.content)
34 | playsound("speech.mp3")
35 | os.remove("speech.mp3")
36 | return True
37 | else:
38 | logging.error(
39 | "Request failed with status code: %s, response content: %s",
40 | response.status_code,
41 | response.content,
42 | )
43 | return False
44 |
--------------------------------------------------------------------------------
/autogpt/speech/eleven_labs.py:
--------------------------------------------------------------------------------
1 | """ElevenLabs speech module"""
2 | import os
3 |
4 | import requests
5 | from playsound import playsound
6 |
7 | from autogpt.config import Config
8 | from autogpt.speech.base import VoiceBase
9 |
10 | PLACEHOLDERS = {"your-voice-id"}
11 |
12 |
13 | class ElevenLabsSpeech(VoiceBase):
14 | """ElevenLabs speech class"""
15 |
16 | def _setup(self) -> None:
17 | """Set up the voices, API key, etc.
18 |
19 | Returns:
20 | None: None
21 | """
22 |
23 | cfg = Config()
24 | default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
25 | voice_options = {
26 | "Rachel": "21m00Tcm4TlvDq8ikWAM",
27 | "Domi": "AZnzlk1XvdvUeBnXmlld",
28 | "Bella": "EXAVITQu4vr4xnSDxMaL",
29 | "Antoni": "ErXwobaYiN019PkySvjV",
30 | "Elli": "MF3mGyEYCl7XYWbV9V6O",
31 | "Josh": "TxGEqnHWrfWFTfGW9XjX",
32 | "Arnold": "VR6AewLTigWG4xSOukaG",
33 | "Adam": "pNInz6obpgDQGcFmaJgB",
34 | "Sam": "yoZ06aMxZJJ28mfd3POQ",
35 | }
36 | self._headers = {
37 | "Content-Type": "application/json",
38 | "xi-api-key": cfg.elevenlabs_api_key,
39 | }
40 | self._voices = default_voices.copy()
41 | if cfg.elevenlabs_voice_1_id in voice_options:
42 | cfg.elevenlabs_voice_1_id = voice_options[cfg.elevenlabs_voice_1_id]
43 | if cfg.elevenlabs_voice_2_id in voice_options:
44 | cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id]
45 | self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0)
46 | self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1)
47 |
48 | def _use_custom_voice(self, voice, voice_index) -> None:
49 | """Use a custom voice if provided and not a placeholder
50 |
51 | Args:
52 | voice (str): The voice ID
53 | voice_index (int): The voice index
54 |
55 | Returns:
56 | None: None
57 | """
58 | # Placeholder values that should be treated as empty
59 | if voice and voice not in PLACEHOLDERS:
60 | self._voices[voice_index] = voice
61 |
62 | def _speech(self, text: str, voice_index: int = 0) -> bool:
63 | """Speak text using elevenlabs.io's API
64 |
65 | Args:
66 | text (str): The text to speak
67 | voice_index (int, optional): The voice to use. Defaults to 0.
68 |
69 | Returns:
70 | bool: True if the request was successful, False otherwise
71 | """
72 | from autogpt.logs import logger
73 |
74 | tts_url = (
75 | f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
76 | )
77 | response = requests.post(tts_url, headers=self._headers, json={"text": text})
78 |
79 | if response.status_code == 200:
80 | with open("speech.mpeg", "wb") as f:
81 | f.write(response.content)
82 | playsound("speech.mpeg", True)
83 | os.remove("speech.mpeg")
84 | return True
85 | else:
86 | logger.warn("Request failed with status code:", response.status_code)
87 | logger.info("Response content:", response.content)
88 | return False
89 |
--------------------------------------------------------------------------------
/autogpt/speech/gtts.py:
--------------------------------------------------------------------------------
1 | """ GTTS Voice. """
2 | import os
3 |
4 | import gtts
5 | from playsound import playsound
6 |
7 | from autogpt.speech.base import VoiceBase
8 |
9 |
10 | class GTTSVoice(VoiceBase):
11 | """GTTS Voice."""
12 |
13 | def _setup(self) -> None:
14 | pass
15 |
16 | def _speech(self, text: str, _: int = 0) -> bool:
17 | """Play the given text."""
18 | tts = gtts.gTTS(text)
19 | tts.save("speech.mp3")
20 | playsound("speech.mp3", True)
21 | os.remove("speech.mp3")
22 | return True
23 |
--------------------------------------------------------------------------------
/autogpt/speech/macos_tts.py:
--------------------------------------------------------------------------------
1 | """ MacOS TTS Voice. """
2 | import os
3 |
4 | from autogpt.speech.base import VoiceBase
5 |
6 |
7 | class MacOSTTS(VoiceBase):
8 | """MacOS TTS Voice."""
9 |
10 | def _setup(self) -> None:
11 | pass
12 |
13 | def _speech(self, text: str, voice_index: int = 0) -> bool:
14 | """Play the given text."""
15 | if voice_index == 0:
16 | os.system(f'say "{text}"')
17 | elif voice_index == 1:
18 | os.system(f'say -v "Ava (Premium)" "{text}"')
19 | else:
20 | os.system(f'say -v Samantha "{text}"')
21 | return True
22 |
--------------------------------------------------------------------------------
/autogpt/speech/say.py:
--------------------------------------------------------------------------------
1 | """ Text to speech module """
2 | import threading
3 | from threading import Semaphore
4 |
5 | from autogpt.config import Config
6 | from autogpt.speech.base import VoiceBase
7 | from autogpt.speech.brian import BrianSpeech
8 | from autogpt.speech.eleven_labs import ElevenLabsSpeech
9 | from autogpt.speech.gtts import GTTSVoice
10 | from autogpt.speech.macos_tts import MacOSTTS
11 |
12 | _QUEUE_SEMAPHORE = Semaphore(
13 | 1
14 | ) # The amount of sounds to queue before blocking the main thread
15 |
16 |
17 | def say_text(text: str, voice_index: int = 0) -> None:
18 | """Speak the given text using the given voice index"""
19 | cfg = Config()
20 | default_voice_engine, voice_engine = _get_voice_engine(cfg)
21 |
22 | def speak() -> None:
23 | success = voice_engine.say(text, voice_index)
24 | if not success:
25 | default_voice_engine.say(text)
26 |
27 | _QUEUE_SEMAPHORE.release()
28 |
29 | _QUEUE_SEMAPHORE.acquire(True)
30 | thread = threading.Thread(target=speak)
31 | thread.start()
32 |
33 |
34 | def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
35 | """Get the voice engine to use for the given configuration"""
36 | default_voice_engine = GTTSVoice()
37 | if config.elevenlabs_api_key:
38 | voice_engine = ElevenLabsSpeech()
39 | elif config.use_mac_os_tts == "True":
40 | voice_engine = MacOSTTS()
41 | elif config.use_brian_tts == "True":
42 | voice_engine = BrianSpeech()
43 | else:
44 | voice_engine = GTTSVoice()
45 |
46 | return default_voice_engine, voice_engine
47 |
--------------------------------------------------------------------------------
/autogpt/spinner.py:
--------------------------------------------------------------------------------
1 | """A simple spinner module"""
2 | import itertools
3 | import sys
4 | import threading
5 | import time
6 |
7 |
8 | class Spinner:
9 | """A simple spinner class"""
10 |
11 | def __init__(self, message: str = "Loading...", delay: float = 0.1) -> None:
12 | """Initialize the spinner class
13 |
14 | Args:
15 | message (str): The message to display.
16 | delay (float): The delay between each spinner update.
17 | """
18 | self.spinner = itertools.cycle(["-", "/", "|", "\\"])
19 | self.delay = delay
20 | self.message = message
21 | self.running = False
22 | self.spinner_thread = None
23 |
24 | def spin(self) -> None:
25 | """Spin the spinner"""
26 | while self.running:
27 | sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
28 | sys.stdout.flush()
29 | time.sleep(self.delay)
30 | sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
31 |
32 | def __enter__(self):
33 | """Start the spinner"""
34 | self.running = True
35 | self.spinner_thread = threading.Thread(target=self.spin)
36 | self.spinner_thread.start()
37 |
38 | return self
39 |
40 | def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
41 | """Stop the spinner
42 |
43 | Args:
44 | exc_type (Exception): The exception type.
45 | exc_value (Exception): The exception value.
46 | exc_traceback (Exception): The exception traceback.
47 | """
48 | self.running = False
49 | if self.spinner_thread is not None:
50 | self.spinner_thread.join()
51 | sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
52 | sys.stdout.flush()
53 |
54 | def update_message(self, new_message, delay=0.1):
55 | """Update the spinner message
56 | Args:
57 | new_message (str): New message to display.
58 | delay (float): The delay in seconds between each spinner update.
59 | """
60 | time.sleep(delay)
61 | sys.stdout.write(
62 | f"\r{' ' * (len(self.message) + 2)}\r"
63 | ) # Clear the current message
64 | sys.stdout.flush()
65 | self.message = new_message
66 |
--------------------------------------------------------------------------------
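Typical usage is as a context manager; a brief sketch:

```python
import time

from autogpt.spinner import Spinner

with Spinner("Thinking...") as spinner:
    time.sleep(1)  # some slow work
    spinner.update_message("Almost done...")
    time.sleep(1)
# On exit the spinner thread is joined and the line is cleared.
```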
/autogpt/url_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/autogpt/url_utils/__init__.py
--------------------------------------------------------------------------------
/autogpt/url_utils/validators.py:
--------------------------------------------------------------------------------
1 | import functools
2 | from typing import Any, Callable
3 | from urllib.parse import urlparse
4 |
5 | from requests.compat import urljoin
6 |
7 |
8 | def validate_url(func: Callable[..., Any]) -> Any:
9 |     """A decorator that validates the URL for any command that requires
10 |     a URL as an argument."""
11 |
12 | @functools.wraps(func)
13 | def wrapper(url: str, *args, **kwargs) -> Any:
14 | """Check if the URL is valid using a basic check, urllib check, and local file check
15 |
16 | Args:
17 | url (str): The URL to check
18 |
19 | Returns:
20 | the result of the wrapped function
21 |
22 | Raises:
23 | ValueError if the url fails any of the validation tests
24 | """
25 | # Most basic check if the URL is valid:
26 | if not url.startswith("http://") and not url.startswith("https://"):
27 | raise ValueError("Invalid URL format")
28 | if not is_valid_url(url):
29 | raise ValueError("Missing Scheme or Network location")
30 | # Restrict access to local files
31 | if check_local_file_access(url):
32 | raise ValueError("Access to local files is restricted")
33 |
34 | return func(sanitize_url(url), *args, **kwargs)
35 |
36 | return wrapper
37 |
38 |
39 | def is_valid_url(url: str) -> bool:
40 | """Check if the URL is valid
41 |
42 | Args:
43 | url (str): The URL to check
44 |
45 | Returns:
46 | bool: True if the URL is valid, False otherwise
47 | """
48 | try:
49 | result = urlparse(url)
50 | return all([result.scheme, result.netloc])
51 | except ValueError:
52 | return False
53 |
54 |
55 | def sanitize_url(url: str) -> str:
56 | """Sanitize the URL
57 |
58 | Args:
59 | url (str): The URL to sanitize
60 |
61 | Returns:
62 | str: The sanitized URL
63 | """
64 | parsed_url = urlparse(url)
65 | reconstructed_url = f"{parsed_url.path}{parsed_url.params}?{parsed_url.query}"
66 | return urljoin(url, reconstructed_url)
67 |
68 |
69 | def check_local_file_access(url: str) -> bool:
70 | """Check if the URL is a local file
71 |
72 | Args:
73 | url (str): The URL to check
74 |
75 | Returns:
76 | bool: True if the URL is a local file, False otherwise
77 | """
78 | local_prefixes = [
79 | "file:///",
80 | "file://localhost/",
81 | "file://localhost",
82 | "http://localhost",
83 | "http://localhost/",
84 | "https://localhost",
85 | "https://localhost/",
86 | "http://2130706433",
87 | "http://2130706433/",
88 | "https://2130706433",
89 | "https://2130706433/",
90 | "http://127.0.0.1/",
91 | "http://127.0.0.1",
92 | "https://127.0.0.1/",
93 | "https://127.0.0.1",
94 | "https://0.0.0.0/",
95 | "https://0.0.0.0",
96 | "http://0.0.0.0/",
97 | "http://0.0.0.0",
98 | "http://0000",
99 | "http://0000/",
100 | "https://0000",
101 | "https://0000/",
102 | ]
103 | return any(url.startswith(prefix) for prefix in local_prefixes)
104 |
--------------------------------------------------------------------------------
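A sketch of the decorator guarding a hypothetical command:

```python
from autogpt.url_utils.validators import validate_url


@validate_url
def fetch_title(url: str) -> str:
    # Hypothetical command body; `url` has already been sanitized.
    return f"would fetch {url}"


print(fetch_title("https://example.com/page?q=1"))

try:
    fetch_title("file:///etc/passwd")
except ValueError as e:
    print(e)  # Invalid URL format
```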
/autogpt/workspace/__init__.py:
--------------------------------------------------------------------------------
1 | from autogpt.workspace.workspace import Workspace
2 |
3 | __all__ = [
4 | "Workspace",
5 | ]
6 |
--------------------------------------------------------------------------------
/azure.yaml.template:
--------------------------------------------------------------------------------
1 | azure_api_type: azure
2 | azure_api_base: your-base-url-for-azure
3 | azure_api_version: api-version-for-azure
4 | azure_model_map:
5 | fast_llm_model_deployment_id: gpt35-deployment-id-for-azure
6 | smart_llm_model_deployment_id: gpt4-deployment-id-for-azure
7 | embedding_model_deployment_id: embedding-deployment-id-for-azure
8 |
--------------------------------------------------------------------------------
/benchmark/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/benchmark/__init__.py
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | status:
3 | project:
4 | default:
5 | target: auto
6 | threshold: 1%
7 | informational: true
8 | patch:
9 | default:
10 | target: 80%
11 |
12 | ## Please add this section once you've separated your coverage uploads for unit and integration tests
13 | #
14 | # flags:
15 | # unit-tests:
16 | # carryforward: true
17 | # integration-tests:
18 | # carryforward: true
19 |
--------------------------------------------------------------------------------
/data/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/data/.keep
--------------------------------------------------------------------------------
/data_ingestion.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 |
4 | from autogpt.commands.file_operations import ingest_file, list_files
5 | from autogpt.config import Config
6 | from autogpt.memory import get_memory
7 |
8 | cfg = Config()
9 |
10 |
11 | def configure_logging():
12 | logging.basicConfig(
13 | format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
14 | datefmt="%H:%M:%S",
15 | level=logging.DEBUG,
16 | handlers=[
17 | logging.FileHandler(filename="log-ingestion.txt", mode="a"),
18 | logging.StreamHandler(),
19 | ],
20 | )
21 | return logging.getLogger("AutoGPT-Ingestion")
22 |
23 |
24 | def ingest_directory(directory, memory, args):
25 | """
26 | Ingest all files in a directory by calling the ingest_file function for each file.
27 |
28 | :param directory: The directory containing the files to ingest
29 | :param memory: An object with an add() method to store the chunks in memory
30 | """
31 |     logger = logging.getLogger("AutoGPT-Ingestion")
32 | try:
33 | files = list_files(directory)
34 | for file in files:
35 | ingest_file(file, memory, args.max_length, args.overlap)
36 | except Exception as e:
37 | logger.error(f"Error while ingesting directory '{directory}': {str(e)}")
38 |
39 |
40 | def main() -> None:
41 | logger = configure_logging()
42 |
43 | parser = argparse.ArgumentParser(
44 | description="Ingest a file or a directory with multiple files into memory. "
45 | "Make sure to set your .env before running this script."
46 | )
47 | group = parser.add_mutually_exclusive_group(required=True)
48 | group.add_argument("--file", type=str, help="The file to ingest.")
49 | group.add_argument(
50 | "--dir", type=str, help="The directory containing the files to ingest."
51 | )
52 | parser.add_argument(
53 | "--init",
54 | action="store_true",
55 | help="Init the memory and wipe its content (default: False)",
56 | default=False,
57 | )
58 | parser.add_argument(
59 | "--overlap",
60 | type=int,
61 | help="The overlap size between chunks when ingesting files (default: 200)",
62 | default=200,
63 | )
64 | parser.add_argument(
65 | "--max_length",
66 | type=int,
67 | help="The max_length of each chunk when ingesting files (default: 4000)",
68 | default=4000,
69 | )
70 | args = parser.parse_args()
71 |
72 | # Initialize memory
73 | memory = get_memory(cfg, init=args.init)
74 | logger.debug("Using memory of type: " + memory.__class__.__name__)
75 |
76 | if args.file:
77 | try:
78 | ingest_file(args.file, memory, args.max_length, args.overlap)
79 | logger.info(f"File '{args.file}' ingested successfully.")
80 | except Exception as e:
81 | logger.error(f"Error while ingesting file '{args.file}': {str(e)}")
82 | elif args.dir:
83 | try:
84 | ingest_directory(args.dir, memory, args)
85 | logger.info(f"Directory '{args.dir}' ingested successfully.")
86 | except Exception as e:
87 | logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
88 | else:
89 | logger.warn(
90 | "Please provide either a file path (--file) or a directory name (--dir)"
91 | " inside the auto_gpt_workspace directory as input."
92 | )
93 |
94 |
95 | if __name__ == "__main__":
96 | main()
97 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # To boot the app run the following:
2 | # docker-compose run auto-gpt
3 | # NOTE: Version 3.9 requires at least docker-compose version 1.29.0 !
4 | version: "3.9"
5 |
6 | services:
7 | auto-gpt:
8 | depends_on:
9 | - redis
10 | build: ./
11 | env_file:
12 | - .env
13 | environment:
14 | MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
15 | REDIS_HOST: ${REDIS_HOST:-redis}
16 | volumes:
17 | - ./:/app
18 | profiles: ["exclude-from-up"]
19 |
20 | redis:
21 | image: "redis/redis-stack-server:latest"
22 |
--------------------------------------------------------------------------------
/docs/challenges/beat.md:
--------------------------------------------------------------------------------
1 | # Beat a Challenge
2 |
3 | If you have a solution or idea to tackle an existing challenge, you can contribute by working on it and submitting your solution. Here's how to get started:
4 |
5 | ## Guidelines for Beating a Challenge
6 |
7 | 1. **Choose a challenge**: Browse the [List of Challenges](list.md) and choose one that interests you or aligns with your expertise.
8 |
9 | 2. **Understand the problem**: Make sure you thoroughly understand the problem at hand, its scope, and the desired outcome.
10 |
11 | 3. **Develop a solution**: Work on creating a solution for the challenge. This may/
12 |
--------------------------------------------------------------------------------
/docs/challenges/challenge_template.md:
--------------------------------------------------------------------------------
1 | # Challenge Title
2 |
3 | ## Description
4 |
5 | Provide a clear and concise description of the challenge. Include any relevant examples or files to illustrate the problem.
6 |
7 | ## Input
8 |
9 | If the challenge involves specific input files, describe them here. Provide the file names and their contents, if necessary. Use triple backticks (```) to format the content as a code block.
10 |
11 | For example:
12 |
13 | instructions_1.txt
14 |
15 | The current task_id is 4563.\n[NOISE intended to confuse the agent]
16 | Read the file instructions_2.txt using the read_file command.
17 |
18 | ## Scope
19 |
20 | Define the scope of the challenge, including any relevant constraints, requirements, or limitations.
21 |
22 | ## Success Evaluation
23 |
24 | Explain how success will be measured or evaluated for the challenge. This helps others understand what the desired outcome is and how to work towards it.
25 |
--------------------------------------------------------------------------------
/docs/challenges/information_retrieval/challenge_a.md:
--------------------------------------------------------------------------------
1 | # Information Retrieval Challenge A
2 |
3 | **Status**: Current level to beat: level 1
4 |
5 | **Command to try**:
6 |
7 | ```
8 | pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py
9 | ```
10 |
11 | ## Description
12 |
13 | The agent's goal is to find the revenue of Tesla in 2022.
14 |
15 | It should write the result in a file called output.txt.
16 |
17 | The agent should be able to beat this test consistently (this is the hardest part).
18 | 
19 | ## Objective
20 | The objective of this challenge is to test the agent's ability to retrieve information in a consistent way.
21 |
--------------------------------------------------------------------------------
/docs/challenges/information_retrieval/introduction.md:
--------------------------------------------------------------------------------
1 | # Information Retrieval
2 |
3 | Information retrieval challenges are designed to evaluate the proficiency of an AI agent, such as Auto-GPT, in searching, extracting, and presenting relevant information from a vast array of sources. These challenges often encompass tasks such as interpreting user queries, browsing the web, and filtering through unstructured data.
4 |
--------------------------------------------------------------------------------
/docs/challenges/introduction.md:
--------------------------------------------------------------------------------
1 | # Introduction to Challenges
2 | 
3 | Welcome to the Auto-GPT Challenges page! This is a space where we encourage community members to collaborate and contribute towards improving Auto-GPT by identifying and solving challenges that Auto-GPT is not yet able to achieve.
4 | 
5 | ## What are challenges?
6 | 
7 | Challenges are tasks or problems that Auto-GPT has difficulty solving or has not yet been able to accomplish. These may include improving specific functionalities, enhancing the model's understanding of specific domains, or even developing new features that the current version of Auto-GPT lacks.
8 | 
9 | ## Why are challenges important?
10 | 
11 | Addressing challenges helps us improve Auto-GPT's performance, usability, and versatility. By working together to tackle these challenges, we can create a more powerful and efficient tool for everyone. It also allows the community to actively contribute to the project, making it a true open-source effort.
12 | 
13 | ## How can you participate?
14 | 
15 | There are two main ways to get involved with challenges:
16 | 
17 | 1. **Submit a Challenge**: If you have identified a task that Auto-GPT struggles with, you can submit it as a challenge. This allows others to see the issue and collaborate on finding a solution.
18 | 2. **Beat a Challenge**: If you have a solution or idea to tackle an existing challenge, you can contribute by working on the challenge and submitting your solution.
19 | 
20 | To learn more about submitting and beating challenges, please visit the [List of Challenges](list.md), [Submit a Challenge](submit.md), and [Beat a Challenge](beat.md) pages.
21 | 
22 | We look forward to your contributions and the exciting solutions that the community will develop together to make Auto-GPT even better!
23 | 
--------------------------------------------------------------------------------
/docs/challenges/list.md:
--------------------------------------------------------------------------------
1 | # List of Challenges
2 |
3 | This page contains a curated list of challenges that Auto-GPT currently faces. If you think you have a solution or idea to tackle any of these challenges, feel free to dive in and start working on them! New challenges can also be submitted by following the guidelines on the [Submit a Challenge](submit.md) page.
4 |
5 | Memory Challenges: [List of Challenges](memory/introduction.md)
6 |
--------------------------------------------------------------------------------
/docs/challenges/memory/challenge_a.md:
--------------------------------------------------------------------------------
1 | # Memory Challenge A
2 |
3 | **Status**: Challenge Completed
4 |
5 |
6 | ## Description
7 |
8 | The agent, Follow-Instructions-GPT, has the following goals:
9 |
10 | - Use the command `read_file` to read the `instructions_1.txt` file.
11 | - Follow the instructions in the `instructions_1.txt` file.
12 |
13 | The challenge involves a series of text files containing instructions. The agent must follow the instructions and keep track of the task ID as it progresses through the files.
14 |
15 | ## Files
16 |
17 | - `instructions_1.txt`
18 |
19 | The task_id is 1234
20 |     Read the file instructions_2.txt
21 |
22 |
23 |
24 | - `instructions_2.txt`
25 |
26 |     Read the file instructions_3.txt using the read_file command.
27 |
28 |
29 | ...and so on.
30 |
31 | - `instructions_n.txt`
32 |
33 | Write the task_id into the file output.txt.
34 |
35 |
36 | ## Objective
37 |
38 | The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the task ID throughout the process. The agent successfully completes this challenge if it writes the task ID into `output.txt`.
39 |
40 |
--------------------------------------------------------------------------------
/docs/challenges/memory/challenge_b.md:
--------------------------------------------------------------------------------
1 | # Memory Challenge B
2 |
3 | **Status**: Current level to beat: level 3
4 |
5 | **Command to try**:
6 | ```
7 | pytest -s tests/integration/challenges/memory/test_memory_challenge_b.py --level=3
8 | ```
9 |
10 | ## Description
11 |
12 | The agent, Follow-Instructions-GPT, has the following goals:
13 |
14 | - Use the command `read_file` to read the `instructions_1.txt` file.
15 | - Follow the instructions in the `instructions_1.txt` file.
16 |
17 | The challenge involves a series of text files containing instructions and task IDs. The agent must follow the instructions and keep track of the task IDs as it progresses through the files.
18 |
19 | ## Files
20 |
21 | - `instructions_1.txt`
22 |
23 | The current task_id is 4563.\n[NOISE intended to confuse the agent]
24 | Read the file instructions_2.txt using the read_file command.
25 |
26 |
27 | - `instructions_2.txt`
28 |
29 | The current task_id is 6182.\n[NOISE intended to confuse the agent]
30 | Read the file instructions_3.txt using the read_file command.
31 |
32 |
33 | ...and so on.
34 |
35 | - `instructions_n.txt`
36 |
37 | The current task_id is 8912.
38 | Write all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command.
39 |
40 |
41 | ## Objective
42 |
43 | The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the task IDs throughout the process. The agent successfully completes this challenge if it writes all the task IDs into `output.txt`.
44 |
45 |
--------------------------------------------------------------------------------
/docs/challenges/memory/challenge_c.md:
--------------------------------------------------------------------------------
1 | # Memory Challenge C
2 |
3 | **Status**: Current level to beat: level 1
4 |
5 | **Command to try**:
6 | ```
7 | pytest -s tests/integration/challenges/memory/test_memory_challenge_c.py --level=2
8 | ```
9 |
10 | ## Description
11 |
12 | The agent, Follow-Instructions-GPT, has the following goals:
13 |
14 | - Use the command `read_file` to read the `instructions_1.txt` file.
15 | - Follow the instructions in the `instructions_1.txt` file.
16 |
17 | The challenge involves a series of text files containing instructions and silly phrases. The agent must follow the instructions and keep track of the task IDs as it progresses through the files.
18 |
19 | ## Files
20 |
21 | - `instructions_1.txt`
22 |
23 | The current phrase is "The purple elephant danced on a rainbow while eating a taco.".\n[NOISE intended to confuse the agent]
24 | Read the file instructions_2.txt using the read_file command.
25 |
26 |
27 | - `instructions_2.txt`
28 |
29 | The current phrase is "The sneaky toaster stole my socks and ran away to Hawaii.".\n[NOISE intended to confuse the agent]
30 | Read the file instructions_3.txt using the read_file command.
31 |
32 |
33 | ...and so on.
34 |
35 | - `instructions_n.txt`
36 |
37 | The current phrase is "My pet rock sings better than Beyoncé on Tuesdays."
38 | Write all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command.
39 |
40 |
41 | ## Objective
42 |
43 | The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the phrases throughout the process. The agent successfully completes this challenge if it writes all the phrases into `output.txt`.
44 |
45 | This is presumably harder than remembering task IDs, as the phrases are longer and more likely to be compressed as the agent does more work.
46 |
--------------------------------------------------------------------------------
/docs/challenges/memory/introduction.md:
--------------------------------------------------------------------------------
1 | # Memory Challenges
2 |
3 | Memory challenges are designed to test the ability of an AI agent, like Auto-GPT, to remember and use information throughout a series of tasks. These challenges often involve following instructions, processing text files, and keeping track of important data.
4 |
5 | The goal of memory challenges is to improve an agent's performance in tasks that require remembering and using information over time. By addressing these challenges, we can enhance Auto-GPT's capabilities and make it more useful in real-world applications.
6 |
--------------------------------------------------------------------------------
/docs/challenges/submit.md:
--------------------------------------------------------------------------------
1 | # Submit a Challenge
2 |
3 | If you have identified a task or problem that Auto-GPT struggles with, you can submit it as a challenge for the community to tackle. Here's how you can submit a new challenge:
4 |
5 | ## How to Submit a Challenge
6 |
7 | 1. Create a new `.md` file in the `challenges` directory in the Auto-GPT GitHub repository. Make sure to pick the right category.
8 | 2. Name the file with a descriptive title for the challenge, using hyphens instead of spaces (e.g., `improve-context-understanding.md`).
9 | 3. In the file, follow the [challenge_template.md](challenge_template.md) to describe the problem, define the scope, and evaluate success.
10 | 4. Commit the file and create a pull request.
11 |
12 | Once submitted, the community can review and discuss the challenge. If deemed appropriate, it will be added to the [List of Challenges](list.md).
13 |
14 | If you're looking to contribute by working on an existing challenge, check out [Beat a Challenge](beat.md) for guidelines on how to get started.
15 |
--------------------------------------------------------------------------------
/docs/code-of-conduct.md:
--------------------------------------------------------------------------------
1 | ../CODE_OF_CONDUCT.md
--------------------------------------------------------------------------------
/docs/configuration/imagegen.md:
--------------------------------------------------------------------------------
1 | # 🖼 Image Generation configuration
2 |
3 | | Config variable | Values | |
4 | | ---------------- | ------------------------------- | -------------------- |
5 | | `IMAGE_PROVIDER` | `dalle` `huggingface` `sdwebui` | **default: `dalle`** |
6 |
7 | ## DALL-e
8 |
9 | In `.env`, make sure `IMAGE_PROVIDER` is commented (or set to `dalle`):
10 | ``` ini
11 | # IMAGE_PROVIDER=dalle # this is the default
12 | ```
13 |
14 | Further optional configuration:
15 |
16 | | Config variable | Values | |
17 | | ---------------- | ------------------ | -------------- |
18 | | `IMAGE_SIZE` | `256` `512` `1024` | default: `256` |
19 |
20 | ## Hugging Face
21 |
22 | To use text-to-image models from Hugging Face, you need a Hugging Face API token.
23 | Link to the appropriate settings page: [Hugging Face > Settings > Tokens](https://huggingface.co/settings/tokens)
24 |
25 | Once you have an API token, uncomment and adjust these variables in your `.env`:
26 | ``` ini
27 | IMAGE_PROVIDER=huggingface
28 | HUGGINGFACE_API_TOKEN=your-huggingface-api-token
29 | ```
30 |
31 | Further optional configuration:
32 |
33 | | Config variable | Values | |
34 | | ------------------------- | ---------------------- | ---------------------------------------- |
35 | | `HUGGINGFACE_IMAGE_MODEL` | see [available models] | default: `CompVis/stable-diffusion-v1-4` |
36 |
37 | [available models]: https://huggingface.co/models?pipeline_tag=text-to-image
38 |
39 | ## Stable Diffusion WebUI
40 |
41 | It is possible to use your own self-hosted Stable Diffusion WebUI with Auto-GPT:
42 | ``` ini
43 | IMAGE_PROVIDER=sdwebui
44 | ```
45 |
46 | !!! note
47 | Make sure you are running WebUI with `--api` enabled.
48 |
49 | Further optional configuration:
50 |
51 | | Config variable | Values | |
52 | | --------------- | ----------------------- | -------------------------------- |
53 | | `SD_WEBUI_URL` | URL to your WebUI | default: `http://127.0.0.1:7860` |
54 | | `SD_WEBUI_AUTH` | `{username}:{password}` | *Note: do not copy the braces!* |
55 |
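56 | For example, to point Auto-GPT at a local WebUI instance with basic auth, your `.env` could contain (the values shown are placeholders):
57 |
58 | ``` ini
59 | SD_WEBUI_URL=http://127.0.0.1:7860
60 | SD_WEBUI_AUTH=myuser:mypassword
61 | ```
62 |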
63 | ## Selenium
64 |
65 | To run Selenium on a headless machine, start a virtual display and point `DISPLAY` at it:
66 |
67 | ``` shell
68 | sudo Xvfb :10 -ac -screen 0 1024x768x24 &
69 | export DISPLAY=:10
70 | ```
71 |
--------------------------------------------------------------------------------
/docs/configuration/search.md:
--------------------------------------------------------------------------------
1 | ## 🔍 Google API Keys Configuration
2 |
3 | !!! note
4 | This section is optional. Use the official Google API if search attempts return
5 | error 429. To use the `google_official_search` command, you need to set up your
6 | Google API key in your environment variables.
7 |
8 | Create your project:
9 |
10 | 1. Go to the [Google Cloud Console](https://console.cloud.google.com/).
11 | 2. If you don't already have an account, create one and log in.
12 | 3. Create a new project by clicking on the *Select a Project* dropdown at the top of the
13 |    page and clicking *New Project*.
14 | 4. Give it a name and click *Create*.
15 |
16 | Set up a Custom Search API and add it to your `.env` file:
17 |
18 | 5. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard).
19 | 6. Click *Enable APIs and Services*.
20 | 7. Search for *Custom Search API* and click on it.
21 | 8. Click *Enable*.
22 | 9. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page.
23 | 10. Click *Create Credentials*.
24 | 11. Choose *API Key*.
25 | 12. Copy the API key.
26 | 13. Set it as the `GOOGLE_API_KEY` in your `.env` file.
27 | 14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com)
28 |     the Custom Search API on your project. (It might take a few minutes to propagate.)
29 |
30 | Set up a custom search engine and add it to your `.env` file:
31 |
32 | 15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page.
33 | 16. Click *Add*.
34 | 17. Set up your search engine by following the prompts.
35 |     You can choose to search the entire web or specific sites.
36 | 18. Once you've created your search engine, click on *Control Panel*.
37 | 19. Click *Basics*.
38 | 20. Copy the *Search engine ID*.
39 | 21. Set it as the `CUSTOM_SEARCH_ENGINE_ID` in your `.env` file.
40 |
41 | _Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, assign a billing account to the project to benefit from up to 10K daily searches._
42 |
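43 | Once both steps are done, the relevant part of your `.env` should look something like this (the values shown are placeholders):
44 |
45 | ``` ini
46 | GOOGLE_API_KEY=your-google-api-key
47 | CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
48 | ```
49 |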
--------------------------------------------------------------------------------
/docs/configuration/voice.md:
--------------------------------------------------------------------------------
1 | # Text to Speech
2 |
3 | Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT:
4 |
5 | ``` shell
6 | python -m autogpt --speak
7 | ```
8 |
9 | Eleven Labs provides voice technologies such as voice design, speech synthesis, and
10 | premade voices that Auto-GPT can use for speech.
11 |
12 | 1. Go to [ElevenLabs](https://beta.elevenlabs.io/) and make an account if you don't
13 | already have one.
14 | 2. Choose and set up the *Starter* plan.
15 | 3. Click the top right icon and find *Profile* to locate your API Key.
16 |
17 | In the `.env` file set:
18 |
19 | - `ELEVENLABS_API_KEY`
20 | - `ELEVENLABS_VOICE_1_ID` (example: _"premade/Adam"_)
21 |
22 | ### List of available voices
23 |
24 | !!! note
25 | You can use either the name or the voice ID to configure a voice
26 |
27 | | Name | Voice ID |
28 | | ------ | -------- |
29 | | Rachel | `21m00Tcm4TlvDq8ikWAM` |
30 | | Domi | `AZnzlk1XvdvUeBnXmlld` |
31 | | Bella | `EXAVITQu4vr4xnSDxMaL` |
32 | | Antoni | `ErXwobaYiN019PkySvjV` |
33 | | Elli | `MF3mGyEYCl7XYWbV9V6O` |
34 | | Josh | `TxGEqnHWrfWFTfGW9XjX` |
35 | | Arnold | `VR6AewLTigWG4xSOukaG` |
36 | | Adam | `pNInz6obpgDQGcFmaJgB` |
37 | | Sam | `yoZ06aMxZJJ28mfd3POQ` |
38 |
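39 | For example, to use the premade Adam voice, the relevant `.env` entries could look like this (the API key is a placeholder):
40 |
41 | ``` ini
42 | ELEVENLABS_API_KEY=your-elevenlabs-api-key
43 | ELEVENLABS_VOICE_1_ID=premade/Adam
44 | ```
45 |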
--------------------------------------------------------------------------------
/docs/contributing.md:
--------------------------------------------------------------------------------
1 | ../CONTRIBUTING.md
--------------------------------------------------------------------------------
/docs/imgs/openai-api-key-billing-paid-account.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/docs/imgs/openai-api-key-billing-paid-account.png
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Auto-GPT
2 |
3 | Welcome to Auto-GPT. Please follow the [Installation](/setup/) guide to get started.
4 |
5 | NOTE: It is recommended to use a virtual machine or container (Docker) for tasks that require high security measures to prevent any potential harm to the main computer's system and data. If you are considering using Auto-GPT outside a virtualized/containerized environment, you are *strongly* advised to use a separate user account just for running Auto-GPT. This is even more important if you are going to allow Auto-GPT to write/execute scripts and run shell commands!
6 |
7 | It is for these reasons that executing Python scripts is explicitly disabled when running outside a container environment.
8 |
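9 | For example, on a Linux machine you could create and switch to a dedicated account along these lines (the username `autogpt-runner` is an arbitrary example; adapt to your distribution):
10 |
11 | ``` shell
12 | sudo useradd --create-home autogpt-runner
13 | sudo su - autogpt-runner
14 | ```
15 |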
--------------------------------------------------------------------------------
/docs/plugins.md:
--------------------------------------------------------------------------------
1 | ## Plugins
2 |
3 | ⚠️💀 **WARNING** 💀⚠️: Review the code of any plugin you use thoroughly, as plugins can execute any Python code, potentially leading to malicious activities, such as stealing your API keys.
4 |
5 | See our [Plugins Repo](https://github.com/Significant-Gravitas/Auto-GPT-Plugins) for more info on how to install all the amazing plugins the community has built!
6 |
7 | Alternatively, developers can use the [Auto-GPT Plugin Template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template) as a starting point for creating their own plugins.
8 |
9 |
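10 | For example, once you have downloaded and reviewed a plugin, installing it amounts to dropping the zip into the `plugins` directory and installing its dependencies (the zip name below is a hypothetical example):
11 |
12 | ``` shell
13 | cp ~/Downloads/Auto-GPT-Example-Plugin.zip plugins/
14 | python scripts/install_plugin_deps.py
15 | ```
16 |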
--------------------------------------------------------------------------------
/docs/testing.md:
--------------------------------------------------------------------------------
1 | # Running tests
2 |
3 | To run all tests, use the following command:
4 |
5 | ``` shell
6 | pytest
7 | ```
8 |
9 | If `pytest` is not found:
10 | ``` shell
11 | python -m pytest
12 | ```
13 |
14 | ### Running specific test suites
15 |
16 | - To run without integration tests:
17 |
18 | :::shell
19 | pytest --without-integration
20 |
21 | - To run without *slow* integration tests:
22 |
23 | :::shell
24 | pytest --without-slow-integration
25 |
26 | - To run tests and see coverage:
27 |
28 | :::shell
29 | pytest --cov=autogpt --without-integration --without-slow-integration
30 |
31 | ## Running the linter
32 |
33 | This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting.
34 | We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`.
35 | See the [flake8 rules](https://www.flake8rules.com/) for more information.
36 |
37 | To run the linter:
38 |
39 | ``` shell
40 | flake8 .
41 | ```
42 |
43 | Or:
44 | ``` shell
45 | python -m flake8 .
46 | ```
47 |
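48 | ## Running a single test
49 |
50 | Not specific to this project, but handy: pytest's standard selectors also let you run a single file or test, for example:
51 |
52 | ``` shell
53 | pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py
54 | ```
55 |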
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | from autogpt import main
2 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: Auto-GPT
2 | site_url: https://docs.agpt.co/
3 | repo_url: https://github.com/Significant-Gravitas/Auto-GPT
4 | nav:
5 | - Home: index.md
6 | - Setup: setup.md
7 | - Usage: usage.md
8 | - Plugins: plugins.md
9 | - Configuration:
10 | - Search: configuration/search.md
11 | - Memory: configuration/memory.md
12 | - Voice: configuration/voice.md
13 | - Image Generation: configuration/imagegen.md
14 |
15 | - Contributing:
16 | - Contribution guide: contributing.md
17 | - Running tests: testing.md
18 | - Code of Conduct: code-of-conduct.md
19 |
20 | - Challenges:
21 | - Introduction: challenges/introduction.md
22 | - List of Challenges:
23 | - Memory:
24 | - Introduction: challenges/memory/introduction.md
25 | - Memory Challenge A: challenges/memory/challenge_a.md
26 | - Memory Challenge B: challenges/memory/challenge_b.md
27 | - Memory Challenge C: challenges/memory/challenge_c.md
28 | - Information retrieval:
29 | - Introduction: challenges/information_retrieval/introduction.md
30 | - Information Retrieval Challenge A: challenges/information_retrieval/challenge_a.md
31 | - Submit a Challenge: challenges/submit.md
32 | - Beat a Challenge: challenges/beat.md
33 |
34 | - License: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/LICENSE
35 |
36 | theme: readthedocs
37 |
38 | markdown_extensions:
39 | admonition:
40 | codehilite:
41 | pymdownx.keys:
42 |
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | follow_imports = skip
3 | check_untyped_defs = True
4 | disallow_untyped_defs = True
5 | files = tests/integration/challenges/**/*.py
6 |
7 | [mypy-requests.*]
8 | ignore_missing_imports = True
9 | [mypy-yaml.*]
10 | ignore_missing_imports = True
11 |
--------------------------------------------------------------------------------
/plugin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/plugin.png
--------------------------------------------------------------------------------
/plugins/__PUT_PLUGIN_ZIPS_HERE__:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/plugins/__PUT_PLUGIN_ZIPS_HERE__
--------------------------------------------------------------------------------
/prompt_settings.yaml:
--------------------------------------------------------------------------------
1 | constraints: [
2 | '~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.',
3 | 'If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.',
4 | 'No user assistance',
5 | 'Exclusively use the commands listed below e.g. command_name'
6 | ]
7 | resources: [
8 | 'Internet access for searches and information gathering.',
9 | 'Long Term memory management.',
10 | 'GPT-3.5 powered Agents for delegation of simple tasks.',
11 | 'File output.'
12 | ]
13 | performance_evaluations: [
14 | 'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.',
15 | 'Constructively self-criticize your big-picture behavior constantly.',
16 | 'Reflect on past decisions and strategies to refine your approach.',
17 | 'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.',
18 | 'Write all code to a file.'
19 | ]
20 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling"]
3 | build-backend = "hatchling.build"
4 |
5 | [project]
6 | name = "agpt"
7 | version = "0.3.0"
8 | authors = [
9 | { name="Torantulino", email="support@agpt.co" },
10 | ]
11 | readme = "README.md"
12 | requires-python = ">=3.10"
13 | classifiers = [
14 | "Programming Language :: Python :: 3",
15 | "License :: OSI Approved :: MIT License",
16 | "Operating System :: OS Independent",
17 | ]
18 | description = "An open-source attempt to make GPT-4 autonomous"
19 |
20 | [project.urls]
21 | "Homepage" = "https://github.com/Significant-Gravitas/Auto-GPT"
22 | "Bug Tracker" = "https://github.com/Significant-Gravitas/Auto-GPT"
23 |
24 | [tool.black]
25 | line-length = 88
26 | target-version = ['py310']
27 | include = '\.pyi?$'
28 | packages = ["autogpt"]
29 | extend-exclude = '.+/(dist|.venv|venv|build)/.+'
30 |
31 |
32 | [tool.isort]
33 | profile = "black"
34 | multi_line_output = 3
35 | include_trailing_comma = true
36 | force_grid_wrap = 0
37 | use_parentheses = true
38 | ensure_newline_before_comments = true
39 | line_length = 88
40 | sections = [
41 | "FUTURE",
42 | "STDLIB",
43 | "THIRDPARTY",
44 | "FIRSTPARTY",
45 | "LOCALFOLDER"
46 | ]
47 | skip = '''
48 | .tox
49 | __pycache__
50 | *.pyc
51 | .env
52 | venv*/*
53 | .venv/*
54 | reports/*
55 | dist/*
56 |
57 | '''
58 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | beautifulsoup4>=4.12.2
2 | colorama==0.4.6
3 | distro==1.8.0
4 | openai==0.27.2
5 | playsound==1.2.2
6 | python-dotenv==1.0.0
7 | pyyaml==6.0
8 | readability-lxml==0.8.1
9 | requests
10 | tiktoken==0.3.3
11 | gTTS==2.3.1
12 | docker
13 | duckduckgo-search>=2.9.5
14 | google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
15 | pinecone-client==2.2.1
16 | redis
17 | orjson==3.8.10
18 | Pillow
19 | selenium==4.1.4
20 | webdriver-manager
21 | jsonschema
22 | tweepy
23 | click
24 | charset-normalizer>=3.1.0
25 | spacy>=3.0.0,<4.0.0
26 | en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl
27 | pygithub
28 | ## Dev
29 | coverage
30 | flake8
31 | numpy
32 | pre-commit
33 | black
34 | isort
35 | gitpython==3.1.31
36 | auto-gpt-plugin-template
37 | mkdocs
38 | pymdown-extensions
39 | mypy
40 |
41 | # OpenAI and Generic plugins import
42 | openapi-python-client==0.13.4
43 |
44 | # Items below this point will not be included in the Docker Image
45 |
46 | # Testing dependencies
47 | pytest
48 | asynctest
49 | pytest-asyncio
50 | pytest-benchmark
51 | pytest-cov
52 | pytest-integration
53 | pytest-mock
54 | vcrpy
55 | pytest-recording
56 | pytest-xdist
57 |
--------------------------------------------------------------------------------
/run.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | setlocal enabledelayedexpansion
3 |
4 | :FindPythonCommand
5 | for %%A in (python python3) do (
6 | where /Q %%A
7 | if !errorlevel! EQU 0 (
8 | set "PYTHON_CMD=%%A"
9 | goto :Found
10 | )
11 | )
12 |
13 | echo Python not found. Please install Python.
14 | pause
15 | exit /B 1
16 |
17 | :Found
18 | %PYTHON_CMD% scripts/check_requirements.py requirements.txt
19 | if errorlevel 1 (
20 | echo Installing missing packages...
21 | %PYTHON_CMD% -m pip install -r requirements.txt
22 | )
23 | %PYTHON_CMD% -m autogpt %*
24 | pause
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function find_python_command() {
4 | if command -v python &> /dev/null
5 | then
6 | echo "python"
7 | elif command -v python3 &> /dev/null
8 | then
9 | echo "python3"
10 | else
11 | echo "Python not found. Please install Python."
12 | exit 1
13 | fi
14 | }
15 |
16 | PYTHON_CMD=$(find_python_command)
17 |
18 | if $PYTHON_CMD -c "import sys; sys.exit(sys.version_info < (3, 10))"; then
19 | $PYTHON_CMD scripts/check_requirements.py requirements.txt
20 | if [ $? -eq 1 ]
21 | then
22 | echo Installing missing packages...
23 | $PYTHON_CMD -m pip install -r requirements.txt
24 | fi
25 |     $PYTHON_CMD -m autogpt "$@"
26 | read -p "Press any key to continue..."
27 | else
28 | echo "Python 3.10 or higher is required to run Auto GPT."
29 | fi
--------------------------------------------------------------------------------
/run_continuous.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | set argument=--continuous
3 | call run.bat %argument%
4 |
--------------------------------------------------------------------------------
/run_continuous.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ./run.sh --continuous "$@"
4 |
--------------------------------------------------------------------------------
/scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/scripts/__init__.py
--------------------------------------------------------------------------------
/scripts/check_requirements.py:
--------------------------------------------------------------------------------
1 | import re
2 | import sys
3 |
4 | import pkg_resources
5 |
6 |
7 | def main():
8 | requirements_file = sys.argv[1]
9 | with open(requirements_file, "r") as f:
10 | required_packages = [
11 | line.strip().split("#")[0].strip() for line in f.readlines()
12 | ]
13 |
14 | installed_packages = {pkg.key: pkg.version for pkg in pkg_resources.working_set}
15 |
16 | missing_packages = []
17 | for required_package in required_packages:
18 | if not required_package: # Skip empty lines
19 | continue
20 | pkg = pkg_resources.Requirement.parse(required_package)
21 | if (
22 | pkg.key not in installed_packages
23 | or pkg_resources.parse_version(installed_packages[pkg.key])
24 | not in pkg.specifier
25 | ):
26 | missing_packages.append(str(pkg))
27 |
28 | if missing_packages:
29 | print("Missing packages:")
30 | print(", ".join(missing_packages))
31 | sys.exit(1)
32 | else:
33 | print("All packages are installed.")
34 |
35 |
36 | if __name__ == "__main__":
37 | main()
38 |
--------------------------------------------------------------------------------
/scripts/install_plugin_deps.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import sys
4 | import zipfile
5 | from pathlib import Path
6 |
7 |
8 | def install_plugin_dependencies():
9 | """
10 | Installs dependencies for all plugins in the plugins dir.
11 |
12 | Args:
13 | None
14 |
15 | Returns:
16 | None
17 | """
18 | plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins"))
19 | for plugin in plugins_dir.glob("*.zip"):
20 | with zipfile.ZipFile(str(plugin), "r") as zfile:
21 | try:
22 | basedir = zfile.namelist()[0]
23 | basereqs = os.path.join(basedir, "requirements.txt")
24 | extracted = zfile.extract(basereqs, path=plugins_dir)
25 | subprocess.check_call(
26 | [sys.executable, "-m", "pip", "install", "-r", extracted]
27 | )
28 | os.remove(extracted)
29 | os.rmdir(os.path.join(plugins_dir, basedir))
30 | except KeyError:
31 | continue
32 |
33 |
34 | if __name__ == "__main__":
35 | install_plugin_dependencies()
36 |
--------------------------------------------------------------------------------
/tests.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import coverage
4 |
5 | if __name__ == "__main__":
6 | # Start coverage collection
7 | cov = coverage.Coverage()
8 | cov.start()
9 |
10 |     # Load all tests discovered in the './tests' directory
11 | suite = unittest.defaultTestLoader.discover("./tests")
12 |
13 | # Run the tests
14 | unittest.TextTestRunner().run(suite)
15 |
16 | # Stop coverage collection
17 | cov.stop()
18 | cov.save()
19 |
20 | # Report the coverage
21 | cov.report(show_missing=True)
22 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 |
4 | import pytest
5 | from pytest_mock import MockerFixture
6 |
7 | from autogpt.config import Config
8 | from autogpt.llm import ApiManager
9 | from autogpt.workspace import Workspace
10 |
11 | pytest_plugins = ["tests.integration.agent_factory"]
12 |
13 | PROXY = os.environ.get("PROXY")
14 |
15 |
16 | @pytest.fixture()
17 | def workspace_root(tmp_path: Path) -> Path:
18 | return tmp_path / "home/users/monty/auto_gpt_workspace"
19 |
20 |
21 | @pytest.fixture()
22 | def workspace(workspace_root: Path) -> Workspace:
23 | workspace_root = Workspace.make_workspace(workspace_root)
24 | return Workspace(workspace_root, restrict_to_workspace=True)
25 |
26 |
27 | @pytest.fixture()
28 | def config(mocker: MockerFixture, workspace: Workspace) -> Config:
29 | config = Config()
30 |
31 | # Do a little setup and teardown since the config object is a singleton
32 | mocker.patch.multiple(
33 | config,
34 | workspace_path=workspace.root,
35 | file_logger_path=workspace.get_path("file_logger.txt"),
36 | )
37 | yield config
38 |
39 |
40 | @pytest.fixture()
41 | def api_manager() -> ApiManager:
42 | if ApiManager in ApiManager._instances:
43 | del ApiManager._instances[ApiManager]
44 | return ApiManager()
45 |
--------------------------------------------------------------------------------
/tests/context.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | # Add the scripts directory to the path so that we can import the browse module.
5 | sys.path.insert(
6 | 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../scripts"))
7 | )
8 |
--------------------------------------------------------------------------------
/tests/integration/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/integration/__init__.py
--------------------------------------------------------------------------------
/tests/integration/challenges/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/integration/challenges/__init__.py
--------------------------------------------------------------------------------
/tests/integration/challenges/basic_abilities/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/integration/challenges/basic_abilities/__init__.py
--------------------------------------------------------------------------------
/tests/integration/challenges/basic_abilities/goal_oriented_tasks.md:
--------------------------------------------------------------------------------
1 | If the goal-oriented task pipeline fails, it means:
2 | - you somehow changed the way the system prompt is generated,
3 | - or you broke Auto-GPT.
4 |
5 | To know which one, you can run the following command:
6 | ```bash
7 | pytest -s tests/integration/goal_oriented
8 | ```
9 |
10 | If the test is successful, it will record new cassettes in VCR. Then you can just push these to your branch
11 | and the pipeline will pass.
--------------------------------------------------------------------------------
/tests/integration/challenges/basic_abilities/test_browse_website.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pytest_mock import MockerFixture
3 |
4 | from autogpt.agent import Agent
5 | from autogpt.commands.file_operations import read_file
6 | from tests.integration.challenges.utils import run_interaction_loop
7 | from tests.utils import requires_api_key
8 |
9 | CYCLE_COUNT = 2
10 |
11 |
12 | @requires_api_key("OPENAI_API_KEY")
13 | @pytest.mark.vcr
14 | def test_browse_website(
15 | browser_agent: Agent,
16 | patched_api_requestor: MockerFixture,
17 | monkeypatch: pytest.MonkeyPatch,
18 | ) -> None:
19 | file_path = browser_agent.workspace.get_path("browse_website.txt")
20 | run_interaction_loop(monkeypatch, browser_agent, CYCLE_COUNT)
21 |
22 | content = read_file(file_path)
23 | assert "£25.89" in content, f"Expected £25.89, got {content}"
24 |
--------------------------------------------------------------------------------
/tests/integration/challenges/basic_abilities/test_write_file.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pytest_mock import MockerFixture
3 |
4 | from autogpt.agent import Agent
5 | from autogpt.commands.file_operations import read_file
6 | from tests.integration.challenges.utils import run_interaction_loop
7 | from tests.utils import requires_api_key
8 |
9 | CYCLE_COUNT = 3
10 |
11 |
12 | @requires_api_key("OPENAI_API_KEY")
13 | @pytest.mark.vcr
14 | def test_write_file(
15 | writer_agent: Agent,
16 | patched_api_requestor: MockerFixture,
17 | monkeypatch: pytest.MonkeyPatch,
18 | ) -> None:
19 | file_path = str(writer_agent.workspace.get_path("hello_world.txt"))
20 | run_interaction_loop(monkeypatch, writer_agent, CYCLE_COUNT)
21 |
22 | content = read_file(file_path)
23 | assert content == "Hello World", f"Expected 'Hello World', got {content}"
24 |
--------------------------------------------------------------------------------
/tests/integration/challenges/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from _pytest.config import Config
3 | from _pytest.config.argparsing import Parser
4 | from _pytest.fixtures import FixtureRequest
5 | from _pytest.monkeypatch import MonkeyPatch
6 |
7 |
8 | def pytest_addoption(parser: Parser) -> None:
9 | parser.addoption(
10 | "--level", action="store", default=None, type=int, help="Specify test level"
11 | )
12 |
13 |
14 | def pytest_configure(config: Config) -> None:
15 | config.option.level = config.getoption("--level")
16 |
17 |
18 | @pytest.fixture
19 | def user_selected_level(request: FixtureRequest) -> int:
20 | ## used for challenges in the goal oriented tests
21 | return request.config.option.level
22 |
--------------------------------------------------------------------------------
/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pytest_mock import MockerFixture
3 |
4 | from autogpt.agent import Agent
5 | from autogpt.commands.file_operations import read_file, write_to_file
6 | from tests.integration.challenges.utils import run_interaction_loop, run_multiple_times
7 | from tests.utils import requires_api_key
8 |
9 | CYCLE_COUNT = 3
10 |
11 |
12 | @pytest.mark.skip("This challenge hasn't been beaten yet.")
13 | @pytest.mark.vcr
14 | @requires_api_key("OPENAI_API_KEY")
15 | @run_multiple_times(3)
16 | def test_information_retrieval_challenge_a(
17 | get_company_revenue_agent: Agent,
18 | monkeypatch: pytest.MonkeyPatch,
19 | patched_api_requestor: MockerFixture,
20 | ) -> None:
21 | """
22 | Test the challenge_a function in a given agent by mocking user inputs and checking the output file content.
23 |
24 | :param get_company_revenue_agent: The agent to test.
25 | :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
26 | """
27 | run_interaction_loop(monkeypatch, get_company_revenue_agent, CYCLE_COUNT)
28 |
29 | file_path = str(get_company_revenue_agent.workspace.get_path("output.txt"))
30 | content = read_file(file_path)
31 | assert "81" in content, "Expected the file to contain 81"
32 |
--------------------------------------------------------------------------------
/tests/integration/challenges/kubernetes/test_kubernetes_template_challenge_a.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import yaml
3 |
4 | from autogpt.agent import Agent
5 | from autogpt.commands.file_operations import read_file
6 | from tests.integration.challenges.utils import run_interaction_loop, run_multiple_times
7 | from tests.utils import requires_api_key
8 |
9 | CYCLE_COUNT = 6
10 |
11 |
12 | @pytest.mark.skip("This challenge hasn't been beaten yet.")
13 | @pytest.mark.vcr
14 | @requires_api_key("OPENAI_API_KEY")
15 | @run_multiple_times(3)
16 | def test_kubernetes_template_challenge_a(
17 | kubernetes_agent: Agent, monkeypatch: pytest.MonkeyPatch
18 | ) -> None:
19 | """
20 | Test the challenge_a function in a given agent by mocking user inputs
21 | and checking the output file content.
22 |
23 | :param get_company_revenue_agent: The agent to test.
24 | :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
25 | """
26 | run_interaction_loop(monkeypatch, kubernetes_agent, CYCLE_COUNT)
27 |
28 | file_path = str(kubernetes_agent.workspace.get_path("kube.yaml"))
29 | content = read_file(file_path)
30 |
31 | for word in ["apiVersion", "kind", "metadata", "spec"]:
32 | assert word in content, f"Expected the file to contain {word}"
33 |
34 | content = yaml.safe_load(content)
35 | for word in ["Service", "Deployment", "Pod"]:
36 | assert word in content["kind"], f"Expected the file to contain {word}"
37 |
--------------------------------------------------------------------------------
/tests/integration/challenges/memory/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/integration/challenges/memory/__init__.py
--------------------------------------------------------------------------------
/tests/integration/challenges/memory/test_memory_challenge_a.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pytest_mock import MockerFixture
3 |
4 | from autogpt.agent import Agent
5 | from autogpt.commands.file_operations import read_file, write_to_file
6 | from tests.integration.challenges.utils import get_level_to_run, run_interaction_loop
7 | from tests.utils import requires_api_key
8 |
9 | LEVEL_CURRENTLY_BEATEN = 3  # The real level beaten is 30 or more, but we can't record it: the cassette would be too big.
10 | MAX_LEVEL = 3
11 |
12 |
13 | @pytest.mark.vcr
14 | @requires_api_key("OPENAI_API_KEY")
15 | def test_memory_challenge_a(
16 | memory_management_agent: Agent,
17 | user_selected_level: int,
18 | patched_api_requestor: MockerFixture,
19 | monkeypatch: pytest.MonkeyPatch,
20 | ) -> None:
21 | """
22 | The agent reads a file containing a task_id. Then, it reads a series of other files.
23 | After reading 'n' files, the agent must write the task_id into a new file.
24 |
25 | Args:
26 | memory_management_agent (Agent)
27 | user_selected_level (int)
28 | """
29 |
30 | num_files = get_level_to_run(user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL)
31 |
32 | task_id = "2314"
33 | create_instructions_files(memory_management_agent, num_files, task_id)
34 |
35 | run_interaction_loop(monkeypatch, memory_management_agent, num_files + 2)
36 |
37 | file_path = str(memory_management_agent.workspace.get_path("output.txt"))
38 | content = read_file(file_path)
39 | assert task_id in content, f"Expected the file to contain {task_id}"
40 |
41 |
42 | def create_instructions_files(
43 | memory_management_agent: Agent,
44 | num_files: int,
45 | task_id: str,
46 | base_filename: str = "instructions_",
47 | ) -> None:
48 | """
49 | Creates a series of instructions files for the memory challenge.
50 | Args:
51 | memory_management_agent (Agent)
52 | num_files (int)
53 | task_id (str)
54 | base_filename (str, optional)
55 | """
56 | for i in range(1, num_files + 1):
57 | content = generate_content(i, task_id, base_filename, num_files)
58 | file_name = f"{base_filename}{i}.txt"
59 | file_path = str(memory_management_agent.workspace.get_path(file_name))
60 | write_to_file(file_path, content)
61 |
62 |
63 | def generate_content(
64 | index: int, task_id: str, base_filename: str, num_files: int
65 | ) -> str:
66 | """
67 | Args:
68 | index: int
69 | task_id: str
70 | base_filename: str
71 | num_files: int
72 |
73 | Returns: str
74 | """
75 | if index == 1:
76 | return (
77 | f"This task_id is {task_id}\nRead the file {base_filename}{index + 1}.txt"
78 | )
79 | if index != num_files:
80 | return f"Read the file {base_filename}{index + 1}.txt"
81 | return "Write the task_id into the file output.txt\nShutdown"
82 |
--------------------------------------------------------------------------------
/tests/integration/challenges/memory/test_memory_challenge_b.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pytest_mock import MockerFixture
3 |
4 | from autogpt.agent import Agent
5 | from autogpt.commands.file_operations import read_file, write_to_file
6 | from tests.integration.challenges.utils import (
7 | generate_noise,
8 | get_level_to_run,
9 | run_interaction_loop,
10 | )
11 | from tests.utils import requires_api_key
12 |
13 | LEVEL_CURRENTLY_BEATEN = -1
14 | MAX_LEVEL = 5
15 | NOISE = 1000
16 |
17 |
18 | @pytest.mark.vcr
19 | @requires_api_key("OPENAI_API_KEY")
20 | def test_memory_challenge_b(
21 | memory_management_agent: Agent,
22 | user_selected_level: int,
23 | patched_api_requestor: MockerFixture,
24 | monkeypatch: pytest.MonkeyPatch,
25 | ) -> None:
26 | """
27 | The agent reads a series of files, each containing a task_id and noise. After reading 'n' files,
28 | the agent must write all the task_ids into a new file, filtering out the noise.
29 |
30 | Args:
31 | memory_management_agent (Agent)
32 | user_selected_level (int)
33 | """
34 | current_level = get_level_to_run(
35 | user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL
36 | )
37 | task_ids = [str(i * 1111) for i in range(1, current_level + 1)]
38 | create_instructions_files(memory_management_agent, current_level, task_ids)
39 |
40 | run_interaction_loop(monkeypatch, memory_management_agent, current_level + 2)
41 |
42 | file_path = str(memory_management_agent.workspace.get_path("output.txt"))
43 | content = read_file(file_path)
44 | for task_id in task_ids:
45 | assert task_id in content, f"Expected the file to contain {task_id}"
46 |
47 |
48 | def create_instructions_files(
49 | memory_management_agent: Agent,
50 | level: int,
51 | task_ids: list,
52 | base_filename: str = "instructions_",
53 | ) -> None:
54 | """
55 | Creates a series of instructions files for the memory challenge.
56 | Args:
57 |         memory_management_agent (Agent)
58 |         level (int)
59 |         task_ids (list)
60 |         base_filename (str, optional)
61 |
62 | """
63 | for i in range(1, level + 1):
64 | content = generate_content(i, task_ids, base_filename, level)
65 | file_name = f"{base_filename}{i}.txt"
66 | file_path = str(memory_management_agent.workspace.get_path(file_name))
67 | write_to_file(file_path, content)
68 |
69 |
70 | def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str:
71 | """
72 | Args:
73 | index: int
74 | task_ids: list
75 | base_filename: str
76 |         level: int
77 |
78 | Returns: str
79 | """
80 | task_id = task_ids[index - 1]
81 | noise = generate_noise(NOISE)
82 |     # When level == 1, index == level on the first file, so the final branch below
83 |     # already handles it; intermediate files just point at the next file to read.
84 |     if index != level:
85 |         return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command."
86 |     return f"{noise}\nThis task_id is {task_id}\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command.\n"
87 |
--------------------------------------------------------------------------------
/tests/integration/challenges/pr_review/base.py:
--------------------------------------------------------------------------------
1 | import os
2 | from datetime import datetime
3 |
4 | from github import Github
5 |
6 | from tests.integration.agent_factory import get_pr_review_agent
7 | from tests.integration.challenges.utils import run_interaction_loop
8 |
9 | PR_TARGET_BRANCH = "hackathon-pr-target"
10 | PR_TARGET_REPO_USER = "merwanehamadi"
11 | REPO_NAME = "Auto-GPT"
12 | PR_TARGET_REPO = f"{PR_TARGET_REPO_USER}/{REPO_NAME}"
13 | GITHUB_TOKEN = os.environ.get("GITHUB_PAT")
14 |
15 |
16 | def create_pr(
17 | source_branch_name,
18 | source_repo_user,
19 | title,
20 | body
21 | ):
22 | # First create a Github instance with your token:
23 |
24 | g = Github(GITHUB_TOKEN)
25 |
26 | # Then get your repository:
27 | repo = g.get_user(source_repo_user).get_repo(REPO_NAME)
28 |
29 | # Get the branch you want to copy
30 |
31 | base_branch = repo.get_branch(source_branch_name)
32 |
33 | # Create the name for the new branch
34 | timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
35 | new_branch_name = f"{source_branch_name}-{timestamp}"
36 |
37 | # Create the new branch
38 | repo.create_git_ref(ref=f"refs/heads/{new_branch_name}", sha=base_branch.commit.sha)
39 | title = f"{os.environ.get('TEAM_MEMBER_NAME')} {title}"
40 | # Create a new pull request
41 | pr = repo.create_pull(
42 | title=title,
43 | body=body,
44 | head=new_branch_name,
45 | base=PR_TARGET_BRANCH,
46 | )
47 |
48 | return pr.number
49 |
50 |
51 | def check_pr(pr_number, parameters):
52 | # First create a Github instance with your token:
53 | g = Github(GITHUB_TOKEN)
54 |
55 | # Get the repository
56 | repo = g.get_user(PR_TARGET_REPO_USER).get_repo(REPO_NAME)
57 |
58 | # Get the pull request
59 | pr = repo.get_pull(pr_number)
60 |
61 | # Count approvals
62 | approvals = 0
63 |
64 | # Get reviews for the pull request
65 | for review in pr.get_reviews():
66 | # Check if the review is an approval
67 | if review.state == "APPROVED":
68 | approvals += 1
69 |
70 | print(
71 | f"The PR number {pr_number} in the repository {PR_TARGET_REPO} has {approvals} approvals."
72 | )
73 | if parameters.approved:
74 | assert approvals > 0
75 | else:
76 | assert approvals == 0
77 |
78 |
79 | def run_tests(parameters, monkeypatch, workspace):
80 | pr_number = create_pr(
81 | parameters.source_branch_name,
82 | parameters.source_repo_user,
83 | parameters.title,
84 | parameters.body,
85 | )
86 | review_agent = get_pr_review_agent(pr_number, PR_TARGET_REPO, workspace)
87 | run_interaction_loop(monkeypatch, review_agent, parameters.cycle_count)
88 | check_pr(pr_number, parameters)
89 |
--------------------------------------------------------------------------------
/tests/integration/challenges/pr_review/test_basic_pr_review.py:
--------------------------------------------------------------------------------
1 | from types import SimpleNamespace
2 |
3 | import pytest
4 |
5 | from autogpt.workspace import Workspace
6 | from tests.integration.challenges.pr_review.base import run_tests
7 | from tests.utils import requires_api_key
8 |
9 | PR_LINK = "https://github.com/merwanehamadi/Auto-GPT/pull/116"
10 | PARAMETERS = SimpleNamespace(
11 | source_branch_name="useless-comment",
12 | source_repo_user="merwanehamadi",
13 |
14 | # PR information
15 | title="Useless comment",
16 | body="Useless comment",
17 | # time allowed to run
18 | cycle_count=3,
19 | # PR success criteria
20 | approved=False,
21 | contains={"bad_variable_name.py": ["variable"]},
22 | )
23 |
24 |
25 | @requires_api_key("OPENAI_API_KEY")
26 | def test_basic_pr_review(monkeypatch: pytest.MonkeyPatch, workspace: Workspace) -> None:
27 | run_tests(PARAMETERS, monkeypatch, workspace)
28 |
--------------------------------------------------------------------------------
/tests/integration/challenges/pr_review/test_basic_pr_review_variable.py:
--------------------------------------------------------------------------------
1 | from types import SimpleNamespace
2 |
3 | import pytest
4 |
5 | from autogpt.workspace import Workspace
6 | from tests.integration.challenges.pr_review.base import run_tests
7 | from tests.utils import requires_api_key
8 |
9 | PR_LINK = "https://github.com/merwanehamadi/Auto-GPT/pull/116"
10 | PARAMETERS = SimpleNamespace(
11 |     cycle_count=6,
12 |     pr_target_repo_user="merwanehamadi",
13 |     pr_target_repo_name="Auto-GPT",
14 |     source_branch_name="useless-comment",
15 |     title="Useless comment",
16 |     body="Useless comment",
17 |     approved=True,
18 |     contains={"bad_variables.py": "not used"},
19 | )
20 |
21 |
22 | @requires_api_key("OPENAI_API_KEY")
23 | def test_basic_pr_review_variable(
24 |     monkeypatch: pytest.MonkeyPatch, workspace: Workspace
25 | ) -> None:
26 |     run_tests(PARAMETERS, monkeypatch, workspace)
27 |
--------------------------------------------------------------------------------
/tests/integration/challenges/test_challenge_should_be_formatted_properly.py:
--------------------------------------------------------------------------------
1 | import importlib.util
2 | import inspect
3 | import os
4 | from types import ModuleType
5 | from typing import List
6 |
7 | # Path to the challenges folder
8 | CHALLENGES_DIR = os.path.join(
9 | os.path.dirname(os.path.realpath(__file__)), "../challenges"
10 | )
11 |
12 |
13 | def get_python_files(directory: str, exclude_file: str) -> List[str]:
14 | """Recursively get all python files in a directory and subdirectories."""
15 | python_files: List[str] = []
16 | for root, dirs, files in os.walk(directory):
17 | for file in files:
18 | if (
19 | file.endswith(".py")
20 | and file.startswith("test_")
21 | and file != exclude_file
22 | ):
23 | python_files.append(os.path.join(root, file))
24 | return python_files
25 |
26 |
27 | def load_module_from_file(test_file: str) -> ModuleType:
28 | spec = importlib.util.spec_from_file_location("module.name", test_file)
29 | assert spec is not None, f"Unable to get spec for module in file {test_file}"
30 | module = importlib.util.module_from_spec(spec)
31 | assert (
32 | spec.loader is not None
33 | ), f"Unable to get loader for module in file {test_file}"
34 | spec.loader.exec_module(module)
35 | return module
36 |
37 |
38 | def get_test_functions(module: ModuleType) -> List:
39 | return [
40 | o
41 | for o in inspect.getmembers(module)
42 | if inspect.isfunction(o[1]) and o[0].startswith("test_")
43 | ]
44 |
45 |
46 | def assert_single_test_function(functions_list: List, test_file: str) -> None:
47 | assert len(functions_list) == 1, f"{test_file} should contain only one function"
48 | assert (
49 | functions_list[0][0][5:] == os.path.basename(test_file)[5:-3]
50 | ), f"The function in {test_file} should have the same name as the file without 'test_' prefix"
51 |
52 |
53 | def test_method_name_and_count() -> None:
54 | current_file: str = os.path.basename(__file__)
55 | test_files: List[str] = get_python_files(CHALLENGES_DIR, current_file)
56 | for test_file in test_files:
57 | module = load_module_from_file(test_file)
58 | functions_list = get_test_functions(module)
59 | assert_single_test_function(functions_list, test_file)
60 |
--------------------------------------------------------------------------------
/tests/integration/challenges/utils.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import random
3 | from functools import wraps
4 | from typing import Any, Callable, Dict, Generator, Optional, Tuple
5 |
6 | import pytest
7 |
8 | from autogpt.agent import Agent
9 |
10 |
11 | def get_level_to_run(
12 |     user_selected_level: Optional[int],
13 |     level_currently_beaten: int,
14 |     max_level: int,
15 | ) -> int:
16 | """
17 | Determines the appropriate level to run for a challenge, based on user-selected level, level currently beaten, and maximum level.
18 |
19 | Args:
20 | user_selected_level (int | None): The level selected by the user. If not provided, the level currently beaten is used.
21 | level_currently_beaten (int | None): The highest level beaten so far. If not provided, the test will be skipped.
22 | max_level (int): The maximum level allowed for the challenge.
23 |
24 | Returns:
25 | int: The level to run for the challenge.
26 |
27 | Raises:
28 | ValueError: If the user-selected level is greater than the maximum level allowed.
29 | """
30 | if user_selected_level is None:
31 | if level_currently_beaten == -1:
32 | pytest.skip(
33 | "No one has beaten any levels so we cannot run the test in our pipeline"
34 | )
35 | # by default we run the level currently beaten.
36 | return level_currently_beaten
37 | if user_selected_level > max_level:
38 | raise ValueError(f"This challenge was not designed to go beyond {max_level}")
39 | return user_selected_level
40 |
41 |
42 | def generate_noise(noise_size: int) -> str:
43 | return "".join(
44 | random.choices(
45 | "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
46 | k=noise_size,
47 | )
48 | )
49 |
50 |
51 | def run_multiple_times(times: int) -> Callable:
52 | """
53 | Decorator that runs a test function multiple times.
54 |
55 | :param times: The number of times the test function should be executed.
56 | """
57 |
58 | def decorator(test_func: Callable[..., Any]) -> Callable[..., Any]:
59 | @wraps(test_func)
60 | def wrapper(*args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> None:
61 | for _ in range(times):
62 | test_func(*args, **kwargs)
63 |
64 | return wrapper
65 |
66 | return decorator
67 |
68 |
69 | def setup_mock_input(monkeypatch: pytest.MonkeyPatch, cycle_count: int) -> None:
70 | """
71 | Sets up the mock input for testing.
72 |
73 | :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
74 | :param cycle_count: The number of cycles to mock.
75 | """
76 | input_sequence = ["y"] * (cycle_count) + ["EXIT"]
77 |
78 | def input_generator() -> Generator[str, None, None]:
79 | """
80 | Creates a generator that yields input strings from the given sequence.
81 | """
82 | yield from input_sequence
83 |
84 | gen = input_generator()
85 | monkeypatch.setattr("builtins.input", lambda _: next(gen))
86 |
87 |
88 | def run_interaction_loop(
89 | monkeypatch: pytest.MonkeyPatch, agent: Agent, cycle_count: int
90 | ) -> None:
91 | setup_mock_input(monkeypatch, cycle_count)
92 | with contextlib.suppress(SystemExit):
93 | agent.start_interaction_loop()
94 |
--------------------------------------------------------------------------------
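Note: a minimal usage sketch for the helpers above. The test name and cycle count are hypothetical; `monkeypatch` is pytest's built-in fixture and `agent` is assumed to come from a challenge fixture, as in the challenge tests elsewhere in this suite:

    import pytest

    from tests.integration.challenges.utils import run_interaction_loop, run_multiple_times


    @run_multiple_times(3)
    def test_hypothetical_challenge(monkeypatch: pytest.MonkeyPatch, agent) -> None:
        # Approve five agent cycles ("y" five times), then send "EXIT"; the
        # helper suppresses the SystemExit raised when the loop shuts down.
        run_interaction_loop(monkeypatch, agent, cycle_count=5)

--------------------------------------------------------------------------------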
/tests/integration/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import openai
4 | import pytest
5 |
6 | from tests.conftest import PROXY
7 | from tests.vcr.vcr_filter import before_record_request, before_record_response
8 |
9 |
10 | @pytest.fixture(scope="session")
11 | def vcr_config():
12 | # this fixture is called by the pytest-recording vcr decorator.
13 | return {
14 | "record_mode": "new_episodes",
15 | "before_record_request": before_record_request,
16 | "before_record_response": before_record_response,
17 | "filter_headers": [
18 | "Authorization",
19 | "X-OpenAI-Client-User-Agent",
20 | "User-Agent",
21 | ],
22 | "match_on": ["method", "body"],
23 | }
24 |
25 |
26 | def patch_api_base(requestor):
27 | new_api_base = f"{PROXY}/v1"
28 | requestor.api_base = new_api_base
29 | return requestor
30 |
31 |
32 | @pytest.fixture
33 | def patched_api_requestor(mocker):
34 | original_init = openai.api_requestor.APIRequestor.__init__
35 | original_validate_headers = openai.api_requestor.APIRequestor._validate_headers
36 |
37 | def patched_init(requestor, *args, **kwargs):
38 | original_init(requestor, *args, **kwargs)
39 | patch_api_base(requestor)
40 |
41 | def patched_validate_headers(self, supplied_headers):
42 | headers = original_validate_headers(self, supplied_headers)
43 | headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
44 | headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")
45 | return headers
46 |
47 | if PROXY:
48 | mocker.patch("openai.api_requestor.APIRequestor.__init__", new=patched_init)
49 | mocker.patch.object(
50 | openai.api_requestor.APIRequestor,
51 | "_validate_headers",
52 | new=patched_validate_headers,
53 | )
54 |
55 | return mocker
56 |
--------------------------------------------------------------------------------
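Note: a hedged sketch of how a test opts in to these fixtures. The test body is hypothetical; `pytest-recording` supplies the `vcr` marker that consumes `vcr_config`, and `patched_api_requestor` only reroutes OpenAI traffic when PROXY is set:

    import openai
    import pytest

    from tests.utils import requires_api_key


    @pytest.mark.vcr
    @requires_api_key("OPENAI_API_KEY")
    def test_hypothetical_chat(patched_api_requestor) -> None:
        # Recorded to a cassette on the first run, replayed afterwards.
        openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "ping"}],
        )

--------------------------------------------------------------------------------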
/tests/integration/goal_oriented/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/integration/goal_oriented/__init__.py
--------------------------------------------------------------------------------
/tests/integration/memory_tests.py:
--------------------------------------------------------------------------------
1 | import random
2 | import string
3 | import sys
4 | import unittest
5 | from pathlib import Path
6 |
7 | from autogpt.config import Config
8 | from autogpt.memory.local import LocalCache
9 |
10 |
11 | class TestLocalCache(unittest.TestCase):
12 | def generate_random_string(self, length):
13 | return "".join(random.choice(string.ascii_letters) for _ in range(length))
14 |
15 | def setUp(self):
16 | """Set up the test environment for the LocalCache tests."""
17 |         cfg = Config()
18 | self.cache = LocalCache(cfg)
19 | self.cache.clear()
20 |
21 | # Add example texts to the cache
22 | self.example_texts = [
23 | "The quick brown fox jumps over the lazy dog",
24 | "I love machine learning and natural language processing",
25 | "The cake is a lie, but the pie is always true",
26 | "ChatGPT is an advanced AI model for conversation",
27 | ]
28 | for text in self.example_texts:
29 | self.cache.add(text)
30 |
31 | # Add some random strings to test noise
32 | for _ in range(5):
33 | self.cache.add(self.generate_random_string(10))
34 |
35 | def test_get_relevant(self):
36 | """Test getting relevant texts from the cache."""
37 | query = "I'm interested in artificial intelligence and NLP"
38 | k = 3
39 | relevant_texts = self.cache.get_relevant(query, k)
40 |
41 | print(f"Top {k} relevant texts for the query '{query}':")
42 | for i, text in enumerate(relevant_texts, start=1):
43 | print(f"{i}. {text}")
44 |
45 | self.assertEqual(len(relevant_texts), k)
46 | self.assertIn(self.example_texts[1], relevant_texts)
47 |
48 |
49 | if __name__ == "__main__":
50 | unittest.main()
51 |
--------------------------------------------------------------------------------
/tests/integration/milvus_memory_tests.py:
--------------------------------------------------------------------------------
1 | # sourcery skip: snake-case-functions
2 | """Tests for the MilvusMemory class."""
3 | import random
4 | import string
5 | import unittest
6 |
7 | from autogpt.config import Config
8 |
9 | try:
10 |     from autogpt.memory.milvus import MilvusMemory
11 |
12 |     class TestMilvusMemory(unittest.TestCase):
13 | """Unit tests for the MilvusMemory class."""
14 |
15 | def generate_random_string(self, length: int) -> str:
16 | return "".join(random.choice(string.ascii_letters) for _ in range(length))
17 |
18 | def setUp(self) -> None:
19 | cfg = Config()
20 | cfg.milvus_addr = "localhost:19530"
21 | self.memory = MilvusMemory(cfg)
22 | self.memory.clear()
23 |
24 | # Add example texts to the cache
25 | self.example_texts = [
26 | "The quick brown fox jumps over the lazy dog",
27 | "I love machine learning and natural language processing",
28 | "The cake is a lie, but the pie is always true",
29 | "ChatGPT is an advanced AI model for conversation",
30 | ]
31 |
32 | for text in self.example_texts:
33 | self.memory.add(text)
34 |
35 | # Add some random strings to test noise
36 | for _ in range(5):
37 | self.memory.add(self.generate_random_string(10))
38 |
39 | def test_get_relevant(self) -> None:
40 | """Test getting relevant texts from the cache."""
41 | query = "I'm interested in artificial intelligence and NLP"
42 | num_relevant = 3
43 | relevant_texts = self.memory.get_relevant(query, num_relevant)
44 |
45 |             print(f"Top {num_relevant} relevant texts for the query '{query}':")
46 | for i, text in enumerate(relevant_texts, start=1):
47 | print(f"{i}. {text}")
48 |
49 |             self.assertEqual(len(relevant_texts), num_relevant)
50 | self.assertIn(self.example_texts[1], relevant_texts)
51 |
52 | except ImportError:
53 | print(
54 | "Skipping tests/integration/milvus_memory_tests.py as Milvus is not installed."
55 | )
56 |
--------------------------------------------------------------------------------
/tests/integration/test_commands.py:
--------------------------------------------------------------------------------
1 | """Unit tests for the commands module"""
2 | from unittest.mock import MagicMock, patch
3 |
4 | import pytest
5 |
6 | from autogpt.app import list_agents, start_agent
7 | from tests.utils import requires_api_key
8 |
9 |
10 | @pytest.mark.vcr
11 | @pytest.mark.integration_test
12 | @requires_api_key("OPENAI_API_KEY")
13 | def test_make_agent(patched_api_requestor) -> None:
14 | """Test that an agent can be created"""
15 | # Use the mock agent manager to avoid creating a real agent
16 | with patch("openai.ChatCompletion.create") as mock:
17 | response = MagicMock()
18 | # del response.error
19 | response.choices[0].messages[0].content = "Test message"
20 | response.usage.prompt_tokens = 1
21 | response.usage.completion_tokens = 1
22 | mock.return_value = response
23 | start_agent("Test Agent", "chat", "Hello, how are you?", "gpt-3.5-turbo")
24 | agents = list_agents()
25 | assert "List of agents:\n0: chat" == agents
26 | start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt-3.5-turbo")
27 | agents = list_agents()
28 | assert "List of agents:\n0: chat\n1: write" == agents
29 |
--------------------------------------------------------------------------------
/tests/integration/test_execute_code.py:
--------------------------------------------------------------------------------
1 | import random
2 | import string
3 | import tempfile
4 |
5 | import pytest
6 | from pytest_mock import MockerFixture
7 |
8 | import autogpt.commands.execute_code as sut # system under testing
9 | from autogpt.config import Config
10 |
11 |
12 | @pytest.fixture
13 | def config_allow_execute(config: Config, mocker: MockerFixture):
14 | yield mocker.patch.object(config, "execute_local_commands", True)
15 |
16 |
17 | @pytest.fixture
18 | def python_test_file(config: Config, random_string):
19 | temp_file = tempfile.NamedTemporaryFile(dir=config.workspace_path, suffix=".py")
20 | temp_file.write(str.encode(f"print('Hello {random_string}!')"))
21 | temp_file.flush()
22 |
23 | yield temp_file.name
24 | temp_file.close()
25 |
26 |
27 | @pytest.fixture
28 | def random_string():
29 | return "".join(random.choice(string.ascii_lowercase) for _ in range(10))
30 |
31 |
32 | def test_execute_python_file(python_test_file: str, random_string: str):
33 | result = sut.execute_python_file(python_test_file)
34 | assert result == f"Hello {random_string}!\n"
35 |
36 |
37 | def test_execute_python_file_invalid():
38 | assert all(
39 | s in sut.execute_python_file("not_python").lower()
40 | for s in ["error:", "invalid", ".py"]
41 | )
42 | assert all(
43 | s in sut.execute_python_file("notexist.py").lower()
44 | for s in ["error:", "does not exist"]
45 | )
46 |
47 |
48 | def test_execute_shell(config_allow_execute, random_string):
49 | result = sut.execute_shell(f"echo 'Hello {random_string}!'")
50 | assert f"Hello {random_string}!" in result
51 |
--------------------------------------------------------------------------------
/tests/integration/test_git_commands.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from git.exc import GitCommandError
3 | from git.repo.base import Repo
4 |
5 | from autogpt.commands.git_operations import clone_repository
6 |
7 |
8 | @pytest.fixture
9 | def mock_clone_from(mocker):
10 | return mocker.patch.object(Repo, "clone_from")
11 |
12 |
13 | def test_clone_auto_gpt_repository(workspace, mock_clone_from, config):
14 | mock_clone_from.return_value = None
15 |
16 | repo = "github.com/Significant-Gravitas/Auto-GPT.git"
17 | scheme = "https://"
18 | url = scheme + repo
19 | clone_path = str(workspace.get_path("auto-gpt-repo"))
20 |
21 | expected_output = f"Cloned {url} to {clone_path}"
22 |
23 | clone_result = clone_repository(url=url, clone_path=clone_path)
24 |
25 | assert clone_result == expected_output
26 | mock_clone_from.assert_called_once_with(
27 | url=f"{scheme}{config.github_username}:{config.github_api_key}@{repo}",
28 | to_path=clone_path,
29 | )
30 |
31 |
32 | def test_clone_repository_error(workspace, mock_clone_from):
33 | url = "https://github.com/this-repository/does-not-exist.git"
34 | clone_path = str(workspace.get_path("does-not-exist"))
35 |
36 | mock_clone_from.side_effect = GitCommandError(
37 | "clone", "fatal: repository not found", ""
38 | )
39 |
40 | result = clone_repository(url=url, clone_path=clone_path)
41 |
42 | assert "Error: " in result
43 |
--------------------------------------------------------------------------------
/tests/integration/test_llm_utils.py:
--------------------------------------------------------------------------------
1 | import string
2 | from unittest.mock import MagicMock
3 |
4 | import pytest
5 | from numpy.random import RandomState
6 | from pytest_mock import MockerFixture
7 |
8 | from autogpt.config import Config
9 | from autogpt.llm import llm_utils
10 | from autogpt.llm.api_manager import ApiManager
11 | from autogpt.llm.modelsinfo import COSTS
12 | from tests.utils import requires_api_key
13 |
14 |
15 | @pytest.fixture(scope="session")
16 | def random_large_string():
17 | """Big string used to overwhelm token limits."""
18 | seed = 42
19 | n_characters = 30_000
20 | random = RandomState(seed)
21 | return "".join(random.choice(list(string.ascii_lowercase), size=n_characters))
22 |
23 |
24 | @pytest.fixture()
25 | def api_manager(mocker: MockerFixture):
26 | api_manager = ApiManager()
27 | mocker.patch.multiple(
28 | api_manager,
29 | total_prompt_tokens=0,
30 | total_completion_tokens=0,
31 | total_cost=0,
32 | )
33 | yield api_manager
34 |
35 |
36 | @pytest.fixture()
37 | def spy_create_embedding(mocker: MockerFixture):
38 | return mocker.spy(llm_utils, "create_embedding")
39 |
40 |
41 | @pytest.mark.vcr
42 | @requires_api_key("OPENAI_API_KEY")
43 | def test_get_ada_embedding(
44 | config: Config,
45 | api_manager: ApiManager,
46 | spy_create_embedding: MagicMock,
47 | patched_api_requestor,
48 | ):
49 | token_cost = COSTS[config.embedding_model]["prompt"]
50 | llm_utils.get_ada_embedding("test")
51 |
52 | spy_create_embedding.assert_called_once_with("test", model=config.embedding_model)
53 |
54 | assert (prompt_tokens := api_manager.get_total_prompt_tokens()) == 1
55 | assert api_manager.get_total_completion_tokens() == 0
56 | assert api_manager.get_total_cost() == (prompt_tokens * token_cost) / 1000
57 |
58 |
59 | @pytest.mark.vcr
60 | @requires_api_key("OPENAI_API_KEY")
61 | def test_get_ada_embedding_large_context(random_large_string):
62 | # This test should be able to mock the openai call after we have a fix. We don't need
63 | # to hit the API to test the logic of the function (so not using vcr). This is a quick
64 | # regression test to document the issue.
65 | llm_utils.get_ada_embedding(random_large_string)
66 |
--------------------------------------------------------------------------------
/tests/integration/test_local_cache.py:
--------------------------------------------------------------------------------
1 | # sourcery skip: snake-case-functions
2 | """Tests for LocalCache class"""
3 | import unittest
4 |
5 | import orjson
6 | import pytest
7 |
8 | from autogpt.memory.local import EMBED_DIM, SAVE_OPTIONS
9 | from autogpt.memory.local import LocalCache as LocalCache_
10 | from tests.utils import requires_api_key
11 |
12 |
13 | @pytest.fixture
14 | def LocalCache():
15 | # Hack, real gross. Singletons are not good times.
16 | if LocalCache_ in LocalCache_._instances:
17 | del LocalCache_._instances[LocalCache_]
18 | return LocalCache_
19 |
20 |
21 | @pytest.fixture
22 | def mock_embed_with_ada(mocker):
23 | mocker.patch(
24 | "autogpt.memory.local.get_ada_embedding",
25 | return_value=[0.1] * EMBED_DIM,
26 | )
27 |
28 |
29 | def test_init_without_backing_file(LocalCache, config, workspace):
30 | cache_file = workspace.root / f"{config.memory_index}.json"
31 |
32 | assert not cache_file.exists()
33 | LocalCache(config)
34 | assert cache_file.exists()
35 | assert cache_file.read_text() == "{}"
36 |
37 |
38 | def test_init_with_backing_empty_file(LocalCache, config, workspace):
39 | cache_file = workspace.root / f"{config.memory_index}.json"
40 | cache_file.touch()
41 |
42 | assert cache_file.exists()
43 | LocalCache(config)
44 | assert cache_file.exists()
45 | assert cache_file.read_text() == "{}"
46 |
47 |
48 | def test_init_with_backing_file(LocalCache, config, workspace):
49 | cache_file = workspace.root / f"{config.memory_index}.json"
50 | cache_file.touch()
51 |
52 | raw_data = {"texts": ["test"]}
53 | data = orjson.dumps(raw_data, option=SAVE_OPTIONS)
54 | with cache_file.open("wb") as f:
55 | f.write(data)
56 |
57 | assert cache_file.exists()
58 | LocalCache(config)
59 | assert cache_file.exists()
60 | assert cache_file.read_text() == "{}"
61 |
62 |
63 | def test_add(LocalCache, config, mock_embed_with_ada):
64 | cache = LocalCache(config)
65 | cache.add("test")
66 | assert cache.data.texts == ["test"]
67 | assert cache.data.embeddings.shape == (1, EMBED_DIM)
68 |
69 |
70 | def test_clear(LocalCache, config, mock_embed_with_ada):
71 | cache = LocalCache(config)
72 | assert cache.data.texts == []
73 | assert cache.data.embeddings.shape == (0, EMBED_DIM)
74 |
75 | cache.add("test")
76 | assert cache.data.texts == ["test"]
77 | assert cache.data.embeddings.shape == (1, EMBED_DIM)
78 |
79 | cache.clear()
80 | assert cache.data.texts == []
81 | assert cache.data.embeddings.shape == (0, EMBED_DIM)
82 |
83 |
84 | def test_get(LocalCache, config, mock_embed_with_ada):
85 | cache = LocalCache(config)
86 | assert cache.get("test") == []
87 |
88 | cache.add("test")
89 | assert cache.get("test") == ["test"]
90 |
91 |
92 | @pytest.mark.vcr
93 | @requires_api_key("OPENAI_API_KEY")
94 | def test_get_relevant(LocalCache, config, patched_api_requestor) -> None:
95 | cache = LocalCache(config)
96 | text1 = "Sample text 1"
97 | text2 = "Sample text 2"
98 | cache.add(text1)
99 | cache.add(text2)
100 |
101 | result = cache.get_relevant(text1, 1)
102 | assert result == [text1]
103 |
104 |
105 | def test_get_stats(LocalCache, config, mock_embed_with_ada) -> None:
106 | cache = LocalCache(config)
107 | text = "Sample text"
108 | cache.add(text)
109 | stats = cache.get_stats()
110 | assert stats == (1, cache.data.embeddings.shape)
111 |
--------------------------------------------------------------------------------
/tests/integration/test_memory_management.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import pytest
4 |
5 | from autogpt.config import Config
6 | from autogpt.memory import get_memory
7 | from autogpt.memory_management.store_memory import (
8 | save_memory_trimmed_from_context_window,
9 | )
10 | from tests.utils import requires_api_key
11 |
12 |
13 | @pytest.fixture
14 | def message_history_fixture():
15 | assistant_reply = {
16 | "thoughts": {
17 | "text": "thoughts",
18 | "reasoning": "reasoning",
19 | "plan": "plan",
20 | "criticism": "criticism",
21 | "speak": "speak",
22 | },
23 | "command": {"name": "google", "args": {"query": "google_query"}},
24 | }
25 | return [
26 | {"content": json.dumps(assistant_reply, indent=4)},
27 | {"content": "Command Result: Important Information."},
28 | ]
29 |
30 |
31 | @pytest.fixture
32 | def expected_permanent_memory() -> str:
33 | return """Assistant Reply: {
34 | "thoughts": {
35 | "text": "thoughts",
36 | "reasoning": "reasoning",
37 | "plan": "plan",
38 | "criticism": "criticism",
39 | "speak": "speak"
40 | },
41 | "command": {
42 | "name": "google",
43 | "args": {
44 | "query": "google_query"
45 | }
46 | }
47 | }
48 | Result: None
49 | Human Feedback:Command Result: Important Information."""
50 |
51 |
52 | @requires_api_key("OPENAI_API_KEY")
53 | @pytest.mark.vcr
54 | def test_save_memory_trimmed_from_context_window(
55 | message_history_fixture,
56 | expected_permanent_memory,
57 | config: Config,
58 | patched_api_requestor,
59 | ):
60 | next_message_to_add_index = len(message_history_fixture) - 1
61 | memory = get_memory(config, init=True)
62 | save_memory_trimmed_from_context_window(
63 | message_history_fixture, next_message_to_add_index, memory
64 | )
65 |
66 | memory_found = memory.get_relevant("Important Information", 5)
67 | assert memory_found[0] == expected_permanent_memory
68 |
--------------------------------------------------------------------------------
/tests/integration/test_setup.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | import pytest
4 |
5 | from autogpt.config.ai_config import AIConfig
6 | from autogpt.setup import (
7 | generate_aiconfig_automatic,
8 | generate_aiconfig_manual,
9 | prompt_user,
10 | )
11 | from tests.utils import requires_api_key
12 |
13 |
14 | @pytest.mark.vcr
15 | @requires_api_key("OPENAI_API_KEY")
16 | def test_generate_aiconfig_automatic_default(patched_api_requestor):
17 | user_inputs = [""]
18 | with patch("builtins.input", side_effect=user_inputs):
19 | ai_config = prompt_user()
20 |
21 | assert isinstance(ai_config, AIConfig)
22 | assert ai_config.ai_name is not None
23 | assert ai_config.ai_role is not None
24 | assert 1 <= len(ai_config.ai_goals) <= 5
25 |
26 |
27 | @pytest.mark.vcr
28 | @requires_api_key("OPENAI_API_KEY")
29 | def test_generate_aiconfig_automatic_typical(patched_api_requestor):
30 | user_prompt = "Help me create a rock opera about cybernetic giraffes"
31 | ai_config = generate_aiconfig_automatic(user_prompt)
32 |
33 | assert isinstance(ai_config, AIConfig)
34 | assert ai_config.ai_name is not None
35 | assert ai_config.ai_role is not None
36 | assert 1 <= len(ai_config.ai_goals) <= 5
37 |
38 |
39 | @pytest.mark.vcr
40 | @requires_api_key("OPENAI_API_KEY")
41 | def test_generate_aiconfig_automatic_fallback(patched_api_requestor):
42 | user_inputs = [
43 | "T&GF£OIBECC()!*",
44 | "Chef-GPT",
45 | "an AI designed to browse bake a cake.",
46 | "Purchase ingredients",
47 | "Bake a cake",
48 | "",
49 | "",
50 | ]
51 | with patch("builtins.input", side_effect=user_inputs):
52 | ai_config = prompt_user()
53 |
54 | assert isinstance(ai_config, AIConfig)
55 | assert ai_config.ai_name == "Chef-GPT"
56 | assert ai_config.ai_role == "an AI designed to browse bake a cake."
57 | assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"]
58 |
59 |
60 | @pytest.mark.vcr
61 | @requires_api_key("OPENAI_API_KEY")
62 | def test_prompt_user_manual_mode(patched_api_requestor):
63 | user_inputs = [
64 | "--manual",
65 | "Chef-GPT",
66 | "an AI designed to browse bake a cake.",
67 | "Purchase ingredients",
68 | "Bake a cake",
69 | "",
70 | "",
71 | ]
72 | with patch("builtins.input", side_effect=user_inputs):
73 | ai_config = prompt_user()
74 |
75 | assert isinstance(ai_config, AIConfig)
76 | assert ai_config.ai_name == "Chef-GPT"
77 | assert ai_config.ai_role == "an AI designed to browse bake a cake."
78 | assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"]
79 |
--------------------------------------------------------------------------------
/tests/milvus_memory_test.py:
--------------------------------------------------------------------------------
1 | # sourcery skip: snake-case-functions
2 | """Tests for the MilvusMemory class."""
3 | import os
4 | import sys
5 | import unittest
6 |
7 | try:
8 | from autogpt.config import Config
9 | from autogpt.memory.milvus import MilvusMemory
10 |
11 | def mock_config() -> Config:
12 | """Mock the config object for testing purposes."""
13 |
14 | # Return a mock config object with the required attributes
15 | class MockConfig(Config):
16 | debug_mode = False
17 | continuous_mode = False
18 | speak_mode = False
19 | milvus_collection = "autogpt"
20 | milvus_addr = "localhost:19530"
21 |
22 | return MockConfig()
23 |
24 | class TestMilvusMemory(unittest.TestCase):
25 | """Tests for the MilvusMemory class."""
26 |
27 | def setUp(self) -> None:
28 | """Set up the test environment"""
29 | self.cfg = mock_config()
30 | self.memory = MilvusMemory(self.cfg)
31 |
32 | def test_add(self) -> None:
33 | """Test adding a text to the cache"""
34 | text = "Sample text"
35 | self.memory.clear()
36 | self.memory.add(text)
37 | result = self.memory.get(text)
38 | self.assertEqual([text], result)
39 |
40 | def test_clear(self) -> None:
41 | """Test clearing the cache"""
42 | self.memory.clear()
43 | self.assertEqual(self.memory.collection.num_entities, 0)
44 |
45 | def test_get(self) -> None:
46 | """Test getting a text from the cache"""
47 | text = "Sample text"
48 | self.memory.clear()
49 | self.memory.add(text)
50 | result = self.memory.get(text)
51 | self.assertEqual(result, [text])
52 |
53 | def test_get_relevant(self) -> None:
54 | """Test getting relevant texts from the cache"""
55 | text1 = "Sample text 1"
56 | text2 = "Sample text 2"
57 | self.memory.clear()
58 | self.memory.add(text1)
59 | self.memory.add(text2)
60 | result = self.memory.get_relevant(text1, 1)
61 | self.assertEqual(result, [text1])
62 |
63 | def test_get_stats(self) -> None:
64 | """Test getting the cache stats"""
65 | text = "Sample text"
66 | self.memory.clear()
67 | self.memory.add(text)
68 | stats = self.memory.get_stats()
69 | self.assertEqual(15, len(stats))
70 |
71 | except ImportError as err:
72 | print(f"Skipping tests for MilvusMemory: {err}")
73 |
--------------------------------------------------------------------------------
/tests/mocks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/mocks/__init__.py
--------------------------------------------------------------------------------
/tests/mocks/mock_commands.py:
--------------------------------------------------------------------------------
1 | from autogpt.commands.command import command
2 |
3 |
4 | @command("function_based", "Function-based test command")
5 | def function_based(arg1: int, arg2: str) -> str:
6 | """A function-based test command that returns a string with the two arguments separated by a dash."""
7 | return f"{arg1} - {arg2}"
8 |
--------------------------------------------------------------------------------
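Note: a sketch of how this mock module is typically consumed. It assumes the CommandRegistry API from autogpt/commands/command.py; the `import_commands` and `call` method names are assumptions here, not verified against the module:

    from autogpt.commands.command import CommandRegistry

    registry = CommandRegistry()
    registry.import_commands("tests.mocks.mock_commands")

    # The registered command is callable by name with keyword arguments.
    assert registry.call("function_based", arg1=1, arg2="two") == "1 - two"

--------------------------------------------------------------------------------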
/tests/test_agent.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import MagicMock
2 |
3 | import pytest
4 |
5 | from autogpt.agent import Agent
6 | from autogpt.config import Config
7 |
8 |
9 | @pytest.fixture
10 | def agent():
11 | ai_name = "Test AI"
12 | memory = MagicMock()
13 | full_message_history = []
14 | next_action_count = 0
15 | command_registry = MagicMock()
16 | config = Config()
17 | system_prompt = "System prompt"
18 | triggering_prompt = "Triggering prompt"
19 | workspace_directory = "workspace_directory"
20 |
21 | agent = Agent(
22 | ai_name,
23 | memory,
24 | full_message_history,
25 | next_action_count,
26 | command_registry,
27 | config,
28 | system_prompt,
29 | triggering_prompt,
30 | workspace_directory,
31 | )
32 | return agent
33 |
34 |
35 | def test_agent_initialization(agent):
36 | assert agent.ai_name == "Test AI"
37 |     assert agent.memory is not None
38 |     assert agent.full_message_history == []
39 |     assert agent.next_action_count == 0
40 |     assert agent.command_registry is not None
41 |     assert agent.config is not None
42 | assert agent.system_prompt == "System prompt"
43 | assert agent.triggering_prompt == "Triggering prompt"
44 |
45 |
46 | # More test methods can be added for specific agent interactions
47 | # For example, mocking chat_with_ai and testing the agent's interaction loop
48 |
--------------------------------------------------------------------------------
/tests/test_agent_manager.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from autogpt.agent.agent_manager import AgentManager
4 | from autogpt.llm import create_chat_completion
5 |
6 |
7 | @pytest.fixture
8 | def agent_manager():
9 | # Hack, real gross. Singletons are not good times.
10 | if AgentManager in AgentManager._instances:
11 | del AgentManager._instances[AgentManager]
12 | return AgentManager()
13 |
14 |
15 | @pytest.fixture
16 | def task():
17 | return "translate English to French"
18 |
19 |
20 | @pytest.fixture
21 | def prompt():
22 | return "Translate the following English text to French: 'Hello, how are you?'"
23 |
24 |
25 | @pytest.fixture
26 | def model():
27 | return "gpt-3.5-turbo"
28 |
29 |
30 | @pytest.fixture(autouse=True)
31 | def mock_create_chat_completion(mocker):
32 | mock_create_chat_completion = mocker.patch(
33 | "autogpt.agent.agent_manager.create_chat_completion",
34 | wraps=create_chat_completion,
35 | )
36 | mock_create_chat_completion.return_value = "irrelevant"
37 | return mock_create_chat_completion
38 |
39 |
40 | def test_create_agent(agent_manager, task, prompt, model):
41 | key, agent_reply = agent_manager.create_agent(task, prompt, model)
42 | assert isinstance(key, int)
43 | assert isinstance(agent_reply, str)
44 | assert key in agent_manager.agents
45 |
46 |
47 | def test_message_agent(agent_manager, task, prompt, model):
48 | key, _ = agent_manager.create_agent(task, prompt, model)
49 | user_message = "Please translate 'Good morning' to French."
50 | agent_reply = agent_manager.message_agent(key, user_message)
51 | assert isinstance(agent_reply, str)
52 |
53 |
54 | def test_list_agents(agent_manager, task, prompt, model):
55 | key, _ = agent_manager.create_agent(task, prompt, model)
56 | agents_list = agent_manager.list_agents()
57 | assert isinstance(agents_list, list)
58 | assert (key, task) in agents_list
59 |
60 |
61 | def test_delete_agent(agent_manager, task, prompt, model):
62 | key, _ = agent_manager.create_agent(task, prompt, model)
63 | success = agent_manager.delete_agent(key)
64 | assert success
65 | assert key not in agent_manager.agents
66 |
--------------------------------------------------------------------------------
/tests/test_ai_config.py:
--------------------------------------------------------------------------------
1 | from autogpt.config.ai_config import AIConfig
2 |
3 | """
4 | Test cases for the AIConfig class, which handles loading the AI configuration
5 | settings from a YAML file.
6 | """
7 |
8 |
9 | def test_goals_are_always_lists_of_strings(tmp_path):
10 | """Test if the goals attribute is always a list of strings."""
11 |
12 | yaml_content = """
13 | ai_goals:
14 | - Goal 1: Make a sandwich
15 | - Goal 2, Eat the sandwich
16 | - Goal 3 - Go to sleep
17 | - "Goal 4: Wake up"
18 | ai_name: McFamished
19 | ai_role: A hungry AI
20 | api_budget: 0.0
21 | """
22 | config_file = tmp_path / "ai_settings.yaml"
23 | config_file.write_text(yaml_content)
24 |
25 | ai_config = AIConfig.load(config_file)
26 |
27 | assert len(ai_config.ai_goals) == 4
28 | assert ai_config.ai_goals[0] == "Goal 1: Make a sandwich"
29 | assert ai_config.ai_goals[1] == "Goal 2, Eat the sandwich"
30 | assert ai_config.ai_goals[2] == "Goal 3 - Go to sleep"
31 | assert ai_config.ai_goals[3] == "Goal 4: Wake up"
32 |
33 | config_file.write_text("")
34 | ai_config.save(config_file)
35 |
36 | yaml_content2 = """ai_goals:
37 | - 'Goal 1: Make a sandwich'
38 | - Goal 2, Eat the sandwich
39 | - Goal 3 - Go to sleep
40 | - 'Goal 4: Wake up'
41 | ai_name: McFamished
42 | ai_role: A hungry AI
43 | api_budget: 0.0
44 | """
45 | assert config_file.read_text() == yaml_content2
46 |
--------------------------------------------------------------------------------
/tests/test_logs.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from autogpt.logs import remove_color_codes
4 |
5 |
6 | @pytest.mark.parametrize(
7 | "raw_text, clean_text",
8 | [
9 | (
10 | "COMMAND = \x1b[36mbrowse_website\x1b[0m ARGUMENTS = \x1b[36m{'url': 'https://www.google.com', 'question': 'What is the capital of France?'}\x1b[0m",
11 | "COMMAND = browse_website ARGUMENTS = {'url': 'https://www.google.com', 'question': 'What is the capital of France?'}",
12 | ),
13 | (
14 | "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/Auto-GPT, https://discord.gg/autogpt und https://twitter.com/SigGravitas'}",
15 | "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/Auto-GPT, https://discord.gg/autogpt und https://twitter.com/SigGravitas'}",
16 | ),
17 | ("", ""),
18 | ("hello", "hello"),
19 | ("hello\x1B[31m world", "hello world"),
20 | ("\x1B[36mHello,\x1B[32m World!", "Hello, World!"),
21 | (
22 | "\x1B[1m\x1B[31mError:\x1B[0m\x1B[31m file not found",
23 | "Error: file not found",
24 | ),
25 | ],
26 | )
27 | def test_remove_color_codes(raw_text, clean_text):
28 | assert remove_color_codes(raw_text) == clean_text
29 |
--------------------------------------------------------------------------------
/tests/test_prompt_config.py:
--------------------------------------------------------------------------------
1 | from autogpt.config.prompt_config import PromptConfig
2 |
3 | """
4 | Test cases for the PromptConfig class, which handles loading the prompt configuration
5 | settings from a YAML file.
6 | """
7 |
8 |
9 | def test_prompt_config_loading(tmp_path):
10 | """Test if the prompt configuration loads correctly"""
11 |
12 | yaml_content = """
13 | constraints:
14 | - A test constraint
15 | - Another test constraint
16 | - A third test constraint
17 | resources:
18 | - A test resource
19 | - Another test resource
20 | - A third test resource
21 | performance_evaluations:
22 | - A test performance evaluation
23 | - Another test performance evaluation
24 | - A third test performance evaluation
25 | """
26 | config_file = tmp_path / "test_prompt_settings.yaml"
27 | config_file.write_text(yaml_content)
28 |
29 | prompt_config = PromptConfig(config_file)
30 |
31 | assert len(prompt_config.constraints) == 3
32 | assert prompt_config.constraints[0] == "A test constraint"
33 | assert prompt_config.constraints[1] == "Another test constraint"
34 | assert prompt_config.constraints[2] == "A third test constraint"
35 | assert len(prompt_config.resources) == 3
36 | assert prompt_config.resources[0] == "A test resource"
37 | assert prompt_config.resources[1] == "Another test resource"
38 | assert prompt_config.resources[2] == "A third test resource"
39 | assert len(prompt_config.performance_evaluations) == 3
40 | assert prompt_config.performance_evaluations[0] == "A test performance evaluation"
41 | assert (
42 | prompt_config.performance_evaluations[1]
43 | == "Another test performance evaluation"
44 | )
45 | assert (
46 | prompt_config.performance_evaluations[2]
47 | == "A third test performance evaluation"
48 | )
49 |
--------------------------------------------------------------------------------
/tests/test_token_counter.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from autogpt.llm import count_message_tokens, count_string_tokens
4 |
5 |
6 | def test_count_message_tokens():
7 | messages = [
8 | {"role": "user", "content": "Hello"},
9 | {"role": "assistant", "content": "Hi there!"},
10 | ]
11 | assert count_message_tokens(messages) == 17
12 |
13 |
14 | def test_count_message_tokens_with_name():
15 | messages = [
16 | {"role": "user", "content": "Hello", "name": "John"},
17 | {"role": "assistant", "content": "Hi there!"},
18 | ]
19 | assert count_message_tokens(messages) == 17
20 |
21 |
22 | def test_count_message_tokens_empty_input():
23 | """Empty input should return 3 tokens"""
24 | assert count_message_tokens([]) == 3
25 |
26 |
27 | def test_count_message_tokens_gpt_4():
28 |     messages = [
29 |         {"role": "user", "content": "Hello"},
30 |         {"role": "assistant", "content": "Hi there!"},
31 |     ]
32 |     assert count_message_tokens(messages, model="gpt-4-0314") == 15
33 |
34 |
35 | def test_count_string_tokens():
36 |     """Test that the string tokens are counted correctly."""
37 |
38 |     string = "Hello, world!"
39 |     assert count_string_tokens(string, model_name="gpt-3.5-turbo-0301") == 4
40 |
41 |
42 | def test_count_string_tokens_empty_input():
43 |     """Empty input should return 0 tokens."""
44 |
45 |     assert count_string_tokens("", model_name="gpt-3.5-turbo-0301") == 0
46 |
47 |
48 | def test_count_message_tokens_invalid_model():
49 |     """An invalid model should raise a NotImplementedError."""
50 |     messages = [
51 |         {"role": "user", "content": "Hello"},
52 |         {"role": "assistant", "content": "Hi there!"},
53 |     ]
54 |     with pytest.raises(NotImplementedError):
55 |         count_message_tokens(messages, model="invalid_model")
56 |
57 |
58 | def test_count_string_tokens_gpt_4():
59 |     """Test that the string tokens are counted correctly."""
60 |
61 |     string = "Hello, world!"
62 |     assert count_string_tokens(string, model_name="gpt-4-0314") == 4
63 |
--------------------------------------------------------------------------------
/tests/test_workspace.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | from pathlib import Path
3 |
4 | import pytest
5 |
6 | from autogpt.workspace import Workspace
7 |
8 | _WORKSPACE_ROOT = Path("home/users/monty/auto_gpt_workspace")
9 |
10 | _ACCESSIBLE_PATHS = [
11 | Path("."),
12 | Path("test_file.txt"),
13 | Path("test_folder"),
14 | Path("test_folder/test_file.txt"),
15 | Path("test_folder/.."),
16 | Path("test_folder/../test_file.txt"),
17 | Path("test_folder/../test_folder"),
18 | Path("test_folder/../test_folder/test_file.txt"),
19 | ]
20 |
21 | _INACCESSIBLE_PATHS = (
22 | [
23 | # Takes us out of the workspace
24 | Path(".."),
25 | Path("../test_file.txt"),
26 | Path("../not_auto_gpt_workspace"),
27 | Path("../not_auto_gpt_workspace/test_file.txt"),
28 | Path("test_folder/../.."),
29 | Path("test_folder/../../test_file.txt"),
30 | Path("test_folder/../../not_auto_gpt_workspace"),
31 | Path("test_folder/../../not_auto_gpt_workspace/test_file.txt"),
32 | ]
33 | + [
34 | # Contains null bytes
35 | Path(template.format(null_byte=null_byte))
36 | for template, null_byte in itertools.product(
37 | [
38 | "{null_byte}",
39 | "{null_byte}test_file.txt",
40 | "test_folder/{null_byte}",
41 | "test_folder/{null_byte}test_file.txt",
42 | ],
43 | Workspace.NULL_BYTES,
44 | )
45 | ]
46 | + [
47 | # Absolute paths
48 | Path("/"),
49 | Path("/test_file.txt"),
50 | Path("/home"),
51 | ]
52 | )
53 |
54 |
55 | @pytest.fixture()
56 | def workspace_root(tmp_path):
57 | return tmp_path / _WORKSPACE_ROOT
58 |
59 |
60 | @pytest.fixture(params=_ACCESSIBLE_PATHS)
61 | def accessible_path(request):
62 | return request.param
63 |
64 |
65 | @pytest.fixture(params=_INACCESSIBLE_PATHS)
66 | def inaccessible_path(request):
67 | return request.param
68 |
69 |
70 | def test_sanitize_path_accessible(accessible_path, workspace_root):
71 | full_path = Workspace._sanitize_path(
72 | accessible_path,
73 | root=workspace_root,
74 | restrict_to_root=True,
75 | )
76 | assert full_path.is_absolute()
77 | assert full_path.is_relative_to(workspace_root)
78 |
79 |
80 | def test_sanitize_path_inaccessible(inaccessible_path, workspace_root):
81 | with pytest.raises(ValueError):
82 | Workspace._sanitize_path(
83 | inaccessible_path,
84 | root=workspace_root,
85 | restrict_to_root=True,
86 | )
87 |
88 |
89 | def test_get_path_accessible(accessible_path, workspace_root):
90 | workspace = Workspace(workspace_root, True)
91 | full_path = workspace.get_path(accessible_path)
92 | assert full_path.is_absolute()
93 | assert full_path.is_relative_to(workspace_root)
94 |
95 |
96 | def test_get_path_inaccessible(inaccessible_path, workspace_root):
97 | workspace = Workspace(workspace_root, True)
98 | with pytest.raises(ValueError):
99 | workspace.get_path(inaccessible_path)
100 |
--------------------------------------------------------------------------------
/tests/unit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/unit/__init__.py
--------------------------------------------------------------------------------
/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip
--------------------------------------------------------------------------------
/tests/unit/test_chat.py:
--------------------------------------------------------------------------------
1 | # Generated by CodiumAI
2 | import time
3 | from unittest.mock import patch
4 |
5 | from autogpt.llm import create_chat_message, generate_context
6 |
7 |
8 | def test_happy_path_role_content():
9 | """Test that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content."""
10 | result = create_chat_message("system", "Hello, world!")
11 | assert result == {"role": "system", "content": "Hello, world!"}
12 |
13 |
14 | def test_empty_role_content():
15 | """Test that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content."""
16 | result = create_chat_message("", "")
17 | assert result == {"role": "", "content": ""}
18 |
19 |
20 | def test_generate_context_empty_inputs(mocker):
21 | """Test the behavior of the generate_context function when all input parameters are empty."""
22 | # Mock the time.strftime function to return a fixed value
23 | mocker.patch("time.strftime", return_value="Sat Apr 15 00:00:00 2023")
24 | # Arrange
25 | prompt = ""
26 | relevant_memory = ""
27 | full_message_history = []
28 | model = "gpt-3.5-turbo-0301"
29 |
30 | # Act
31 | result = generate_context(prompt, relevant_memory, full_message_history, model)
32 |
33 | # Assert
34 | expected_result = (
35 | -1,
36 | 32,
37 | 2,
38 | [
39 | {"role": "system", "content": ""},
40 | {
41 | "role": "system",
42 | "content": f"The current time and date is {time.strftime('%c')}",
43 | },
44 | ],
45 | )
46 | assert result == expected_result
47 |
48 |
49 | def test_generate_context_valid_inputs():
50 | """Test that the function successfully generates a current_context given valid inputs."""
51 | # Given
52 | prompt = "What is your favorite color?"
53 | relevant_memory = "You once painted your room blue."
54 | full_message_history = [
55 | create_chat_message("user", "Hi there!"),
56 | create_chat_message("assistant", "Hello! How can I assist you today?"),
57 | create_chat_message("user", "Can you tell me a joke?"),
58 | create_chat_message(
59 | "assistant",
60 | "Why did the tomato turn red? Because it saw the salad dressing!",
61 | ),
62 | create_chat_message("user", "Haha, that's funny."),
63 | ]
64 | model = "gpt-3.5-turbo-0301"
65 |
66 | # When
67 | result = generate_context(prompt, relevant_memory, full_message_history, model)
68 |
69 | # Then
70 | assert isinstance(result[0], int)
71 | assert isinstance(result[1], int)
72 | assert isinstance(result[2], int)
73 | assert isinstance(result[3], list)
74 | assert result[0] >= 0
75 | assert result[2] >= 0
76 | assert result[1] >= 0
77 | assert len(result[3]) >= 2 # current_context should have at least 2 messages
79 |     assert result[1] <= 2048  # tokens used should stay well below the 4096-token limit of gpt-3.5-turbo-0301
79 |
--------------------------------------------------------------------------------
/tests/unit/test_get_self_feedback.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
3 | from autogpt.agent.agent import Agent
4 | from autogpt.config import AIConfig
5 | from autogpt.llm import create_chat_completion
6 | from autogpt.log_cycle.log_cycle import LogCycleHandler
7 |
8 |
9 | def test_get_self_feedback(mocker):
10 | # Define a sample thoughts dictionary
11 | thoughts = {
12 | "reasoning": "Sample reasoning.",
13 | "plan": "Sample plan.",
14 | "thoughts": "Sample thoughts.",
15 | }
16 |
17 | # Define a fake response for the create_chat_completion function
18 | fake_response = (
19 | "The AI Agent has demonstrated a reasonable thought process, but there is room for improvement. "
20 | "For example, the reasoning could be elaborated to better justify the plan, and the plan itself "
21 | "could be more detailed to ensure its effectiveness. In addition, the AI Agent should focus more "
22 | "on its core role and prioritize thoughts that align with that role."
23 | )
24 |
25 | # Mock the create_chat_completion function
26 | mock_create_chat_completion = mocker.patch(
27 | "autogpt.agent.agent.create_chat_completion", wraps=create_chat_completion
28 | )
29 | mock_create_chat_completion.return_value = fake_response
30 |
31 | # Create a MagicMock object to replace the Agent instance
32 | agent_mock = mocker.MagicMock(spec=Agent)
33 |
34 | # Mock the config attribute of the Agent instance
35 | agent_mock.config = AIConfig()
36 |
37 | # Mock the log_cycle_handler attribute of the Agent instance
38 | agent_mock.log_cycle_handler = LogCycleHandler()
39 |
40 |     # Set the created_at timestamp used by the LogCycleHandler when creating log directories
41 | agent_mock.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
42 |
43 | # Mock the cycle_count attribute of the Agent instance
44 | agent_mock.cycle_count = 0
45 |
46 | # Call the get_self_feedback method
47 | feedback = Agent.get_self_feedback(
48 | agent_mock,
49 | thoughts,
50 | "gpt-3.5-turbo",
51 | )
52 |
53 | # Check if the response is a non-empty string
54 | assert isinstance(feedback, str) and len(feedback) > 0
55 |
56 | # Check if certain keywords from input thoughts are present in the feedback response
57 | for keyword in ["reasoning", "plan", "thoughts"]:
58 | assert keyword in feedback
59 |
--------------------------------------------------------------------------------
/tests/unit/test_spinner.py:
--------------------------------------------------------------------------------
1 | # Generated by CodiumAI
2 | import time
3 |
4 | from autogpt.spinner import Spinner
5 |
6 | """
7 | Code Analysis
8 |
9 | Main functionalities:
10 | The Spinner class provides a simple way to display a spinning animation while a process is running. It can be used to indicate that a process is ongoing and to provide visual feedback to the user. The class can be used as a context manager, which means that it can be used with the 'with' statement to automatically start and stop the spinner animation.
11 |
12 | Methods:
13 | - __init__(self, message: str = "Loading...", delay: float = 0.1) -> None: Initializes the Spinner class with a message to display and a delay between each spinner update.
14 | - spin(self) -> None: Spins the spinner animation while the process is running.
15 | - __enter__(self): Starts the spinner animation when used as a context manager.
16 | - __exit__(self, exc_type, exc_value, exc_traceback) -> None: Stops the spinner animation when used as a context manager.
17 | - update_message(self, new_message, delay=0.1): Updates the message displayed by the spinner animation.
18 |
19 | Fields:
20 | - spinner: An itertools.cycle object that contains the characters used for the spinner animation.
21 | - delay: The delay between each spinner update.
22 | - message: The message to display.
23 | - running: A boolean value that indicates whether the spinner animation is running.
24 | - spinner_thread: A threading.Thread object that runs the spin method in a separate thread.
25 | """
26 |
27 | ALMOST_DONE_MESSAGE = "Almost done..."
28 | PLEASE_WAIT = "Please wait..."
29 |
30 |
31 | def test_spinner_initializes_with_default_values():
32 | """Tests that the spinner initializes with default values."""
33 | with Spinner() as spinner:
34 | assert spinner.message == "Loading..."
35 | assert spinner.delay == 0.1
36 |
37 |
38 | def test_spinner_initializes_with_custom_values():
39 | """Tests that the spinner initializes with custom message and delay values."""
40 | with Spinner(message=PLEASE_WAIT, delay=0.2) as spinner:
41 | assert spinner.message == PLEASE_WAIT
42 | assert spinner.delay == 0.2
43 |
44 |
45 |
46 | def test_spinner_stops_spinning():
47 | """Tests that the spinner starts spinning and stops spinning without errors."""
48 | with Spinner() as spinner:
49 | time.sleep(1)
50 | spinner.update_message(ALMOST_DONE_MESSAGE)
51 | time.sleep(1)
52 | assert spinner.running == False
53 |
54 |
55 | def test_spinner_updates_message_and_still_spins():
56 | """Tests that the spinner message can be updated while the spinner is running and the spinner continues spinning."""
57 | with Spinner() as spinner:
58 | assert spinner.running == True
59 | time.sleep(1)
60 | spinner.update_message(ALMOST_DONE_MESSAGE)
61 | time.sleep(1)
62 | assert spinner.message == ALMOST_DONE_MESSAGE
63 | assert spinner.running == False
64 |
65 |
66 | def test_spinner_can_be_used_as_context_manager():
67 | """Tests that the spinner can be used as a context manager."""
68 | with Spinner() as spinner:
69 | assert spinner.running == True
70 | assert spinner.running == False
71 |
--------------------------------------------------------------------------------
/tests/unit/test_url_validation.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pytest import raises
3 |
4 | from autogpt.url_utils.validators import validate_url
5 |
6 |
7 | @validate_url
8 | def dummy_method(url):
9 | return url
10 |
11 |
12 | successful_test_data = (
13 | ("https://google.com/search?query=abc"),
14 | ("https://google.com/search?query=abc&p=123"),
15 | ("http://google.com/"),
16 | ("http://a.lot.of.domain.net/param1/param2"),
17 | )
18 |
19 |
20 | @pytest.mark.parametrize("url", successful_test_data)
21 | def test_url_validation_succeeds(url):
22 | assert dummy_method(url) == url
23 |
24 |
25 | bad_protocol_data = (
26 | ("htt://example.com"),
27 | ("httppp://example.com"),
28 | (" https://example.com"),
29 | )
30 |
31 |
32 | @pytest.mark.parametrize("url", bad_protocol_data)
33 | def test_url_validation_fails_bad_protocol(url):
34 | with raises(ValueError, match="Invalid URL format"):
35 | dummy_method(url)
36 |
37 |
38 | missing_loc = (("http://?query=q"),)
39 |
40 |
41 | @pytest.mark.parametrize("url", missing_loc)
42 | def test_url_validation_fails_missing_loc(url):
43 | with raises(ValueError, match="Missing Scheme or Network location"):
44 | dummy_method(url)
45 |
46 |
47 | local_file = (
48 | ("http://localhost"),
49 | ("https://localhost/"),
50 | ("http://2130706433"),
51 | ("https://2130706433"),
52 | ("http://127.0.0.1/"),
53 | )
54 |
55 |
56 | @pytest.mark.parametrize("url", local_file)
57 | def test_url_validation_fails_local_path(url):
58 | with raises(ValueError, match="Access to local files is restricted"):
59 | dummy_method(url)
60 |
--------------------------------------------------------------------------------
/tests/unit/test_web_selenium.py:
--------------------------------------------------------------------------------
1 | from autogpt.commands.web_selenium import browse_website
2 |
3 |
4 | def test_browse_website():
5 | url = "https://barrel-roll.com"
6 | question = "How to execute a barrel roll"
7 |
8 | response = browse_website(url, question)
9 | assert "Error" in response
10 | # Sanity check that the response is not too long
11 | assert len(response) < 200
12 |
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
1 | import functools
2 | import os
3 | from contextlib import contextmanager
4 |
5 | import pytest
6 |
7 | from autogpt.config import Config
8 |
9 |
10 | @contextmanager
11 | def dummy_openai_api_key():
12 | # even when we record the VCR cassettes, openAI wants an API key
13 | config = Config()
14 | original_api_key = config.openai_api_key
15 | config.set_openai_api_key("sk-dummy")
16 |
17 | try:
18 | yield
19 | finally:
20 | config.set_openai_api_key(original_api_key)
21 |
22 |
23 | def requires_api_key(env_var):
24 | def decorator(func):
25 | @functools.wraps(func)
26 | def wrapper(*args, **kwargs):
27 |             if env_var == "OPENAI_API_KEY" and not os.environ.get(env_var):
28 |                 # Replaying VCR cassettes still requires some API key to be set.
29 |                 with dummy_openai_api_key():
30 |                     return func(*args, **kwargs)
31 |             return func(*args, **kwargs)
32 |
33 | return wrapper
34 |
35 | return decorator
36 |
37 |
38 | def skip_in_ci(test_function):
39 | return pytest.mark.skipif(
40 | os.environ.get("CI") == "true",
41 | reason="This test doesn't work on GitHub Actions.",
42 | )(test_function)
43 |
44 |
45 | def get_workspace_file_path(workspace, file_name):
46 | return str(workspace.get_path(file_name))
47 |
--------------------------------------------------------------------------------
/tests/vcr/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/smol-ai/Auto-GPT/8a942d683056b90e8790522278b46e46e5e7ac9c/tests/vcr/__init__.py
--------------------------------------------------------------------------------
/tests/vcr/openai_filter.py:
--------------------------------------------------------------------------------
1 | import json
2 | import re
3 |
4 |
5 | def replace_timestamp_in_request(request):
6 | # Check if the request body contains a JSON object
7 |
8 | try:
9 | if not request or not request.body:
10 | return request
11 | body = json.loads(request.body)
12 | except ValueError:
13 | return request
14 |
15 | if "messages" not in body:
16 | return request
17 |
18 | for message in body["messages"]:
19 | if "content" in message and "role" in message and message["role"] == "system":
20 | timestamp_regex = re.compile(r"\w{3} \w{3} \d{2} \d{2}:\d{2}:\d{2} \d{4}")
21 | message["content"] = timestamp_regex.sub(
22 | "Tue Jan 01 00:00:00 2000", message["content"]
23 | )
24 |
25 | request.body = json.dumps(body)
26 | return request
27 |
28 |
29 | def before_record_response(response):
30 | if "Transfer-Encoding" in response["headers"]:
31 | del response["headers"]["Transfer-Encoding"]
32 | return response
33 |
34 |
35 | def before_record_request(request):
36 | filtered_request = filter_hostnames(request)
37 | filtered_request_without_dynamic_data = replace_timestamp_in_request(
38 | filtered_request
39 | )
40 | return filtered_request_without_dynamic_data
41 |
42 |
43 | def filter_hostnames(request):
44 | allowed_hostnames = [
45 | "api.openai.com",
46 | "localhost:50337",
47 | ] # List of hostnames you want to allow
48 |
49 | if any(hostname in request.url for hostname in allowed_hostnames):
50 | return request
51 | else:
52 | return None
53 |
--------------------------------------------------------------------------------
/tests/vcr/vcr_filter.py:
--------------------------------------------------------------------------------
1 | import json
2 | import re
3 | from typing import Any, Dict, List
4 | from urllib.parse import urlparse, urlunparse
5 |
6 | from tests.conftest import PROXY
7 |
8 | REPLACEMENTS: List[Dict[str, str]] = [
9 | {
10 | "regex": r"\w{3} \w{3} {1,2}\d{1,2} \d{2}:\d{2}:\d{2} \d{4}",
11 | "replacement": "Tue Jan 1 00:00:00 2000",
12 | },
13 | {
14 |         "regex": r"<selenium.webdriver.chrome.webdriver.WebDriver[^>]*>",
15 | "replacement": "",
16 | },
17 | ]
18 |
19 | ALLOWED_HOSTNAMES: List[str] = [
20 | "api.openai.com",
21 | "localhost:50337",
22 | ]
23 |
24 | if PROXY:
25 | ALLOWED_HOSTNAMES.append(PROXY)
26 | ORIGINAL_URL = PROXY
27 | else:
28 | ORIGINAL_URL = "no_ci"
29 |
30 | NEW_URL = "api.openai.com"
31 |
32 |
33 | def replace_message_content(content: str, replacements: List[Dict[str, str]]) -> str:
34 | for replacement in replacements:
35 | pattern = re.compile(replacement["regex"])
36 | content = pattern.sub(replacement["replacement"], content)
37 |
38 | return content
39 |
40 |
41 | def replace_timestamp_in_request(request: Any) -> Any:
42 | try:
43 | if not request or not request.body:
44 | return request
45 | body = json.loads(request.body)
46 | except ValueError:
47 | return request
48 |
49 | if "messages" not in body:
50 | return request
51 |     # This field is inconsistent between requests and not used at the moment.
52 |     body["max_tokens"] = 0
53 |
54 | for message in body["messages"]:
55 | if "content" in message and "role" in message:
56 | if message["role"] == "system":
57 | message["content"] = replace_message_content(
58 | message["content"], REPLACEMENTS
59 | )
60 |
61 | request.body = json.dumps(body)
62 | return request
63 |
64 |
65 | def before_record_response(response: Dict[str, Any]) -> Dict[str, Any]:
66 | if "Transfer-Encoding" in response["headers"]:
67 | del response["headers"]["Transfer-Encoding"]
68 | return response
69 |
70 |
71 | def before_record_request(request: Any) -> Any:
72 | request = replace_request_hostname(request, ORIGINAL_URL, NEW_URL)
73 |
74 | filtered_request = filter_hostnames(request)
75 | filtered_request_without_dynamic_data = replace_timestamp_in_request(
76 | filtered_request
77 | )
78 | return filtered_request_without_dynamic_data
79 |
80 |
81 | def replace_request_hostname(request: Any, original_url: str, new_hostname: str) -> Any:
82 |     parsed_url = urlparse(request.uri)
83 |
84 |     if parsed_url.hostname in original_url:
85 |         new_path = parsed_url.path.replace("/proxy_function", "")
86 |         request.uri = urlunparse(
87 |             parsed_url._replace(netloc=new_hostname, path=new_path, scheme="https")
88 |         )
89 |
90 |     return request
91 |
92 |
93 | def filter_hostnames(request: Any) -> Any:
94 |     # Only record requests to allowed hostnames; drop everything else.
95 |     if any(hostname in request.url for hostname in ALLOWED_HOSTNAMES):
96 |         return request
97 |     else:
98 |         return None
99 |
--------------------------------------------------------------------------------
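Note: a quick standalone illustration of the scrubbing performed by REPLACEMENTS above. The input string is made up; the expected output follows directly from the first replacement rule:

    from tests.vcr.vcr_filter import REPLACEMENTS, replace_message_content

    scrubbed = replace_message_content(
        "The current time and date is Tue May 16 09:15:01 2023", REPLACEMENTS
    )
    assert scrubbed == "The current time and date is Tue Jan 1 00:00:00 2000"

--------------------------------------------------------------------------------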